Skip to content

Commit a448f2d

Browse files
yhuang-intel authored and torvalds committed
mm/swapfile.c: unify normal/huge code path in put_swap_page()
In this patch, the normal/huge code path in put_swap_page() and several helper functions are unified to avoid duplicated code, bugs, etc. and make it easier to review the code. The removed lines are more than added lines. And the binary size is kept exactly same when CONFIG_TRANSPARENT_HUGEPAGE=n. Link: http://lkml.kernel.org/r/20180720071845.17920-6-ying.huang@intel.com Signed-off-by: "Huang, Ying" <ying.huang@intel.com> Suggested-by: Dave Hansen <dave.hansen@linux.intel.com> Acked-by: Dave Hansen <dave.hansen@linux.intel.com> Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Shaohua Li <shli@kernel.org> Cc: Hugh Dickins <hughd@google.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Rik van Riel <riel@redhat.com> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Matthew Wilcox <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 33ee011 commit a448f2d

File tree

1 file changed

+37
-46
lines changed

1 file changed

+37
-46
lines changed

mm/swapfile.c

Lines changed: 37 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -204,8 +204,16 @@ static void discard_swap_cluster(struct swap_info_struct *si,
204204

205205
#ifdef CONFIG_THP_SWAP
206206
#define SWAPFILE_CLUSTER HPAGE_PMD_NR
207+
208+
#define swap_entry_size(size) (size)
207209
#else
208210
#define SWAPFILE_CLUSTER 256
211+
212+
/*
213+
* Define swap_entry_size() as constant to let compiler to optimize
214+
* out some code if !CONFIG_THP_SWAP
215+
*/
216+
#define swap_entry_size(size) 1
209217
#endif
210218
#define LATENCY_LIMIT 256
211219

@@ -1192,18 +1200,7 @@ void swap_free(swp_entry_t entry)
11921200
/*
11931201
* Called after dropping swapcache to decrease refcnt to swap entries.
11941202
*/
1195-
static void swapcache_free(swp_entry_t entry)
1196-
{
1197-
struct swap_info_struct *p;
1198-
1199-
p = _swap_info_get(entry);
1200-
if (p) {
1201-
if (!__swap_entry_free(p, entry, SWAP_HAS_CACHE))
1202-
free_swap_slot(entry);
1203-
}
1204-
}
1205-
1206-
static void swapcache_free_cluster(swp_entry_t entry)
1203+
void put_swap_page(struct page *page, swp_entry_t entry)
12071204
{
12081205
unsigned long offset = swp_offset(entry);
12091206
unsigned long idx = offset / SWAPFILE_CLUSTER;
@@ -1212,39 +1209,41 @@ static void swapcache_free_cluster(swp_entry_t entry)
12121209
unsigned char *map;
12131210
unsigned int i, free_entries = 0;
12141211
unsigned char val;
1215-
1216-
if (!IS_ENABLED(CONFIG_THP_SWAP))
1217-
return;
1212+
int size = swap_entry_size(hpage_nr_pages(page));
12181213

12191214
si = _swap_info_get(entry);
12201215
if (!si)
12211216
return;
12221217

1223-
ci = lock_cluster(si, offset);
1224-
VM_BUG_ON(!cluster_is_huge(ci));
1225-
map = si->swap_map + offset;
1226-
for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1227-
val = map[i];
1228-
VM_BUG_ON(!(val & SWAP_HAS_CACHE));
1229-
if (val == SWAP_HAS_CACHE)
1230-
free_entries++;
1231-
}
1232-
if (!free_entries) {
1233-
for (i = 0; i < SWAPFILE_CLUSTER; i++)
1234-
map[i] &= ~SWAP_HAS_CACHE;
1235-
}
1236-
cluster_clear_huge(ci);
1237-
unlock_cluster(ci);
1238-
if (free_entries == SWAPFILE_CLUSTER) {
1239-
spin_lock(&si->lock);
1218+
if (size == SWAPFILE_CLUSTER) {
12401219
ci = lock_cluster(si, offset);
1241-
memset(map, 0, SWAPFILE_CLUSTER);
1220+
VM_BUG_ON(!cluster_is_huge(ci));
1221+
map = si->swap_map + offset;
1222+
for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1223+
val = map[i];
1224+
VM_BUG_ON(!(val & SWAP_HAS_CACHE));
1225+
if (val == SWAP_HAS_CACHE)
1226+
free_entries++;
1227+
}
1228+
if (!free_entries) {
1229+
for (i = 0; i < SWAPFILE_CLUSTER; i++)
1230+
map[i] &= ~SWAP_HAS_CACHE;
1231+
}
1232+
cluster_clear_huge(ci);
12421233
unlock_cluster(ci);
1243-
mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
1244-
swap_free_cluster(si, idx);
1245-
spin_unlock(&si->lock);
1246-
} else if (free_entries) {
1247-
for (i = 0; i < SWAPFILE_CLUSTER; i++, entry.val++) {
1234+
if (free_entries == SWAPFILE_CLUSTER) {
1235+
spin_lock(&si->lock);
1236+
ci = lock_cluster(si, offset);
1237+
memset(map, 0, SWAPFILE_CLUSTER);
1238+
unlock_cluster(ci);
1239+
mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
1240+
swap_free_cluster(si, idx);
1241+
spin_unlock(&si->lock);
1242+
return;
1243+
}
1244+
}
1245+
if (size == 1 || free_entries) {
1246+
for (i = 0; i < size; i++, entry.val++) {
12481247
if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE))
12491248
free_swap_slot(entry);
12501249
}
@@ -1268,14 +1267,6 @@ int split_swap_cluster(swp_entry_t entry)
12681267
}
12691268
#endif
12701269

1271-
void put_swap_page(struct page *page, swp_entry_t entry)
1272-
{
1273-
if (!PageTransHuge(page))
1274-
swapcache_free(entry);
1275-
else
1276-
swapcache_free_cluster(entry);
1277-
}
1278-
12791270
static int swp_entry_cmp(const void *ent1, const void *ent2)
12801271
{
12811272
const swp_entry_t *e1 = ent1, *e2 = ent2;

0 commit comments

Comments
 (0)