
Commit 4eb7dce

JoonsooKim authored and torvalds committed
mm/page_alloc: factor out fallback freepage checking
This is a preparation step for using the page allocator's anti-fragmentation logic in compaction. This patch just separates the fallback freepage checking part from the fallback freepage management part. Therefore, there is no functional change.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent dc67647 commit 4eb7dce

1 file changed: +91 -52 lines

mm/page_alloc.c

Lines changed: 91 additions & 52 deletions
@@ -1145,34 +1145,81 @@ static void change_pageblock_range(struct page *pageblock_page,
  * as fragmentation caused by those allocations polluting movable pageblocks
  * is worse than movable allocations stealing from unmovable and reclaimable
  * pageblocks.
- *
- * If we claim more than half of the pageblock, change pageblock's migratetype
- * as well.
  */
-static void try_to_steal_freepages(struct zone *zone, struct page *page,
-                                   int start_type, int fallback_type)
+static bool can_steal_fallback(unsigned int order, int start_mt)
+{
+        /*
+         * Leaving this order check is intended, although there is
+         * relaxed order check in next check. The reason is that
+         * we can actually steal whole pageblock if this condition met,
+         * but, below check doesn't guarantee it and that is just heuristic
+         * so could be changed anytime.
+         */
+        if (order >= pageblock_order)
+                return true;
+
+        if (order >= pageblock_order / 2 ||
+                start_mt == MIGRATE_RECLAIMABLE ||
+                start_mt == MIGRATE_UNMOVABLE ||
+                page_group_by_mobility_disabled)
+                return true;
+
+        return false;
+}
+
+/*
+ * This function implements actual steal behaviour. If order is large enough,
+ * we can steal whole pageblock. If not, we first move freepages in this
+ * pageblock and check whether half of pages are moved or not. If half of
+ * pages are moved, we can change migratetype of pageblock and permanently
+ * use it's pages as requested migratetype in the future.
+ */
+static void steal_suitable_fallback(struct zone *zone, struct page *page,
+                                    int start_type)
 {
         int current_order = page_order(page);
+        int pages;
 
         /* Take ownership for orders >= pageblock_order */
         if (current_order >= pageblock_order) {
                 change_pageblock_range(page, current_order, start_type);
                 return;
         }
 
-        if (current_order >= pageblock_order / 2 ||
-            start_type == MIGRATE_RECLAIMABLE ||
-            start_type == MIGRATE_UNMOVABLE ||
-            page_group_by_mobility_disabled) {
-                int pages;
+        pages = move_freepages_block(zone, page, start_type);
+
+        /* Claim the whole block if over half of it is free */
+        if (pages >= (1 << (pageblock_order-1)) ||
+                        page_group_by_mobility_disabled)
+                set_pageblock_migratetype(page, start_type);
+}
+
+/* Check whether there is a suitable fallback freepage with requested order. */
+static int find_suitable_fallback(struct free_area *area, unsigned int order,
+                                  int migratetype, bool *can_steal)
+{
+        int i;
+        int fallback_mt;
+
+        if (area->nr_free == 0)
+                return -1;
+
+        *can_steal = false;
+        for (i = 0;; i++) {
+                fallback_mt = fallbacks[migratetype][i];
+                if (fallback_mt == MIGRATE_RESERVE)
+                        break;
+
+                if (list_empty(&area->free_list[fallback_mt]))
+                        continue;
 
-                pages = move_freepages_block(zone, page, start_type);
+                if (can_steal_fallback(order, migratetype))
+                        *can_steal = true;
 
-                /* Claim the whole block if over half of it is free */
-                if (pages >= (1 << (pageblock_order-1)) ||
-                                page_group_by_mobility_disabled)
-                        set_pageblock_migratetype(page, start_type);
+                return fallback_mt;
         }
+
+        return -1;
 }
 
 /* Remove an element from the buddy allocator from the fallback list */
@@ -1182,53 +1229,45 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
         struct free_area *area;
         unsigned int current_order;
         struct page *page;
+        int fallback_mt;
+        bool can_steal;
 
         /* Find the largest possible block of pages in the other list */
         for (current_order = MAX_ORDER-1;
                                 current_order >= order && current_order <= MAX_ORDER-1;
                                 --current_order) {
-                int i;
-                for (i = 0;; i++) {
-                        int migratetype = fallbacks[start_migratetype][i];
-                        int buddy_type = start_migratetype;
-
-                        /* MIGRATE_RESERVE handled later if necessary */
-                        if (migratetype == MIGRATE_RESERVE)
-                                break;
-
-                        area = &(zone->free_area[current_order]);
-                        if (list_empty(&area->free_list[migratetype]))
-                                continue;
-
-                        page = list_entry(area->free_list[migratetype].next,
-                                        struct page, lru);
-                        area->nr_free--;
-
-                        try_to_steal_freepages(zone, page, start_migratetype,
-                                               migratetype);
+                area = &(zone->free_area[current_order]);
+                fallback_mt = find_suitable_fallback(area, current_order,
+                                start_migratetype, &can_steal);
+                if (fallback_mt == -1)
+                        continue;
 
-                        /* Remove the page from the freelists */
-                        list_del(&page->lru);
-                        rmv_page_order(page);
+                page = list_entry(area->free_list[fallback_mt].next,
+                                                struct page, lru);
+                if (can_steal)
+                        steal_suitable_fallback(zone, page, start_migratetype);
 
-                        expand(zone, page, order, current_order, area,
-                                        buddy_type);
+                /* Remove the page from the freelists */
+                area->nr_free--;
+                list_del(&page->lru);
+                rmv_page_order(page);
 
-                        /*
-                         * The freepage_migratetype may differ from pageblock's
-                         * migratetype depending on the decisions in
-                         * try_to_steal_freepages(). This is OK as long as it
-                         * does not differ for MIGRATE_CMA pageblocks. For CMA
-                         * we need to make sure unallocated pages flushed from
-                         * pcp lists are returned to the correct freelist.
-                         */
-                        set_freepage_migratetype(page, buddy_type);
+                expand(zone, page, order, current_order, area,
+                                        start_migratetype);
+                /*
+                 * The freepage_migratetype may differ from pageblock's
+                 * migratetype depending on the decisions in
+                 * try_to_steal_freepages(). This is OK as long as it
+                 * does not differ for MIGRATE_CMA pageblocks. For CMA
+                 * we need to make sure unallocated pages flushed from
+                 * pcp lists are returned to the correct freelist.
+                 */
+                set_freepage_migratetype(page, start_migratetype);
 
-                        trace_mm_page_alloc_extfrag(page, order, current_order,
-                                                start_migratetype, migratetype);
+                trace_mm_page_alloc_extfrag(page, order, current_order,
+                                        start_migratetype, fallback_mt);
 
-                        return page;
-                }
+                return page;
         }
 
         return NULL;
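The split the commit message describes is easiest to see from the caller's side: __rmqueue_fallback() now asks find_suitable_fallback() a pure question (which fallback migratetype is available, and may we steal from it?) and only then performs the management step via steal_suitable_fallback(). The following is a minimal, self-contained userspace sketch of just that decision logic, not kernel code; the constants (a pageblock order of 9, i.e. a 512-page pageblock) and the reduced helpers are illustrative assumptions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for kernel symbols; values are assumptions. */
#define PAGEBLOCK_ORDER 9

enum migratetype {
        MIGRATE_UNMOVABLE,
        MIGRATE_RECLAIMABLE,
        MIGRATE_MOVABLE,
        MIGRATE_RESERVE,
};

static bool page_group_by_mobility_disabled = false;

/* Checking part: mirrors the shape of can_steal_fallback() above. */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
        if (order >= PAGEBLOCK_ORDER)
                return true;

        if (order >= PAGEBLOCK_ORDER / 2 ||
            start_mt == MIGRATE_RECLAIMABLE ||
            start_mt == MIGRATE_UNMOVABLE ||
            page_group_by_mobility_disabled)
                return true;

        return false;
}

/*
 * Management part, reduced to its decision: claim the whole pageblock
 * only if more than half of its pages were moved to the new freelist.
 */
static bool should_claim_whole_block(int pages_moved)
{
        return pages_moved >= (1 << (PAGEBLOCK_ORDER - 1)) ||
               page_group_by_mobility_disabled;
}

int main(void)
{
        /* A movable order-3 request may not steal; an unmovable one may. */
        printf("movable order-3 steals:   %d\n", can_steal_fallback(3, MIGRATE_MOVABLE));
        printf("unmovable order-3 steals: %d\n", can_steal_fallback(3, MIGRATE_UNMOVABLE));

        /* Moving 300 of 512 pages is over half, so the block would be claimed. */
        printf("claim after moving 300/512 pages: %d\n", should_claim_whole_block(300));
        return 0;
}

Keeping the predicate side effect free is what makes it reusable: compaction can later call the checking part to decide whether a pageblock is worth working on without touching any freelists.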
