
Commit dc67647

JoonsooKim authored and torvalds committed
mm/cma: change fallback behaviour for CMA freepage
Freepages with MIGRATE_CMA can be used only for MIGRATE_MOVABLE allocations, and they should not be expanded onto another migratetype's buddy list, in order to protect them from unmovable/reclaimable allocations. Implementing these requirements in __rmqueue_fallback(), the path that finds the largest possible block of freepages, has the bad effect that high-order MIGRATE_CMA freepages are split again and again even when a CMA freepage of a suitable order is available. The reason is that these pages are never expanded onto another migratetype's buddy list, so the next __rmqueue_fallback() invocation finds another largest block of freepages and splits it once more. MIGRATE_CMA fallback should therefore be handled separately. This patch introduces __rmqueue_cma_fallback(), which is just a wrapper around __rmqueue_smallest(), and calls it before __rmqueue_fallback() when migratetype == MIGRATE_MOVABLE.

This results in an unintended behaviour change: as the fallback for movable allocations, MIGRATE_CMA freepages are now always used before any other migratetype. But, as mentioned above, MIGRATE_CMA pages can be used only for MIGRATE_MOVABLE, so it is better to use them first as much as possible. Otherwise, we needlessly take up precious freepages of other migratetypes and increase the chance of fragmentation.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 30467e0 commit dc67647
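
[Editorial note] The fragmentation argument in the message above can be demonstrated outside the kernel. Below is a minimal userspace sketch, not kernel code: nr_free, split(), take_largest() and take_smallest() are hypothetical stand-ins for the CMA free lists, expand(), the old largest-block fallback scan, and the __rmqueue_smallest() scan, assuming standard buddy splitting where each intermediate buddy returns to the same free list.

	/* build: cc -o cma_sim cma_sim.c */
	#include <stdio.h>

	#define MAX_ORDER 11

	/* free CMA blocks per order (toy stand-in for the free lists) */
	static int nr_free[MAX_ORDER];

	/* split a block of order 'from' down to 'want'; every intermediate
	 * buddy goes back on the same (CMA) free list */
	static void split(int from, int want)
	{
		int o;

		nr_free[from]--;
		for (o = from - 1; o >= want; o--)
			nr_free[o]++;
	}

	/* old behaviour: __rmqueue_fallback() scanned orders from the largest
	 * down, so every fallback broke the biggest remaining CMA block */
	static int take_largest(int want)
	{
		int o;

		for (o = MAX_ORDER - 1; o >= want; o--)
			if (nr_free[o]) {
				split(o, want);
				return 0;
			}
		return -1;
	}

	/* new behaviour: __rmqueue_cma_fallback() -> __rmqueue_smallest()
	 * scans from the requested order up, preferring the closest fit */
	static int take_smallest(int want)
	{
		int o;

		for (o = want; o < MAX_ORDER; o++)
			if (nr_free[o]) {
				split(o, want);
				return 0;
			}
		return -1;
	}

	static void run(int (*take)(int), const char *name)
	{
		int o, i;

		for (o = 0; o < MAX_ORDER; o++)
			nr_free[o] = 1;		/* one free CMA block per order */
		for (i = 0; i < 8; i++)		/* eight order-0 allocations */
			take(0);
		printf("%-20s", name);
		for (o = MAX_ORDER - 1; o >= 7; o--)
			printf("  order-%d: %d", o, nr_free[o]);
		printf("\n");
	}

	int main(void)
	{
		run(take_largest, "largest-first (old)");
		run(take_smallest, "smallest-first (new)");
		return 0;
	}

Under these toy assumptions, the largest-first run ends with no free blocks left at orders 8 through 10 after just eight order-0 allocations, while the smallest-first run still has one block at every order from 4 to 10.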

File tree: 1 file changed, +19 / -17 lines


mm/page_alloc.c

Lines changed: 19 additions & 17 deletions
@@ -1032,18 +1032,27 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 static int fallbacks[MIGRATE_TYPES][4] = {
 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
 #ifdef CONFIG_CMA
-	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
 	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
-#else
-	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
 #endif
 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
 #ifdef CONFIG_MEMORY_ISOLATION
 	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
 #endif
 };
 
+#ifdef CONFIG_CMA
+static struct page *__rmqueue_cma_fallback(struct zone *zone,
+					unsigned int order)
+{
+	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
+}
+#else
+static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
+					unsigned int order) { return NULL; }
+#endif
+
 /*
  * Move the free pages in a range to the free lists of the requested type.
  * Note that start_page and end_pages are not aligned on a pageblock
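
[Editorial note] One detail of the hunk above: the MIGRATE_MOVABLE row is now identical with and without CONFIG_CMA, which is why it could move out of the #ifdef. MIGRATE_CMA is no longer reachable through the table at all, only through the new wrapper. The standalone sketch below renders the post-patch table so the resulting fallback order can be printed; the enum values and the name array are simplified stand-ins, not the kernel's real migratetype enum.

	#include <stdio.h>

	#define CONFIG_CMA 1

	enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
	       MIGRATE_RESERVE, MIGRATE_CMA, MIGRATE_TYPES };

	/* post-patch table: one MIGRATE_MOVABLE row for both configurations */
	static const int fallbacks[MIGRATE_TYPES][4] = {
		[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
		[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
		[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	#ifdef CONFIG_CMA
		[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* never used */
	#endif
		[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* never used */
	};

	static const char * const name[MIGRATE_TYPES] = {
		"unmovable", "reclaimable", "movable", "reserve", "cma",
	};

	int main(void)
	{
		int mt, i;

		for (mt = 0; mt < MIGRATE_TYPES; mt++) {
			printf("%-11s falls back to:", name[mt]);
			for (i = 0; i < 4 && fallbacks[mt][i] != MIGRATE_RESERVE; i++)
				printf(" %s", name[fallbacks[mt][i]]);
			printf(" reserve\n");
		}
		return 0;
	}

Note that "movable" never reaches "cma" through the table; with the patch applied, that path exists only via __rmqueue_cma_fallback().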
@@ -1195,19 +1204,8 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 					struct page, lru);
 			area->nr_free--;
 
-			if (!is_migrate_cma(migratetype)) {
-				try_to_steal_freepages(zone, page,
-							start_migratetype,
-							migratetype);
-			} else {
-				/*
-				 * When borrowing from MIGRATE_CMA, we need to
-				 * release the excess buddy pages to CMA
-				 * itself, and we do not try to steal extra
-				 * free pages.
-				 */
-				buddy_type = migratetype;
-			}
+			try_to_steal_freepages(zone, page, start_migratetype,
+								migratetype);
 
 			/* Remove the page from the freelists */
 			list_del(&page->lru);
@@ -1249,7 +1247,11 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
 	page = __rmqueue_smallest(zone, order, migratetype);
 
 	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
-		page = __rmqueue_fallback(zone, order, migratetype);
+		if (migratetype == MIGRATE_MOVABLE)
+			page = __rmqueue_cma_fallback(zone, order);
+
+		if (!page)
+			page = __rmqueue_fallback(zone, order, migratetype);
 
 		/*
 		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
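
[Editorial note] Putting the pieces together, the new __rmqueue() order for a movable request is: own free list first, then CMA, then stealing from another migratetype. The toy below mirrors that decision order with a single free counter per migratetype; all names and starting counts are invented for illustration, and the real code of course works on per-order free lists.

	#include <stdio.h>
	#include <stdbool.h>

	enum mt { MOVABLE, CMA, OTHER, NR_MT };

	/* invented inventory: movable list empty, 2 CMA blocks, 5 others */
	static int nr_free[NR_MT] = { 0, 2, 5 };

	static bool rmqueue_smallest(enum mt type)
	{
		if (nr_free[type] > 0) {
			nr_free[type]--;
			return true;
		}
		return false;
	}

	/* mirrors __rmqueue_cma_fallback(): a thin wrapper over the
	 * smallest-first path, restricted to the CMA free list */
	static bool rmqueue_cma_fallback(void)
	{
		return rmqueue_smallest(CMA);
	}

	/* stands in for __rmqueue_fallback(): steal from another type */
	static bool rmqueue_fallback(void)
	{
		return rmqueue_smallest(OTHER);
	}

	/* the post-patch decision order for a MIGRATE_MOVABLE request */
	static const char *rmqueue_movable(void)
	{
		if (rmqueue_smallest(MOVABLE))
			return "movable free list";
		if (rmqueue_cma_fallback())
			return "CMA free list";
		if (rmqueue_fallback())
			return "stolen from another migratetype";
		return "failed";
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 8; i++)
			printf("allocation %d -> %s\n", i, rmqueue_movable());
		return 0;
	}

With this ordering, the two CMA blocks are consumed before any page is stolen from another migratetype, which is exactly the behaviour change the commit message describes.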
