Skip to content

Commit c89511a

Browse files
Mel Gorman authored and torvalds committed
mm: compaction: Restart compaction from near where it left off
This is almost entirely based on Rik's previous patches and discussions with him about how this might be implemented. Order > 0 compaction stops when enough free pages of the correct page order have been coalesced. When doing subsequent higher order allocations, it is possible for compaction to be invoked many times. However, the compaction code always starts out looking for things to compact at the start of the zone, and for free pages to compact things to at the end of the zone. This can cause quadratic behaviour, with isolate_freepages starting at the end of the zone each time, even though previous invocations of the compaction code already filled up all free memory on that end of the zone. This can cause isolate_freepages to take enormous amounts of CPU with certain workloads on larger memory systems. This patch caches where the migration and free scanner should start from on subsequent compaction invocations using the pageblock-skip information. When compaction starts it begins from the cached restart points and will update the cached restart points until a page is isolated or a pageblock is skipped that would have been scanned by synchronous compaction. Signed-off-by: Mel Gorman <mgorman@suse.de> Acked-by: Rik van Riel <riel@redhat.com> Cc: Richard Davies <richard@arachsys.com> Cc: Shaohua Li <shli@kernel.org> Cc: Avi Kivity <avi@redhat.com> Acked-by: Rafael Aquini <aquini@redhat.com> Cc: Fengguang Wu <fengguang.wu@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent bb13ffe commit c89511a

File tree

3 files changed

+56
-10
lines changed

3 files changed

+56
-10
lines changed

include/linux/mmzone.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -371,6 +371,10 @@ struct zone {
371371
int all_unreclaimable; /* All pages pinned */
372372
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
373373
unsigned long compact_blockskip_expire;
374+
375+
/* pfns where compaction scanners should start */
376+
unsigned long compact_cached_free_pfn;
377+
unsigned long compact_cached_migrate_pfn;
374378
#endif
375379
#ifdef CONFIG_MEMORY_HOTPLUG
376380
/* see spanned/present_pages for more description */

mm/compaction.c

Lines changed: 48 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,9 @@ static void reset_isolation_suitable(struct zone *zone)
8080
*/
8181
if (time_before(jiffies, zone->compact_blockskip_expire))
8282
return;
83+
84+
zone->compact_cached_migrate_pfn = start_pfn;
85+
zone->compact_cached_free_pfn = end_pfn;
8386
zone->compact_blockskip_expire = jiffies + (HZ * 5);
8487

8588
/* Walk the zone and mark every pageblock as suitable for isolation */
@@ -103,13 +106,29 @@ static void reset_isolation_suitable(struct zone *zone)
103106
* If no pages were isolated then mark this pageblock to be skipped in the
104107
* future. The information is later cleared by reset_isolation_suitable().
105108
*/
106-
static void update_pageblock_skip(struct page *page, unsigned long nr_isolated)
109+
static void update_pageblock_skip(struct compact_control *cc,
110+
struct page *page, unsigned long nr_isolated,
111+
bool migrate_scanner)
107112
{
113+
struct zone *zone = cc->zone;
108114
if (!page)
109115
return;
110116

111-
if (!nr_isolated)
117+
if (!nr_isolated) {
118+
unsigned long pfn = page_to_pfn(page);
112119
set_pageblock_skip(page);
120+
121+
/* Update where compaction should restart */
122+
if (migrate_scanner) {
123+
if (!cc->finished_update_migrate &&
124+
pfn > zone->compact_cached_migrate_pfn)
125+
zone->compact_cached_migrate_pfn = pfn;
126+
} else {
127+
if (!cc->finished_update_free &&
128+
pfn < zone->compact_cached_free_pfn)
129+
zone->compact_cached_free_pfn = pfn;
130+
}
131+
}
113132
}
114133
#else
115134
static inline bool isolation_suitable(struct compact_control *cc,
@@ -118,7 +137,9 @@ static inline bool isolation_suitable(struct compact_control *cc,
118137
return true;
119138
}
120139

121-
static void update_pageblock_skip(struct page *page, unsigned long nr_isolated)
140+
static void update_pageblock_skip(struct compact_control *cc,
141+
struct page *page, unsigned long nr_isolated,
142+
bool migrate_scanner)
122143
{
123144
}
124145
#endif /* CONFIG_COMPACTION */
@@ -327,7 +348,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
327348

328349
/* Update the pageblock-skip if the whole pageblock was scanned */
329350
if (blockpfn == end_pfn)
330-
update_pageblock_skip(valid_page, total_isolated);
351+
update_pageblock_skip(cc, valid_page, total_isolated, false);
331352

332353
return total_isolated;
333354
}
@@ -533,6 +554,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
533554
*/
534555
if (!cc->sync && last_pageblock_nr != pageblock_nr &&
535556
!migrate_async_suitable(get_pageblock_migratetype(page))) {
557+
cc->finished_update_migrate = true;
536558
goto next_pageblock;
537559
}
538560

@@ -583,6 +605,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
583605
VM_BUG_ON(PageTransCompound(page));
584606

585607
/* Successfully isolated */
608+
cc->finished_update_migrate = true;
586609
del_page_from_lru_list(page, lruvec, page_lru(page));
587610
list_add(&page->lru, migratelist);
588611
cc->nr_migratepages++;
@@ -609,7 +632,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
609632

610633
/* Update the pageblock-skip if the whole pageblock was scanned */
611634
if (low_pfn == end_pfn)
612-
update_pageblock_skip(valid_page, nr_isolated);
635+
update_pageblock_skip(cc, valid_page, nr_isolated, true);
613636

614637
trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
615638

@@ -690,8 +713,10 @@ static void isolate_freepages(struct zone *zone,
690713
* looking for free pages, the search will restart here as
691714
* page migration may have returned some pages to the allocator
692715
*/
693-
if (isolated)
716+
if (isolated) {
717+
cc->finished_update_free = true;
694718
high_pfn = max(high_pfn, pfn);
719+
}
695720
}
696721

697722
/* split_free_page does not map the pages */
@@ -888,6 +913,8 @@ unsigned long compaction_suitable(struct zone *zone, int order)
888913
static int compact_zone(struct zone *zone, struct compact_control *cc)
889914
{
890915
int ret;
916+
unsigned long start_pfn = zone->zone_start_pfn;
917+
unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
891918

892919
ret = compaction_suitable(zone, cc->order);
893920
switch (ret) {
@@ -900,10 +927,21 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
900927
;
901928
}
902929

903-
/* Setup to move all movable pages to the end of the zone */
904-
cc->migrate_pfn = zone->zone_start_pfn;
905-
cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
906-
cc->free_pfn &= ~(pageblock_nr_pages-1);
930+
/*
931+
* Setup to move all movable pages to the end of the zone. Used cached
932+
* information on where the scanners should start but check that it
933+
* is initialised by ensuring the values are within zone boundaries.
934+
*/
935+
cc->migrate_pfn = zone->compact_cached_migrate_pfn;
936+
cc->free_pfn = zone->compact_cached_free_pfn;
937+
if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
938+
cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
939+
zone->compact_cached_free_pfn = cc->free_pfn;
940+
}
941+
if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
942+
cc->migrate_pfn = start_pfn;
943+
zone->compact_cached_migrate_pfn = cc->migrate_pfn;
944+
}
907945

908946
/* Clear pageblock skip if there are numerous alloc failures */
909947
if (zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT)

mm/internal.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,10 @@ struct compact_control {
121121
unsigned long migrate_pfn; /* isolate_migratepages search base */
122122
bool sync; /* Synchronous migration */
123123
bool ignore_skip_hint; /* Scan blocks even if marked skip */
124+
bool finished_update_free; /* True when the zone cached pfns are
125+
* no longer being updated
126+
*/
127+
bool finished_update_migrate;
124128

125129
int order; /* order a direct compactor needs */
126130
int migratetype; /* MOVABLE, RECLAIMABLE etc */

0 commit comments

Comments (0)