
Commit 4fca973

gormanm authored and torvalds committed
mm, compaction: sample pageblocks for free pages
Once fast searching finishes, there is a possibility that the linear scanner is scanning full blocks found by the fast scanner earlier. This patch uses an adaptive stride to sample pageblocks for free pages. The more consecutive full pageblocks encountered, the larger the stride until a pageblock with free pages is found. The scanners might meet slightly sooner but it is an acceptable risk given that the search of the free lists may still encounter the pages and adjust the cached PFN of the free scanner accordingly. 5.0.0-rc1 5.0.0-rc1 roundrobin-v3r17 samplefree-v3r17 Amean fault-both-1 0.00 ( 0.00%) 0.00 * 0.00%* Amean fault-both-3 2752.37 ( 0.00%) 2729.95 ( 0.81%) Amean fault-both-5 4341.69 ( 0.00%) 4397.80 ( -1.29%) Amean fault-both-7 6308.75 ( 0.00%) 6097.61 ( 3.35%) Amean fault-both-12 10241.81 ( 0.00%) 9407.15 ( 8.15%) Amean fault-both-18 13736.09 ( 0.00%) 10857.63 * 20.96%* Amean fault-both-24 16853.95 ( 0.00%) 13323.24 * 20.95%* Amean fault-both-30 15862.61 ( 0.00%) 17345.44 ( -9.35%) Amean fault-both-32 18450.85 ( 0.00%) 16892.00 ( 8.45%) The latency is mildly improved offseting some overhead from earlier patches that are prerequisites for the rest of the series. However, a major impact is on the free scan rate with an 82% reduction. 5.0.0-rc1 5.0.0-rc1 roundrobin-v3r17 samplefree-v3r17 Compaction migrate scanned 21607271 20116887 Compaction free scanned 95336406 16668703 It's also the first time in the series where the number of pages scanned by the migration scanner is greater than the free scanner due to the increased search efficiency. Link: http://lkml.kernel.org/r/20190118175136.31341-21-mgorman@techsingularity.net Signed-off-by: Mel Gorman <mgorman@techsingularity.net> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Dan Carpenter <dan.carpenter@oracle.com> Cc: David Rientjes <rientjes@google.com> Cc: YueHaibing <yuehaibing@huawei.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent dbe2d4e commit 4fca973

1 file changed (+22, -6)


mm/compaction.c

Lines changed: 22 additions & 6 deletions
@@ -440,6 +440,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 				unsigned long *start_pfn,
 				unsigned long end_pfn,
 				struct list_head *freelist,
+				unsigned int stride,
 				bool strict)
 {
 	int nr_scanned = 0, total_isolated = 0;
@@ -449,10 +450,14 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	unsigned long blockpfn = *start_pfn;
 	unsigned int order;
 
+	/* Strict mode is for isolation, speed is secondary */
+	if (strict)
+		stride = 1;
+
 	cursor = pfn_to_page(blockpfn);
 
 	/* Isolate free pages. */
-	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
+	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
 		int isolated;
 		struct page *page = cursor;
 
@@ -614,7 +619,7 @@ isolate_freepages_range(struct compact_control *cc,
 			break;
 
 		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
-					block_end_pfn, &freelist, true);
+					block_end_pfn, &freelist, 0, true);
 
 		/*
 		 * In strict mode, isolate_freepages_block() returns 0 if
@@ -1132,15 +1137,15 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
 
 	/* Scan before */
 	if (start_pfn != pfn) {
-		isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, false);
+		isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
 		if (cc->nr_freepages >= cc->nr_migratepages)
 			return;
 	}
 
 	/* Scan after */
 	start_pfn = pfn + nr_isolated;
 	if (start_pfn != end_pfn)
-		isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, false);
+		isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
 
 	/* Skip this pageblock in the future as it's full or nearly full */
 	if (cc->nr_freepages < cc->nr_migratepages)
@@ -1332,6 +1337,7 @@ static void isolate_freepages(struct compact_control *cc)
 	unsigned long block_end_pfn;	/* end of current pageblock */
 	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
 	struct list_head *freelist = &cc->freepages;
+	unsigned int stride;
 
 	/* Try a small search of the free lists for a candidate */
 	isolate_start_pfn = fast_isolate_freepages(cc);
@@ -1354,6 +1360,7 @@ static void isolate_freepages(struct compact_control *cc)
 	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
 						zone_end_pfn(zone));
 	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
+	stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
 
 	/*
 	 * Isolate free pages until enough are available to migrate the
@@ -1364,6 +1371,8 @@ static void isolate_freepages(struct compact_control *cc)
 				block_end_pfn = block_start_pfn,
 				block_start_pfn -= pageblock_nr_pages,
 				isolate_start_pfn = block_start_pfn) {
+		unsigned long nr_isolated;
+
 		/*
 		 * This can iterate a massively long zone without finding any
 		 * suitable migration targets, so periodically check resched.
@@ -1385,8 +1394,8 @@ static void isolate_freepages(struct compact_control *cc)
 			continue;
 
 		/* Found a block suitable for isolating free pages from. */
-		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
-					freelist, false);
+		nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
+					block_end_pfn, freelist, stride, false);
 
 		/* Update the skip hint if the full pageblock was scanned */
 		if (isolate_start_pfn == block_end_pfn)
@@ -1410,6 +1419,13 @@ static void isolate_freepages(struct compact_control *cc)
 			 */
 			break;
 		}
+
+		/* Adjust stride depending on isolation */
+		if (nr_isolated) {
+			stride = 1;
+			continue;
+		}
+		stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
 	}
 
 	/*
