
Commit 0a79cda

mgorman authored and torvalds committed
mm: use alloc_flags to record if kswapd can wake
This is a preparation patch that copies the GFP flag __GFP_KSWAPD_RECLAIM
into alloc_flags, which avoids having to pass gfp_mask through a long
callchain in a future patch.

Note that the setting in the fast path happens in alloc_flags_nofragment()
and it may be claimed that this has nothing to do with ALLOC_NOFRAGMENT.
That is true in this patch but is not true later, so the flag is recorded
here now to show, for easier review, where it needs to be set.

No functional change.

[mgorman@techsingularity.net: ALLOC_KSWAPD flag needs to be applied in the !CONFIG_ZONE_DMA32 case]
  Link: http://lkml.kernel.org/r/20181126143503.GO23260@techsingularity.net
Link: http://lkml.kernel.org/r/20181123114528.28802-4-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <zi.yan@cs.rutgers.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
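To illustrate the pattern outside the kernel, here is a minimal standalone C sketch of what "record the GFP bit in alloc_flags once, then test alloc_flags deeper in the callchain" buys: the deep callee never needs to see gfp_mask. The flag values and the deep_callee() helper below are invented for this example, not taken from the kernel.

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's flag bits (values invented). */
    #define __GFP_KSWAPD_RECLAIM 0x400u /* caller allows waking kswapd */
    #define ALLOC_KSWAPD         0x200u /* mirror of the GFP bit in alloc_flags */

    /* Hypothetical deep callee: it only ever sees alloc_flags. */
    static void deep_callee(unsigned int alloc_flags)
    {
            if (alloc_flags & ALLOC_KSWAPD)
                    printf("would wake kswapd\n");
            else
                    printf("kswapd stays asleep\n");
    }

    int main(void)
    {
            unsigned int gfp_mask = __GFP_KSWAPD_RECLAIM;
            unsigned int alloc_flags = 0;

            /* Record the GFP bit once, near the top of the callchain. */
            if (gfp_mask & __GFP_KSWAPD_RECLAIM)
                    alloc_flags |= ALLOC_KSWAPD;

            deep_callee(alloc_flags); /* gfp_mask is not threaded through */
            return 0;
    }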
1 parent: a921444 · commit: 0a79cda

2 files changed: 19 additions, 15 deletions

mm/internal.h

Lines changed: 1 addition & 0 deletions

@@ -499,6 +499,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #else
 #define ALLOC_NOFRAGMENT 0x0
 #endif
+#define ALLOC_KSWAPD 0x200 /* allow waking of kswapd */

 enum ttu_flags;
 struct tlbflush_unmap_batch;
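Since ALLOC_KSWAPD shares the alloc_flags bitmask with the existing ALLOC_* values, its bit must be distinct from theirs. A quick standalone compile-time check of that property; note that ALLOC_NOFRAGMENT's non-zero CONFIG_ZONE_DMA32 value of 0x100 is an assumption here, since only the 0x0 fallback appears in this hunk:

    /* Compile-time check that the alloc_flags bits stay disjoint. */
    #define ALLOC_NOFRAGMENT 0x100 /* assumed CONFIG_ZONE_DMA32 value */
    #define ALLOC_KSWAPD     0x200 /* new in this patch */

    _Static_assert((ALLOC_NOFRAGMENT & ALLOC_KSWAPD) == 0,
                   "alloc_flags bits must not overlap");

    int main(void) { return 0; }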

mm/page_alloc.c

Lines changed: 18 additions & 15 deletions

@@ -3268,7 +3268,6 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 }
 #endif /* CONFIG_NUMA */

-#ifdef CONFIG_ZONE_DMA32
 /*
  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
  * fragmentation is subtle. If the preferred zone was HIGHMEM then
@@ -3278,10 +3277,16 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  * fragmentation between the Normal and DMA32 zones.
  */
 static inline unsigned int
-alloc_flags_nofragment(struct zone *zone)
+alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 {
+	unsigned int alloc_flags = 0;
+
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+		alloc_flags |= ALLOC_KSWAPD;
+
+#ifdef CONFIG_ZONE_DMA32
 	if (zone_idx(zone) != ZONE_NORMAL)
-		return 0;
+		goto out;

 	/*
 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
@@ -3290,17 +3295,12 @@ alloc_flags_nofragment(struct zone *zone)
 	 */
 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
 	if (nr_online_nodes > 1 && !populated_zone(--zone))
-		return 0;
+		goto out;

-	return ALLOC_NOFRAGMENT;
-}
-#else
-static inline unsigned int
-alloc_flags_nofragment(struct zone *zone)
-{
-	return 0;
+out:
+#endif /* CONFIG_ZONE_DMA32 */
+	return alloc_flags;
 }
-#endif

 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
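Read in isolation, the reworked function now has a single body in which the #ifdef only guards the fragmentation heuristic. The sketch below (stand-in types and invented flag values, compiled outside the kernel, with CONFIG_ZONE_DMA32 toggled by an ordinary -D define) shows the consequence called out in the changelog: the ALLOC_KSWAPD recording at the top now runs in the !CONFIG_ZONE_DMA32 build too, where the old two-definition structure returned 0 unconditionally.

    #include <stdio.h>

    /* Stand-ins for kernel definitions (values invented for the sketch). */
    #define __GFP_KSWAPD_RECLAIM 0x400u
    #define ALLOC_KSWAPD         0x200u
    typedef unsigned int gfp_t;

    static unsigned int
    alloc_flags_nofragment_sketch(int zone_is_normal, gfp_t gfp_mask)
    {
            unsigned int alloc_flags = 0;

            /* Runs in every configuration, not only CONFIG_ZONE_DMA32. */
            if (gfp_mask & __GFP_KSWAPD_RECLAIM)
                    alloc_flags |= ALLOC_KSWAPD;

    #ifdef CONFIG_ZONE_DMA32
            if (!zone_is_normal)
                    goto out;
            /* ... DMA32 population checks elided ... */
    out:
    #endif /* CONFIG_ZONE_DMA32 */
            (void)zone_is_normal; /* silence unused warning without DMA32 */
            return alloc_flags;
    }

    int main(void)
    {
            printf("0x%x\n",
                   alloc_flags_nofragment_sketch(1, __GFP_KSWAPD_RECLAIM));
            return 0;
    }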
@@ -3939,6 +3939,9 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;

+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+		alloc_flags |= ALLOC_KSWAPD;
+
 #ifdef CONFIG_CMA
 	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
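The slow path builds its alloc_flags from scratch in gfp_to_alloc_flags() rather than reusing the fast-path value, so the bit has to be recorded here a second time. Either way, the recorded bit tracks the GFP bit exactly; a throwaway harness to convince yourself of the equivalence the next two hunks rely on (flag values again invented for illustration):

    #include <assert.h>

    #define __GFP_KSWAPD_RECLAIM 0x400u /* invented value */
    #define ALLOC_KSWAPD         0x200u /* invented value */
    typedef unsigned int gfp_t;

    /* Toy model of the recording step shared by both alloc_flags producers. */
    static unsigned int record_kswapd(gfp_t gfp_mask)
    {
            unsigned int alloc_flags = 0;

            if (gfp_mask & __GFP_KSWAPD_RECLAIM)
                    alloc_flags |= ALLOC_KSWAPD;
            return alloc_flags;
    }

    int main(void)
    {
            /* (alloc_flags & ALLOC_KSWAPD) <=> (gfp_mask & __GFP_KSWAPD_RECLAIM) */
            for (gfp_t mask = 0; mask <= 0xfffu; mask++)
                    assert(!!(record_kswapd(mask) & ALLOC_KSWAPD) ==
                           !!(mask & __GFP_KSWAPD_RECLAIM));
            return 0;
    }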
@@ -4170,7 +4173,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (!ac->preferred_zoneref->zone)
 		goto nopage;

-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+	if (alloc_flags & ALLOC_KSWAPD)
 		wake_all_kswapds(order, gfp_mask, ac);

 	/*
@@ -4228,7 +4231,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,

 retry:
 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+	if (alloc_flags & ALLOC_KSWAPD)
 		wake_all_kswapds(order, gfp_mask, ac);

 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
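Both wake_all_kswapds() call sites in the slow path now key off the recorded ALLOC_KSWAPD bit instead of re-testing gfp_mask. The wake_all_kswapds() call itself, including its gfp_mask argument, is untouched context in both hunks, so only the guard condition moves, consistent with the changelog's "no functional change" claim.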
@@ -4451,7 +4454,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone);
+	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);

 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
