Commit 50a53bb

Merge branch 'akpm' (Fixes from Andrew)
Merge misc fixes from Andrew Morton:
 "Seven fixes, some of them fingers-crossed :("

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (7 patches)
  drivers/rtc/rtc-tps65910.c: fix invalid pointer access on _remove()
  mm: soft offline: split thp at the beginning of soft_offline_page()
  mm: avoid waking kswapd for THP allocations when compaction is deferred or contended
  revert "Revert "mm: remove __GFP_NO_KSWAPD""
  mm: vmscan: fix endless loop in kswapd balancing
  mm/vmemmap: fix wrong use of virt_to_page
  mm: compaction: fix return value of capture_free_page()
2 parents 73efd00 + 1430e17

8 files changed: 68 additions, 43 deletions


drivers/mtd/mtdcore.c

Lines changed: 2 additions & 4 deletions
@@ -1077,8 +1077,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  * until the request succeeds or until the allocation size falls below
  * the system page size. This attempts to make sure it does not adversely
  * impact system performance, so when allocating more than one page, we
- * ask the memory allocator to avoid re-trying, swapping, writing back
- * or performing I/O.
+ * ask the memory allocator to avoid re-trying.
  *
  * Note, this function also makes sure that the allocated buffer is aligned to
  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
@@ -1092,8 +1091,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  */
 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
 {
-	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
-		       __GFP_NORETRY | __GFP_NO_KSWAPD;
+	gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
 	void *kbuf;
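The comment in this hunk describes a best-effort allocator: try a large buffer, fail fast, halve, and retry. A minimal sketch of that pattern under the new flags, assuming only the names visible in the hunk (the loop body itself is outside this diff, so its details are an assumption):

void *mtd_kmalloc_up_to_sketch(const struct mtd_info *mtd, size_t *size)
{
	/* Fail fast instead of retrying, swapping, or writing back. */
	gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;
		*size >>= 1;			/* halve the request and retry */
		*size = ALIGN(*size, mtd->writesize);
	}

	/* Last resort: a plain GFP_KERNEL attempt at the minimum size. */
	return kmalloc(*size, GFP_KERNEL);
}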

drivers/rtc/rtc-tps65910.c

Lines changed: 3 additions & 3 deletions
@@ -288,11 +288,11 @@ static int __devinit tps65910_rtc_probe(struct platform_device *pdev)
 static int __devexit tps65910_rtc_remove(struct platform_device *pdev)
 {
 	/* leave rtc running, but disable irqs */
-	struct rtc_device *rtc = platform_get_drvdata(pdev);
+	struct tps65910_rtc *tps_rtc = platform_get_drvdata(pdev);
 
-	tps65910_rtc_alarm_irq_enable(&rtc->dev, 0);
+	tps65910_rtc_alarm_irq_enable(&pdev->dev, 0);
 
-	rtc_device_unregister(rtc);
+	rtc_device_unregister(tps_rtc->rtc);
 	return 0;
 }
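The invalid pointer access was a type confusion: the driver stores its private struct tps65910_rtc as drvdata, not the struct rtc_device itself, so the old code dereferenced the wrong type. A sketch of the probe-side convention this fix relies on (only the tps_rtc->rtc field is confirmed by the diff; the ops name and the rest are illustrative assumptions):

struct tps65910_rtc {
	struct rtc_device *rtc;	/* the only field the diff confirms */
};

static int __devinit tps65910_rtc_probe(struct platform_device *pdev)
{
	struct tps65910_rtc *tps_rtc;

	tps_rtc = devm_kzalloc(&pdev->dev, sizeof(*tps_rtc), GFP_KERNEL);
	if (!tps_rtc)
		return -ENOMEM;

	/* drvdata holds the wrapper, not the rtc_device, which is why
	 * remove() must reach the device through tps_rtc->rtc. */
	platform_set_drvdata(pdev, tps_rtc);

	tps_rtc->rtc = rtc_device_register("tps65910-rtc", &pdev->dev,
					   &tps65910_rtc_ops /* assumed name */,
					   THIS_MODULE);
	return IS_ERR(tps_rtc->rtc) ? PTR_ERR(tps_rtc->rtc) : 0;
}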

include/linux/gfp.h

Lines changed: 5 additions & 8 deletions
@@ -30,10 +30,9 @@ struct vm_area_struct;
 #define ___GFP_HARDWALL		0x20000u
 #define ___GFP_THISNODE		0x40000u
 #define ___GFP_RECLAIMABLE	0x80000u
-#define ___GFP_NOTRACK		0x200000u
-#define ___GFP_NO_KSWAPD	0x400000u
-#define ___GFP_OTHER_NODE	0x800000u
-#define ___GFP_WRITE		0x1000000u
+#define ___GFP_NOTRACK		0x100000u
+#define ___GFP_OTHER_NODE	0x200000u
+#define ___GFP_WRITE		0x400000u
 
 /*
  * GFP bitmasks..
@@ -86,7 +85,6 @@ struct vm_area_struct;
 #define __GFP_RECLAIMABLE	((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
 
-#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
 #define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */
 
@@ -96,7 +94,7 @@ struct vm_area_struct;
  */
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
 
-#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 23	/* Room for N __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -116,8 +114,7 @@ struct vm_area_struct;
 			 __GFP_MOVABLE)
 #define GFP_IOFS	(__GFP_IO | __GFP_FS)
 #define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
-			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
-			 __GFP_NO_KSWAPD)
+			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)
 
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
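The renumbering is easy to verify: dropping ___GFP_NO_KSWAPD and repacking the remaining flags into consecutive bits leaves ___GFP_WRITE = 0x400000u = 1 << 22 as the highest flag, so 23 bits now cover everything, which is exactly why __GFP_BITS_SHIFT drops from 25 to 23. An illustrative compile-time check, not part of the patch:

#if ___GFP_WRITE != (1u << (__GFP_BITS_SHIFT - 1))
#error "__GFP_BITS_SHIFT no longer matches the highest defined GFP flag"
#endif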

include/trace/events/gfpflags.h

Lines changed: 0 additions & 1 deletion
@@ -36,7 +36,6 @@
 	{(unsigned long)__GFP_RECLAIMABLE,	"GFP_RECLAIMABLE"},	\
 	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"},		\
 	{(unsigned long)__GFP_NOTRACK,		"GFP_NOTRACK"},		\
-	{(unsigned long)__GFP_NO_KSWAPD,	"GFP_NO_KSWAPD"},	\
 	{(unsigned long)__GFP_OTHER_NODE,	"GFP_OTHER_NODE"}	\
 	) : "GFP_NOWAIT"

mm/memory-failure.c

Lines changed: 8 additions & 0 deletions
@@ -1476,9 +1476,17 @@ int soft_offline_page(struct page *page, int flags)
 {
 	int ret;
 	unsigned long pfn = page_to_pfn(page);
+	struct page *hpage = compound_trans_head(page);
 
 	if (PageHuge(page))
 		return soft_offline_huge_page(page, flags);
+	if (PageTransHuge(hpage)) {
+		if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
+			pr_info("soft offline: %#lx: failed to split THP\n",
+				pfn);
+			return -EBUSY;
+		}
+	}
 
 	ret = get_any_page(page, pfn, flags);
 	if (ret < 0)
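Soft offlining is driven from userspace, and the rest of this function only understands base pages, so an anonymous THP must be split up front. A minimal way to exercise the path (hedged: requires CONFIG_MEMORY_FAILURE, a THP-enabled kernel, and sufficient privilege; error handling omitted; the madvise constants are guarded in case the libc headers predate them):

#include <sys/mman.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14	/* from asm-generic/mman-common.h */
#endif
#ifndef MADV_SOFT_OFFLINE
#define MADV_SOFT_OFFLINE 101	/* from asm-generic/mman-common.h */
#endif

int main(void)
{
	size_t len = 4UL << 20;		/* room for at least one 2 MiB THP */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	madvise(buf, len, MADV_HUGEPAGE);	/* request THP backing */
	buf[0] = 1;				/* fault the mapping in */

	/* With this fix the kernel splits the THP, then offlines one page. */
	madvise(buf, 4096, MADV_SOFT_OFFLINE);
	return 0;
}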

mm/page_alloc.c

Lines changed: 28 additions & 12 deletions
@@ -1422,7 +1422,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 		}
 	}
 
-	return 1UL << order;
+	return 1UL << alloc_order;
 }
 
 /*
@@ -2378,6 +2378,15 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }
 
+/* Returns true if the allocation is likely for THP */
+static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
+{
+	if (order == pageblock_order &&
+	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+		return true;
+	return false;
+}
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
@@ -2416,9 +2425,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	if (!(gfp_mask & __GFP_NO_KSWAPD))
+	/* The decision whether to wake kswapd for THP is made later */
+	if (!is_thp_alloc(gfp_mask, order))
 		wake_all_kswapd(order, zonelist, high_zoneidx,
-						zone_idx(preferred_zone));
+				zone_idx(preferred_zone));
 
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
@@ -2488,15 +2498,21 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto got_pg;
 	sync_migration = true;
 
-	/*
-	 * If compaction is deferred for high-order allocations, it is because
-	 * sync compaction recently failed. In this is the case and the caller
-	 * requested a movable allocation that does not heavily disrupt the
-	 * system then fail the allocation instead of entering direct reclaim.
-	 */
-	if ((deferred_compaction || contended_compaction) &&
-						(gfp_mask & __GFP_NO_KSWAPD))
-		goto nopage;
+	if (is_thp_alloc(gfp_mask, order)) {
+		/*
+		 * If compaction is deferred for high-order allocations, it is
+		 * because sync compaction recently failed. If this is the case
+		 * and the caller requested a movable allocation that does not
+		 * heavily disrupt the system then fail the allocation instead
+		 * of entering direct reclaim.
+		 */
+		if (deferred_compaction || contended_compaction)
+			goto nopage;
+
+		/* If process is willing to reclaim/compact then wake kswapd */
+		wake_all_kswapd(order, zonelist, high_zoneidx,
+				zone_idx(preferred_zone));
+	}
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
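With __GFP_NO_KSWAPD gone, the slow path can no longer recognize THP attempts by flag, so is_thp_alloc() infers them from the request's shape: exactly pageblock_order pages, __GFP_MOVABLE set, __GFP_REPEAT clear. A quick check against the GFP_TRANSHUGE definition from the gfp.h hunk above (hedged: the order equality is an architectural assumption that holds on x86-64, where HPAGE_PMD_ORDER == pageblock_order == 9 for 2 MiB huge pages):

/* Illustrative only; GFP_TRANSHUGE carries __GFP_MOVABLE via
 * GFP_HIGHUSER_MOVABLE and never sets __GFP_REPEAT. */
bool a = is_thp_alloc(GFP_TRANSHUGE, pageblock_order);	/* true */
bool b = is_thp_alloc(GFP_KERNEL, pageblock_order);	/* false: !__GFP_MOVABLE */
bool c = is_thp_alloc(GFP_TRANSHUGE, 0);		/* false: not a pageblock */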

mm/sparse.c

Lines changed: 4 additions & 6 deletions
@@ -617,7 +617,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 {
 	return; /* XXX: Not implemented yet */
 }
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 {
 }
 #else
@@ -658,10 +658,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 		   get_order(sizeof(struct page) * nr_pages));
 }
 
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 {
 	unsigned long maps_section_nr, removing_section_nr, i;
 	unsigned long magic;
+	struct page *page = virt_to_page(memmap);
 
 	for (i = 0; i < nr_pages; i++, page++) {
 		magic = (unsigned long) page->lru.next;
@@ -710,13 +711,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	 */
 
 	if (memmap) {
-		struct page *memmap_page;
-		memmap_page = virt_to_page(memmap);
-
 		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
 			>> PAGE_SHIFT;
 
-		free_map_bootmem(memmap_page, nr_pages);
+		free_map_bootmem(memmap, nr_pages);
 	}
 }
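The wrong use of virt_to_page(): it is only defined for addresses in the kernel's linear mapping, but with CONFIG_SPARSEMEM_VMEMMAP the memmap lives in the vmemmap region, so converting in the generic caller produced a bogus struct page. The fix passes the raw memmap pointer down and converts only in the non-vmemmap free_map_bootmem(), where the memmap is known to be bootmem-allocated and hence linearly mapped. An illustrative contrast (assumes the x86-64 layout, where 'vmemmap' is the kernel's global vmemmap base):

/* Sketch: where virt_to_page() is and is not legitimate. */
void *lin = kmalloc(PAGE_SIZE, GFP_KERNEL);
struct page *ok  = virt_to_page(lin);		/* linear-map address: valid */
struct page *bad = virt_to_page(vmemmap);	/* vmemmap address: undefined */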

mm/vmscan.c

Lines changed: 18 additions & 9 deletions
@@ -2414,6 +2414,19 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc)
 	} while (memcg);
 }
 
+static bool zone_balanced(struct zone *zone, int order,
+			  unsigned long balance_gap, int classzone_idx)
+{
+	if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+				    balance_gap, classzone_idx, 0))
+		return false;
+
+	if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+		return false;
+
+	return true;
+}
+
 /*
  * pgdat_balanced is used when checking if a node is balanced for high-order
  * allocations. Only zones that meet watermarks and are in a zone allowed
@@ -2492,8 +2505,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
 			continue;
 		}
 
-		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-							i, 0))
+		if (!zone_balanced(zone, order, 0, i))
 			all_zones_ok = false;
 		else
 			balanced += zone->present_pages;
@@ -2602,8 +2614,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				break;
 			}
 
-			if (!zone_watermark_ok_safe(zone, order,
-					high_wmark_pages(zone), 0, 0)) {
+			if (!zone_balanced(zone, order, 0, 0)) {
 				end_zone = i;
 				break;
 			} else {
@@ -2679,9 +2690,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				testorder = 0;
 
 			if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
-			    !zone_watermark_ok_safe(zone, testorder,
-					high_wmark_pages(zone) + balance_gap,
-					end_zone, 0)) {
+			    !zone_balanced(zone, testorder,
+					   balance_gap, end_zone)) {
 				shrink_zone(zone, &sc);
 
 				reclaim_state->reclaimed_slab = 0;
@@ -2708,8 +2718,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				continue;
 			}
 
-			if (!zone_watermark_ok_safe(zone, testorder,
-					high_wmark_pages(zone), end_zone, 0)) {
+			if (!zone_balanced(zone, testorder, 0, end_zone)) {
 				all_zones_ok = 0;
 				/*
 				 * We are still under min water mark. This
