
Commit 1f947a7

Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "6 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: proc: smaps_rollup: fix pss_locked calculation
  Rename include/{uapi => }/asm-generic/shmparam.h really
  Revert "mm: use early_pfn_to_nid in page_ext_init"
  mm/gup: fix gup_pmd_range() for dax
  Revert "mm: slowly shrink slabs with a relatively small number of objects"
  Revert "mm: don't reclaim inodes with many attached pages"
2 parents: 991b9eb + 27dd768

File tree: 7 files changed, +21 −28 lines

fs/inode.c (2 additions, 5 deletions)

@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
 		return LRU_REMOVED;
 	}
 
-	/*
-	 * Recently referenced inodes and inodes with many attached pages
-	 * get one more pass.
-	 */
-	if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
+	/* recently referenced inodes get one more pass */
+	if (inode->i_state & I_REFERENCED) {
 		inode->i_state &= ~I_REFERENCED;
 		spin_unlock(&inode->i_lock);
 		return LRU_ROTATE;
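
A minimal userspace sketch of the "second chance" policy the restored hunk implements: a referenced entry is rotated back through the LRU once and only evicted on the next pass. The types and helper below are illustrative stand-ins, not the kernel's list_lru API:

#include <stdio.h>

enum lru_status { LRU_REMOVED, LRU_ROTATE };

struct entry {
	int referenced;		/* plays the role of I_REFERENCED */
};

/* rotate a recently referenced entry once; evict it the second time */
static enum lru_status isolate(struct entry *e)
{
	if (e->referenced) {
		e->referenced = 0;
		return LRU_ROTATE;
	}
	return LRU_REMOVED;
}

int main(void)
{
	struct entry e = { .referenced = 1 };

	printf("first pass:  %s\n",
	       isolate(&e) == LRU_ROTATE ? "rotate" : "remove");
	printf("second pass: %s\n",
	       isolate(&e) == LRU_ROTATE ? "rotate" : "remove");
	return 0;
}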

fs/proc/task_mmu.c (14 additions, 8 deletions)

@@ -423,7 +423,7 @@ struct mem_size_stats {
 };
 
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
-		bool compound, bool young, bool dirty)
+		bool compound, bool young, bool dirty, bool locked)
 {
 	int i, nr = compound ? 1 << compound_order(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;
@@ -450,24 +450,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 		else
 			mss->private_clean += size;
 		mss->pss += (u64)size << PSS_SHIFT;
+		if (locked)
+			mss->pss_locked += (u64)size << PSS_SHIFT;
 		return;
 	}
 
 	for (i = 0; i < nr; i++, page++) {
 		int mapcount = page_mapcount(page);
+		unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
 
 		if (mapcount >= 2) {
 			if (dirty || PageDirty(page))
 				mss->shared_dirty += PAGE_SIZE;
 			else
 				mss->shared_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+			mss->pss += pss / mapcount;
+			if (locked)
+				mss->pss_locked += pss / mapcount;
 		} else {
 			if (dirty || PageDirty(page))
 				mss->private_dirty += PAGE_SIZE;
 			else
 				mss->private_clean += PAGE_SIZE;
-			mss->pss += PAGE_SIZE << PSS_SHIFT;
+			mss->pss += pss;
+			if (locked)
+				mss->pss_locked += pss;
 		}
 	}
 }
@@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = walk->vma;
+	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
 
 	if (pte_present(*pte)) {
@@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	if (!page)
 		return;
 
-	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
+	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = walk->vma;
+	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page;
 
 	/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 		/* pass */;
 	else
 		VM_BUG_ON_PAGE(1, page);
-	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
+	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
 }
 #else
 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_area_struct *vma,
 		}
 	}
 #endif
-
 	/* mmap_sem is held in m_start */
 	walk_page_vma(vma, &smaps_walk);
-	if (vma->vm_flags & VM_LOCKED)
-		mss->pss_locked += mss->pss;
 }
 
 #define SEQ_PUT_DEC(str, val) \
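
The bug this fixes: smaps_rollup accumulates one mem_size_stats across all VMAs, so adding the whole running mss->pss to pss_locked for each VM_LOCKED VMA overstated PssLocked. The change accounts pss_locked page by page inside smaps_account(), using the same proportional fixed-point arithmetic as pss. A standalone sketch of that arithmetic (plain C; the PAGE_SIZE/PSS_SHIFT values and the sample mapcounts are illustrative assumptions, not taken from the kernel build):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PSS_SHIFT 12

int main(void)
{
	/* three pages shared by 1, 2 and 3 mappings respectively */
	int mapcount[] = { 1, 2, 3 };
	uint64_t pss = 0, pss_locked = 0;
	int locked = 1;	/* pretend the VMA is mlock()ed */

	for (int i = 0; i < 3; i++) {
		/* keep sub-byte precision by accumulating << PSS_SHIFT */
		uint64_t p = (uint64_t)PAGE_SIZE << PSS_SHIFT;

		/* each page contributes PAGE_SIZE / mapcount bytes */
		pss += p / mapcount[i];
		if (locked)	/* accounted per page, as in the fix */
			pss_locked += p / mapcount[i];
	}
	/* report in bytes, shifting back down like the kernel does */
	printf("Pss: %llu bytes, PssLocked: %llu bytes\n",
	       (unsigned long long)(pss >> PSS_SHIFT),
	       (unsigned long long)(pss_locked >> PSS_SHIFT));
	return 0;
}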

include/{uapi => }/asm-generic/shmparam.h

File renamed without changes.

init/main.c (2 additions, 1 deletion)

@@ -695,7 +695,6 @@ asmlinkage __visible void __init start_kernel(void)
 		initrd_start = 0;
 	}
 #endif
-	page_ext_init();
 	kmemleak_init();
 	setup_per_cpu_pageset();
 	numa_policy_init();
@@ -1131,6 +1130,8 @@ static noinline void __init kernel_init_freeable(void)
 	sched_init_smp();
 
 	page_alloc_init_late();
+	/* Initialize page ext after all struct pages are initialized. */
+	page_ext_init();
 
 	do_basic_setup();
 

mm/gup.c (2 additions, 1 deletion)

@@ -1674,7 +1674,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 		if (!pmd_present(pmd))
 			return 0;
 
-		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
+		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
+			     pmd_devmap(pmd))) {
 			/*
 			 * NUMA hinting faults need to be handled in the GUP
 			 * slowpath for accounting purposes and so that they
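
The failure mode here: a dax pmd is a leaf entry marked with pmd_devmap(), and on configurations where pmd_trans_huge() and pmd_huge() both report false for it, the fast-GUP walker fell through to gup_pte_range() and misread the huge entry as a page-table pointer. A contrived userspace sketch of that bug class (the flag names and walker below are invented for illustration, not kernel APIs):

#include <stdio.h>

#define F_TRANS_HUGE	0x1
#define F_HUGE		0x2
#define F_DEVMAP	0x4	/* dax-style leaf: the case forgotten pre-fix */

/* a walker must recognize every leaf flavour before descending */
static int walk_pmd(unsigned flags)
{
	if (flags & (F_TRANS_HUGE | F_HUGE | F_DEVMAP))
		return printf("leaf entry: handled directly\n");
	return printf("table entry: descending (bug if it was a leaf!)\n");
}

int main(void)
{
	walk_pmd(F_TRANS_HUGE);	/* recognized leaf */
	walk_pmd(F_DEVMAP);	/* a leaf only with the fix applied */
	return 0;
}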

mm/page_ext.c (1 addition, 3 deletions)

@@ -398,10 +398,8 @@ void __init page_ext_init(void)
 		 * We know some arch can have a nodes layout such as
 		 * -------------pfn-------------->
 		 * N0 | N1 | N2 | N0 | N1 | N2|....
-		 *
-		 * Take into account DEFERRED_STRUCT_PAGE_INIT.
 		 */
-		if (early_pfn_to_nid(pfn) != nid)
+		if (pfn_to_nid(pfn) != nid)
 			continue;
 		if (init_section_page_ext(pfn, nid))
 			goto oom;
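
Together with the init/main.c change above, this revert restores the original ordering: page_ext_init() now runs after page_alloc_init_late(), when deferred struct-page initialization has finished, so the plain pfn_to_nid() lookup is valid and the early_pfn_to_nid() workaround is no longer needed. A toy userspace illustration of that ordering constraint (the table, function names and values are invented for the demo):

#include <stdio.h>

#define NPFN 8
static int pfn_to_nid_tbl[NPFN];	/* stays -1 until late init runs */

static void early_boot(void)
{
	for (int i = 0; i < NPFN; i++)
		pfn_to_nid_tbl[i] = -1;	/* deferred: not initialized yet */
}

static void page_alloc_init_late_sim(void)
{
	for (int i = 0; i < NPFN; i++)
		pfn_to_nid_tbl[i] = i % 2;	/* interleaved N0|N1|N0|... */
}

static void page_ext_init_sim(const char *when)
{
	/* a consumer that trusts the table gives bogus answers if early */
	printf("%s: pfn 3 -> nid %d\n", when, pfn_to_nid_tbl[3]);
}

int main(void)
{
	early_boot();
	page_ext_init_sim("before late init");	/* bogus nid (-1) */
	page_alloc_init_late_sim();
	page_ext_init_sim("after late init");	/* correct nid */
	return 0;
}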

mm/vmscan.c (0 additions, 10 deletions)

@@ -491,16 +491,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 		delta = freeable / 2;
 	}
 
-	/*
-	 * Make sure we apply some minimal pressure on default priority
-	 * even on small cgroups. Stale objects are not only consuming memory
-	 * by themselves, but can also hold a reference to a dying cgroup,
-	 * preventing it from being reclaimed. A dying cgroup with all
-	 * corresponding structures like per-cpu stats and kmem caches
-	 * can be really big, so it may lead to a significant waste of memory.
-	 */
-	delta = max_t(unsigned long long, delta, min(freeable, batch_size));
-
 	total_scan += delta;
 	if (total_scan < 0) {
 		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
