
Commit 4b9eaf3

Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton: "7 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/memory_hotplug.c: initialize per_cpu_nodestats for hotadded pgdats
  mm, oom: fix uninitialized ret in task_will_free_mem()
  kasan: remove the unnecessary WARN_ONCE from quarantine.c
  mm: memcontrol: fix memcg id ref counter on swap charge move
  mm: memcontrol: fix swap counter leak on swapout from offline cgroup
  proc, meminfo: use correct helpers for calculating LRU sizes in meminfo
  mm/hugetlb: fix incorrect hugepages count during mem hotplug
2 parents d3396e1 + 5830169 commit 4b9eaf3

7 files changed: +64, -20 lines

fs/proc/meminfo.c

Lines changed: 1 addition & 1 deletion
@@ -46,7 +46,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		cached = 0;
 
 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
-		pages[lru] = global_page_state(NR_LRU_BASE + lru);
+		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
 
 	available = si_mem_available();
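
This change, and the matching one in mm/page_alloc.c further down, switches the per-LRU totals from the zone-level helper to the node-level one, because these LRU counters are now tracked per node. A rough userspace sketch of the summation such a helper performs follows; the arrays and sizes here are made up for illustration and are not the kernel's vmstat machinery.

#include <stdio.h>

/* hypothetical per-node LRU counters standing in for the kernel's node vmstat */
enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON, LRU_INACTIVE_FILE,
		LRU_ACTIVE_FILE, LRU_UNEVICTABLE, NR_LRU_LISTS };
#define NR_NODES 2

static const unsigned long node_lru_pages[NR_NODES][NR_LRU_LISTS] = {
	{ 100, 200, 300, 400, 5 },
	{  10,  20,  30,  40, 1 },
};

/* node-level helper: sum one LRU counter across every node */
static unsigned long global_node_lru(enum lru_list lru)
{
	unsigned long sum = 0;

	for (int nid = 0; nid < NR_NODES; nid++)
		sum += node_lru_pages[nid][lru];
	return sum;
}

int main(void)
{
	unsigned long pages[NR_LRU_LISTS];

	for (int lru = 0; lru < NR_LRU_LISTS; lru++)
		pages[lru] = global_node_lru(lru);

	printf("Active(file) pages: %lu\n", pages[LRU_ACTIVE_FILE]);
	return 0;
}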

mm/hugetlb.c

Lines changed: 1 addition & 0 deletions
@@ -1448,6 +1448,7 @@ static void dissolve_free_huge_page(struct page *page)
 		list_del(&page->lru);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
+		h->max_huge_pages--;
 		update_and_free_page(h, page);
 	}
 	spin_unlock(&hugetlb_lock);
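
dissolve_free_huge_page() permanently removes a free huge page during memory hot-remove, so the pool's nominal size has to shrink along with the free counts; otherwise max_huge_pages keeps counting pages that no longer exist. A toy sketch of keeping such counters in step, using a hypothetical struct rather than the kernel's struct hstate:

#include <stdio.h>

/* hypothetical pool bookkeeping, loosely mirroring the counters touched above */
struct pool {
	unsigned long free_pages;	/* pages sitting on the free list */
	unsigned long max_pages;	/* nominal pool size */
};

/* removing a free page from the pool must shrink both counters together */
static void dissolve_free_page(struct pool *p)
{
	p->free_pages--;
	p->max_pages--;		/* the pool is now permanently one page smaller */
}

int main(void)
{
	struct pool p = { .free_pages = 4, .max_pages = 16 };

	dissolve_free_page(&p);
	printf("free=%lu max=%lu\n", p.free_pages, p.max_pages);
	return 0;
}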

mm/kasan/quarantine.c

Lines changed: 2 additions & 5 deletions
@@ -217,11 +217,8 @@ void quarantine_reduce(void)
 	new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
 		QUARANTINE_FRACTION;
 	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
-	if (WARN_ONCE(new_quarantine_size < percpu_quarantines,
-		"Too little memory, disabling global KASAN quarantine.\n"))
-		new_quarantine_size = 0;
-	else
-		new_quarantine_size -= percpu_quarantines;
+	new_quarantine_size = (new_quarantine_size < percpu_quarantines) ?
+		0 : new_quarantine_size - percpu_quarantines;
 	WRITE_ONCE(quarantine_size, new_quarantine_size);
 
 	last = global_quarantine.head;
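
The replacement is a plain saturating subtraction: the per-CPU quarantines come out of the global budget, and on a machine too small to cover them the global quarantine size is simply zero. A standalone sketch of the same arithmetic; the fraction and per-CPU size below are illustrative values, not the kernel's constants:

#include <stdio.h>

#define QUARANTINE_FRACTION	32		/* illustrative fraction of RAM */
#define PERCPU_QUARANTINE	(1UL << 20)	/* illustrative per-CPU carve-out */

static unsigned long quarantine_budget(unsigned long ram_bytes, unsigned int cpus)
{
	unsigned long size = ram_bytes / QUARANTINE_FRACTION;
	unsigned long percpu = PERCPU_QUARANTINE * cpus;

	/* saturate at zero instead of warning when the machine is tiny */
	return (size < percpu) ? 0 : size - percpu;
}

int main(void)
{
	printf("big box:  %lu\n", quarantine_budget(2UL << 30, 4));
	printf("tiny box: %lu\n", quarantine_budget(16UL << 20, 4));
	return 0;
}

Dropping the WARN_ONCE reflects that a small machine is an expected configuration rather than an error worth logging.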

mm/memcontrol.c

Lines changed: 56 additions & 12 deletions
@@ -4077,14 +4077,32 @@ static struct cftype mem_cgroup_legacy_files[] = {
 
 static DEFINE_IDR(mem_cgroup_idr);
 
-static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
 {
-	atomic_inc(&memcg->id.ref);
+	atomic_add(n, &memcg->id.ref);
 }
 
-static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
 {
-	if (atomic_dec_and_test(&memcg->id.ref)) {
+	while (!atomic_inc_not_zero(&memcg->id.ref)) {
+		/*
+		 * The root cgroup cannot be destroyed, so it's refcount must
+		 * always be >= 1.
+		 */
+		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
+			VM_BUG_ON(1);
+			break;
+		}
+		memcg = parent_mem_cgroup(memcg);
+		if (!memcg)
+			memcg = root_mem_cgroup;
+	}
+	return memcg;
+}
+
+static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
+{
+	if (atomic_sub_and_test(n, &memcg->id.ref)) {
 		idr_remove(&mem_cgroup_idr, memcg->id.id);
 		memcg->id.id = 0;
 
@@ -4093,6 +4111,16 @@ static void mem_cgroup_id_put(struct mem_cgroup *memcg)
 	}
 }
 
+static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+	mem_cgroup_id_get_many(memcg, 1);
+}
+
+static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+	mem_cgroup_id_put_many(memcg, 1);
+}
+
 /**
  * mem_cgroup_from_id - look up a memcg from a memcg id
  * @id: the memcg id to look up
@@ -4727,16 +4755,18 @@ static void __mem_cgroup_clear_mc(void)
 		if (!mem_cgroup_is_root(mc.from))
 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
 
+		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
+
 		/*
 		 * we charged both to->memory and to->memsw, so we
 		 * should uncharge to->memory.
 		 */
 		if (!mem_cgroup_is_root(mc.to))
 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
 
-		css_put_many(&mc.from->css, mc.moved_swap);
+		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
+		css_put_many(&mc.to->css, mc.moved_swap);
 
-		/* we've already done css_get(mc.to) */
 		mc.moved_swap = 0;
 	}
 	memcg_oom_recover(from);
@@ -5800,7 +5830,7 @@ subsys_initcall(mem_cgroup_init);
  */
 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 {
-	struct mem_cgroup *memcg;
+	struct mem_cgroup *memcg, *swap_memcg;
 	unsigned short oldid;
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5815,16 +5845,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	if (!memcg)
 		return;
 
-	mem_cgroup_id_get(memcg);
-	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+	/*
+	 * In case the memcg owning these pages has been offlined and doesn't
+	 * have an ID allocated to it anymore, charge the closest online
+	 * ancestor for the swap instead and transfer the memory+swap charge.
+	 */
+	swap_memcg = mem_cgroup_id_get_online(memcg);
+	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
 	VM_BUG_ON_PAGE(oldid, page);
-	mem_cgroup_swap_statistics(memcg, true);
+	mem_cgroup_swap_statistics(swap_memcg, true);
 
 	page->mem_cgroup = NULL;
 
 	if (!mem_cgroup_is_root(memcg))
 		page_counter_uncharge(&memcg->memory, 1);
 
+	if (memcg != swap_memcg) {
+		if (!mem_cgroup_is_root(swap_memcg))
+			page_counter_charge(&swap_memcg->memsw, 1);
+		page_counter_uncharge(&memcg->memsw, 1);
+	}
+
 	/*
 	 * Interrupts should be disabled here because the caller holds the
 	 * mapping->tree_lock lock which is taken with interrupts-off. It is
@@ -5863,11 +5904,14 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
 	if (!memcg)
 		return 0;
 
+	memcg = mem_cgroup_id_get_online(memcg);
+
 	if (!mem_cgroup_is_root(memcg) &&
-	    !page_counter_try_charge(&memcg->swap, 1, &counter))
+	    !page_counter_try_charge(&memcg->swap, 1, &counter)) {
+		mem_cgroup_id_put(memcg);
 		return -ENOMEM;
+	}
 
-	mem_cgroup_id_get(memcg);
 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
 	VM_BUG_ON_PAGE(oldid, page);
 	mem_cgroup_swap_statistics(memcg, true);
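
Both memcontrol.c fixes lean on the same idea: the memcg ID reference count can be taken and dropped n at a time, and if the owning group is already offline (its count has reached zero) the reference is taken on the nearest ancestor that is still alive. A rough userspace sketch of that pattern using C11 atomics; the types are hypothetical and inc_not_zero is modelled with a compare-and-swap loop:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* hypothetical cgroup-like node: ref == 0 means "offline, ID already gone" */
struct group {
	const char *name;
	atomic_uint ref;
	struct group *parent;		/* NULL at the root */
};

/* take a reference only if the count is still non-zero (inc_not_zero) */
static bool get_not_zero(struct group *g)
{
	unsigned int v = atomic_load(&g->ref);

	while (v != 0) {
		if (atomic_compare_exchange_weak(&g->ref, &v, v + 1))
			return true;
	}
	return false;
}

/* walk towards the root until we find a group we can still pin */
static struct group *get_online(struct group *g, struct group *root)
{
	while (!get_not_zero(g)) {
		if (g == root)		/* the root is never torn down */
			break;
		g = g->parent ? g->parent : root;
	}
	return g;
}

static void get_many(struct group *g, unsigned int n)
{
	atomic_fetch_add(&g->ref, n);
}

static void put_many(struct group *g, unsigned int n)
{
	/* fetch_sub returns the old value; old == n means the last ref is gone */
	if (atomic_fetch_sub(&g->ref, n) == n)
		printf("%s: last reference dropped, ID can be recycled\n", g->name);
}

int main(void)
{
	struct group root  = { .name = "root",  .ref = 1, .parent = NULL };
	struct group child = { .name = "child", .ref = 0, .parent = &root };

	/* the child is already offline, so the charge lands on the root */
	struct group *holder = get_online(&child, &root);
	printf("charged to: %s\n", holder->name);

	get_many(holder, 3);		/* e.g. moving three swap entries at once */
	put_many(holder, 3);
	put_many(holder, 1);		/* drop the reference taken by get_online() */
	return 0;
}

The real patch additionally transfers the memory+swap page-counter charge from the offline group to the chosen ancestor, which this sketch leaves out.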

mm/memory_hotplug.c

Lines changed: 2 additions & 0 deletions
@@ -1219,6 +1219,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 
 	/* init node's zones as empty zones, we don't have any present pages.*/
 	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
+	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
 
 	/*
 	 * The node we allocated has no zone fallback lists. For avoiding
@@ -1249,6 +1250,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 {
 	arch_refresh_nodedata(nid, NULL);
+	free_percpu(pgdat->per_cpu_nodestats);
 	arch_free_nodedata(pgdat);
 	return;
 }
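
The hot-add fix is a matched pair: per-CPU node statistics are now allocated when the pgdat is created, so the rollback path has to free them again. A generic sketch of that allocate-in-setup, free-in-rollback pairing, with calloc/free standing in for the kernel's alloc_percpu/free_percpu and a made-up node_data struct:

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-ins for the pgdat and its per-CPU node statistics */
struct node_stats { unsigned long nr_pages; };
struct node_data  { struct node_stats *per_cpu_nodestats; };

static struct node_data *hotadd_node(void)
{
	struct node_data *pgdat = calloc(1, sizeof(*pgdat));

	if (!pgdat)
		return NULL;
	/* allocate the stats as part of setting the node up ... */
	pgdat->per_cpu_nodestats = calloc(1, sizeof(*pgdat->per_cpu_nodestats));
	if (!pgdat->per_cpu_nodestats) {
		free(pgdat);
		return NULL;
	}
	return pgdat;
}

static void rollback_node_hotadd(struct node_data *pgdat)
{
	/* ... and release them again if the hot-add has to be undone */
	free(pgdat->per_cpu_nodestats);
	free(pgdat);
}

int main(void)
{
	struct node_data *pgdat = hotadd_node();

	if (pgdat)
		rollback_node_hotadd(pgdat);
	return 0;
}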

mm/oom_kill.c

Lines changed: 1 addition & 1 deletion
@@ -764,7 +764,7 @@ bool task_will_free_mem(struct task_struct *task)
 {
 	struct mm_struct *mm = task->mm;
 	struct task_struct *p;
-	bool ret;
+	bool ret = true;
 
 	/*
 	 * Skip tasks without mm because it might have passed its exit_mm and
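
The one-line fix matters because the code that assigns ret only runs when other tasks share the mm; with nothing to scan, the old code returned whatever happened to be on the stack. A standalone sketch of the failure mode, with a hypothetical scan standing in for the kernel's process walk:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical scan: the loop only runs when there are other users of the mm,
 * so "ret" must carry a sane default for the zero-iteration case */
static bool will_free_mem(const bool *other_users_exiting, int nr_others)
{
	bool ret = true;	/* default when nothing vetoes the assumption */

	for (int i = 0; i < nr_others; i++) {
		ret = other_users_exiting[i];
		if (!ret)
			break;	/* one non-exiting sharer is enough to say no */
	}
	return ret;
}

int main(void)
{
	bool sharers[] = { true, false };

	printf("no sharers:   %d\n", will_free_mem(NULL, 0));	/* default path */
	printf("with sharers: %d\n", will_free_mem(sharers, 2));
	return 0;
}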

mm/page_alloc.c

Lines changed: 1 addition & 1 deletion
@@ -4060,7 +4060,7 @@ long si_mem_available(void)
 	int lru;
 
 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
-		pages[lru] = global_page_state(NR_LRU_BASE + lru);
+		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
 
 	for_each_zone(zone)
 		wmark_low += zone->watermark[WMARK_LOW];
