Skip to content

Commit c9019e9

Browse files
hnaz authored and torvalds committed
mm: memcontrol: eliminate raw access to stat and event counters
Replace all raw 'this_cpu_' modifications of the stat and event per-cpu counters with API functions such as mod_memcg_state(). This makes the code easier to read, but is also in preparation for the next patch, which changes the per-cpu implementation of those counters. Link: http://lkml.kernel.org/r/20171103153336.24044-1-hannes@cmpxchg.org Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com> Cc: Michal Hocko <mhocko@suse.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 2b9fceb commit c9019e9

File tree

2 files changed

+45
-45
lines changed

2 files changed

+45
-45
lines changed

include/linux/memcontrol.h

Lines changed: 20 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -272,13 +272,6 @@ static inline bool mem_cgroup_disabled(void)
272272
return !cgroup_subsys_enabled(memory_cgrp_subsys);
273273
}
274274

275-
static inline void mem_cgroup_event(struct mem_cgroup *memcg,
276-
enum memcg_event_item event)
277-
{
278-
this_cpu_inc(memcg->stat->events[event]);
279-
cgroup_file_notify(&memcg->events_file);
280-
}
281-
282275
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
283276

284277
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
@@ -627,15 +620,23 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
627620
gfp_t gfp_mask,
628621
unsigned long *total_scanned);
629622

623+
/* idx can be of type enum memcg_event_item or vm_event_item */
624+
static inline void __count_memcg_events(struct mem_cgroup *memcg,
625+
int idx, unsigned long count)
626+
{
627+
if (!mem_cgroup_disabled())
628+
__this_cpu_add(memcg->stat->events[idx], count);
629+
}
630+
631+
/* idx can be of type enum memcg_event_item or vm_event_item */
630632
static inline void count_memcg_events(struct mem_cgroup *memcg,
631-
enum vm_event_item idx,
632-
unsigned long count)
633+
int idx, unsigned long count)
633634
{
634635
if (!mem_cgroup_disabled())
635636
this_cpu_add(memcg->stat->events[idx], count);
636637
}
637638

638-
/* idx can be of type enum memcg_stat_item or node_stat_item */
639+
/* idx can be of type enum memcg_event_item or vm_event_item */
639640
static inline void count_memcg_page_event(struct page *page,
640641
int idx)
641642
{
@@ -654,12 +655,20 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
654655
rcu_read_lock();
655656
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
656657
if (likely(memcg)) {
657-
this_cpu_inc(memcg->stat->events[idx]);
658+
count_memcg_events(memcg, idx, 1);
658659
if (idx == OOM_KILL)
659660
cgroup_file_notify(&memcg->events_file);
660661
}
661662
rcu_read_unlock();
662663
}
664+
665+
static inline void mem_cgroup_event(struct mem_cgroup *memcg,
666+
enum memcg_event_item event)
667+
{
668+
count_memcg_events(memcg, event, 1);
669+
cgroup_file_notify(&memcg->events_file);
670+
}
671+
663672
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
664673
void mem_cgroup_split_huge_fixup(struct page *head);
665674
#endif

mm/memcontrol.c

Lines changed: 25 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -586,23 +586,23 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
586586
* counted as CACHE even if it's on ANON LRU.
587587
*/
588588
if (PageAnon(page))
589-
__this_cpu_add(memcg->stat->count[MEMCG_RSS], nr_pages);
589+
__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
590590
else {
591-
__this_cpu_add(memcg->stat->count[MEMCG_CACHE], nr_pages);
591+
__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
592592
if (PageSwapBacked(page))
593-
__this_cpu_add(memcg->stat->count[NR_SHMEM], nr_pages);
593+
__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
594594
}
595595

596596
if (compound) {
597597
VM_BUG_ON_PAGE(!PageTransHuge(page), page);
598-
__this_cpu_add(memcg->stat->count[MEMCG_RSS_HUGE], nr_pages);
598+
__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
599599
}
600600

601601
/* pagein of a big page is an event. So, ignore page size */
602602
if (nr_pages > 0)
603-
__this_cpu_inc(memcg->stat->events[PGPGIN]);
603+
__count_memcg_events(memcg, PGPGIN, 1);
604604
else {
605-
__this_cpu_inc(memcg->stat->events[PGPGOUT]);
605+
__count_memcg_events(memcg, PGPGOUT, 1);
606606
nr_pages = -nr_pages; /* for event */
607607
}
608608

@@ -2415,18 +2415,11 @@ void mem_cgroup_split_huge_fixup(struct page *head)
24152415
for (i = 1; i < HPAGE_PMD_NR; i++)
24162416
head[i].mem_cgroup = head->mem_cgroup;
24172417

2418-
__this_cpu_sub(head->mem_cgroup->stat->count[MEMCG_RSS_HUGE],
2419-
HPAGE_PMD_NR);
2418+
__mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
24202419
}
24212420
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
24222421

24232422
#ifdef CONFIG_MEMCG_SWAP
2424-
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2425-
int nr_entries)
2426-
{
2427-
this_cpu_add(memcg->stat->count[MEMCG_SWAP], nr_entries);
2428-
}
2429-
24302423
/**
24312424
* mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
24322425
* @entry: swap entry to be moved
@@ -2450,8 +2443,8 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
24502443
new_id = mem_cgroup_id(to);
24512444

24522445
if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2453-
mem_cgroup_swap_statistics(from, -1);
2454-
mem_cgroup_swap_statistics(to, 1);
2446+
mod_memcg_state(from, MEMCG_SWAP, -1);
2447+
mod_memcg_state(to, MEMCG_SWAP, 1);
24552448
return 0;
24562449
}
24572450
return -EINVAL;
@@ -4584,8 +4577,8 @@ static int mem_cgroup_move_account(struct page *page,
45844577
spin_lock_irqsave(&from->move_lock, flags);
45854578

45864579
if (!anon && page_mapped(page)) {
4587-
__this_cpu_sub(from->stat->count[NR_FILE_MAPPED], nr_pages);
4588-
__this_cpu_add(to->stat->count[NR_FILE_MAPPED], nr_pages);
4580+
__mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
4581+
__mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
45894582
}
45904583

45914584
/*
@@ -4597,16 +4590,14 @@ static int mem_cgroup_move_account(struct page *page,
45974590
struct address_space *mapping = page_mapping(page);
45984591

45994592
if (mapping_cap_account_dirty(mapping)) {
4600-
__this_cpu_sub(from->stat->count[NR_FILE_DIRTY],
4601-
nr_pages);
4602-
__this_cpu_add(to->stat->count[NR_FILE_DIRTY],
4603-
nr_pages);
4593+
__mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
4594+
__mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
46044595
}
46054596
}
46064597

46074598
if (PageWriteback(page)) {
4608-
__this_cpu_sub(from->stat->count[NR_WRITEBACK], nr_pages);
4609-
__this_cpu_add(to->stat->count[NR_WRITEBACK], nr_pages);
4599+
__mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
4600+
__mod_memcg_state(to, NR_WRITEBACK, nr_pages);
46104601
}
46114602

46124603
/*
@@ -5642,11 +5633,11 @@ static void uncharge_batch(const struct uncharge_gather *ug)
56425633
}
56435634

56445635
local_irq_save(flags);
5645-
__this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS], ug->nr_anon);
5646-
__this_cpu_sub(ug->memcg->stat->count[MEMCG_CACHE], ug->nr_file);
5647-
__this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS_HUGE], ug->nr_huge);
5648-
__this_cpu_sub(ug->memcg->stat->count[NR_SHMEM], ug->nr_shmem);
5649-
__this_cpu_add(ug->memcg->stat->events[PGPGOUT], ug->pgpgout);
5636+
__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
5637+
__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
5638+
__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
5639+
__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
5640+
__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
56505641
__this_cpu_add(ug->memcg->stat->nr_page_events, nr_pages);
56515642
memcg_check_events(ug->memcg, ug->dummy_page);
56525643
local_irq_restore(flags);
@@ -5874,7 +5865,7 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
58745865
if (in_softirq())
58755866
gfp_mask = GFP_NOWAIT;
58765867

5877-
this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
5868+
mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
58785869

58795870
if (try_charge(memcg, gfp_mask, nr_pages) == 0)
58805871
return true;
@@ -5895,7 +5886,7 @@ void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
58955886
return;
58965887
}
58975888

5898-
this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
5889+
mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
58995890

59005891
refill_stock(memcg, nr_pages);
59015892
}
@@ -6019,7 +6010,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
60196010
oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
60206011
nr_entries);
60216012
VM_BUG_ON_PAGE(oldid, page);
6022-
mem_cgroup_swap_statistics(swap_memcg, nr_entries);
6013+
mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
60236014

60246015
page->mem_cgroup = NULL;
60256016

@@ -6085,7 +6076,7 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
60856076
mem_cgroup_id_get_many(memcg, nr_pages - 1);
60866077
oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
60876078
VM_BUG_ON_PAGE(oldid, page);
6088-
mem_cgroup_swap_statistics(memcg, nr_pages);
6079+
mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
60896080

60906081
return 0;
60916082
}
@@ -6113,7 +6104,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
61136104
else
61146105
page_counter_uncharge(&memcg->memsw, nr_pages);
61156106
}
6116-
mem_cgroup_swap_statistics(memcg, -nr_pages);
6107+
mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
61176108
mem_cgroup_id_put_many(memcg, nr_pages);
61186109
}
61196110
rcu_read_unlock();

0 commit comments

Comments (0)