@@ -586,23 +586,23 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 	 * counted as CACHE even if it's on ANON LRU.
 	 */
 	if (PageAnon(page))
-		__this_cpu_add(memcg->stat->count[MEMCG_RSS], nr_pages);
+		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
 	else {
-		__this_cpu_add(memcg->stat->count[MEMCG_CACHE], nr_pages);
+		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
 		if (PageSwapBacked(page))
-			__this_cpu_add(memcg->stat->count[NR_SHMEM], nr_pages);
+			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
 	}
 
 	if (compound) {
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-		__this_cpu_add(memcg->stat->count[MEMCG_RSS_HUGE], nr_pages);
+		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
 	}
 
 	/* pagein of a big page is an event. So, ignore page size */
 	if (nr_pages > 0)
-		__this_cpu_inc(memcg->stat->events[PGPGIN]);
+		__count_memcg_events(memcg, PGPGIN, 1);
 	else {
-		__this_cpu_inc(memcg->stat->events[PGPGOUT]);
+		__count_memcg_events(memcg, PGPGOUT, 1);
 		nr_pages = -nr_pages;	/* for event */
 	}
 
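For reference, a minimal sketch of the non-atomic helpers this hunk switches to, inferred from the call sites above rather than copied from the commit; the real definitions live in include/linux/memcontrol.h and may differ in detail (e.g. a mem_cgroup_disabled() check or enum parameter types):

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx, int val)
{
	/* non-atomic: caller must have preemption or interrupts disabled */
	__this_cpu_add(memcg->stat->count[idx], val);
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					int idx, unsigned long count)
{
	__this_cpu_add(memcg->stat->events[idx], count);
}

A signed delta replaces the separate add/sub/inc entry points: __mod_memcg_state(memcg, MEMCG_RSS, -nr_pages) uncharges, and __count_memcg_events(memcg, PGPGIN, 1) stands in for __this_cpu_inc().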
@@ -2415,18 +2415,11 @@ void mem_cgroup_split_huge_fixup(struct page *head)
 	for (i = 1; i < HPAGE_PMD_NR; i++)
 		head[i].mem_cgroup = head->mem_cgroup;
 
-	__this_cpu_sub(head->mem_cgroup->stat->count[MEMCG_RSS_HUGE],
-		       HPAGE_PMD_NR);
+	__mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #ifdef CONFIG_MEMCG_SWAP
-static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
-				       int nr_entries)
-{
-	this_cpu_add(memcg->stat->count[MEMCG_SWAP], nr_entries);
-}
-
 /**
  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
  * @entry: swap entry to be moved
@@ -2450,8 +2443,8 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
 	new_id = mem_cgroup_id(to);
 
 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
-		mem_cgroup_swap_statistics(from, -1);
-		mem_cgroup_swap_statistics(to, 1);
+		mod_memcg_state(from, MEMCG_SWAP, -1);
+		mod_memcg_state(to, MEMCG_SWAP, 1);
 		return 0;
 	}
 	return -EINVAL;
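The deleted mem_cgroup_swap_statistics() wrapper above was a one-counter special case of this pattern; its body suggests that the preemption-safe mod_memcg_state() used here is roughly the following (a sketch generalized from the removed wrapper, not the commit's actual definition):

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	/* safe from any context: this_cpu_add() handles preemption itself */
	this_cpu_add(memcg->stat->count[idx], val);
}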
@@ -4584,8 +4577,8 @@ static int mem_cgroup_move_account(struct page *page,
 	spin_lock_irqsave(&from->move_lock, flags);
 
 	if (!anon && page_mapped(page)) {
-		__this_cpu_sub(from->stat->count[NR_FILE_MAPPED], nr_pages);
-		__this_cpu_add(to->stat->count[NR_FILE_MAPPED], nr_pages);
+		__mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
+		__mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
 	}
 
 	/*
@@ -4597,16 +4590,14 @@ static int mem_cgroup_move_account(struct page *page,
 		struct address_space *mapping = page_mapping(page);
 
 		if (mapping_cap_account_dirty(mapping)) {
-			__this_cpu_sub(from->stat->count[NR_FILE_DIRTY],
-				       nr_pages);
-			__this_cpu_add(to->stat->count[NR_FILE_DIRTY],
-				       nr_pages);
+			__mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
+			__mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
 		}
 	}
 
 	if (PageWriteback(page)) {
-		__this_cpu_sub(from->stat->count[NR_WRITEBACK], nr_pages);
-		__this_cpu_add(to->stat->count[NR_WRITEBACK], nr_pages);
+		__mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
+		__mod_memcg_state(to, NR_WRITEBACK, nr_pages);
 	}
 
 	/*
@@ -5642,11 +5633,11 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 	}
 
 	local_irq_save(flags);
-	__this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS], ug->nr_anon);
-	__this_cpu_sub(ug->memcg->stat->count[MEMCG_CACHE], ug->nr_file);
-	__this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS_HUGE], ug->nr_huge);
-	__this_cpu_sub(ug->memcg->stat->count[NR_SHMEM], ug->nr_shmem);
-	__this_cpu_add(ug->memcg->stat->events[PGPGOUT], ug->pgpgout);
+	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
+	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
+	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
+	__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
+	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->stat->nr_page_events, nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
 	local_irq_restore(flags);
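uncharge_batch() already brackets its updates with local_irq_save()/local_irq_restore(), so the cheaper double-underscore variants are safe here, while contexts without such protection (e.g. the skmem paths below) use the plain mod_memcg_state(). Note that nr_page_events keeps its raw __this_cpu_add(); this change adds no helper for it. The assumed caller contract, sketched with hypothetical local variables:

	local_irq_save(flags);
	/* irqs off: non-atomic per-CPU updates cannot be preempted */
	__mod_memcg_state(memcg, MEMCG_RSS, -nr_anon);	/* negative = uncharge */
	__count_memcg_events(memcg, PGPGOUT, pgpgout);
	local_irq_restore(flags);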
@@ -5874,7 +5865,7 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 	if (in_softirq())
 		gfp_mask = GFP_NOWAIT;
 
-	this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
+	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
 
 	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
 		return true;
@@ -5895,7 +5886,7 @@ void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 		return;
 	}
 
-	this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
+	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
 
 	refill_stock(memcg, nr_pages);
 }
@@ -6019,7 +6010,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
 				   nr_entries);
 	VM_BUG_ON_PAGE(oldid, page);
-	mem_cgroup_swap_statistics(swap_memcg, nr_entries);
+	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
 
 	page->mem_cgroup = NULL;
 
@@ -6085,7 +6076,7 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
 	mem_cgroup_id_get_many(memcg, nr_pages - 1);
 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
 	VM_BUG_ON_PAGE(oldid, page);
-	mem_cgroup_swap_statistics(memcg, nr_pages);
+	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
 
 	return 0;
 }
@@ -6113,7 +6104,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
 			else
 				page_counter_uncharge(&memcg->memsw, nr_pages);
 		}
-		mem_cgroup_swap_statistics(memcg, -nr_pages);
+		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
 		mem_cgroup_id_put_many(memcg, nr_pages);
 	}
 	rcu_read_unlock();