Commit 62cccb8

hnaz authored and torvalds committed
mm: simplify lock_page_memcg()
Now that migration doesn't clear page->mem_cgroup of live pages anymore,
it's safe to make lock_page_memcg() and the memcg stat functions take
pages, and spare the callers from memcg objects.

[akpm@linux-foundation.org: fix warnings]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 6a93ca8 commit 62cccb8
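
To make the conversion concrete before reading the diffs: here is a minimal
before/after sketch of a typical call site. This is illustrative only, not
code from this commit; TestClearPageDirty() and MEM_CGROUP_STAT_DIRTY are
existing kernel names, but the surrounding snippet is made up.

        /* Before: lock_page_memcg() returned the memcg, and callers had
         * to thread it through to the accounting functions.
         */
        struct mem_cgroup *memcg;

        memcg = lock_page_memcg(page);
        if (TestClearPageDirty(page))
                mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
        unlock_page_memcg(memcg);

        /* After: page->mem_cgroup is stable while lock_page_memcg() is
         * held, so the stat functions look the memcg up themselves and
         * callers only ever name the page.
         */
        lock_page_memcg(page);
        if (TestClearPageDirty(page))
                mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
        unlock_page_memcg(page);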

12 files changed: +88 -117 lines changed


fs/buffer.c

Lines changed: 8 additions & 10 deletions
@@ -624,14 +624,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * The caller must hold lock_page_memcg().
  */
 static void __set_page_dirty(struct page *page, struct address_space *mapping,
-                             struct mem_cgroup *memcg, int warn)
+                             int warn)
 {
         unsigned long flags;
 
         spin_lock_irqsave(&mapping->tree_lock, flags);
         if (page->mapping) {    /* Race with truncate? */
                 WARN_ON_ONCE(warn && !PageUptodate(page));
-                account_page_dirtied(page, mapping, memcg);
+                account_page_dirtied(page, mapping);
                 radix_tree_tag_set(&mapping->page_tree,
                                 page_index(page), PAGECACHE_TAG_DIRTY);
         }
@@ -666,7 +666,6 @@ static void __set_page_dirty(struct page *page, struct address_space *mapping,
 int __set_page_dirty_buffers(struct page *page)
 {
         int newly_dirty;
-        struct mem_cgroup *memcg;
         struct address_space *mapping = page_mapping(page);
 
         if (unlikely(!mapping))
@@ -686,14 +685,14 @@ int __set_page_dirty_buffers(struct page *page)
          * Lock out page->mem_cgroup migration to keep PageDirty
          * synchronized with per-memcg dirty page counters.
          */
-        memcg = lock_page_memcg(page);
+        lock_page_memcg(page);
         newly_dirty = !TestSetPageDirty(page);
         spin_unlock(&mapping->private_lock);
 
         if (newly_dirty)
-                __set_page_dirty(page, mapping, memcg, 1);
+                __set_page_dirty(page, mapping, 1);
 
-        unlock_page_memcg(memcg);
+        unlock_page_memcg(page);
 
         if (newly_dirty)
                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1167,15 +1166,14 @@ void mark_buffer_dirty(struct buffer_head *bh)
         if (!test_set_buffer_dirty(bh)) {
                 struct page *page = bh->b_page;
                 struct address_space *mapping = NULL;
-                struct mem_cgroup *memcg;
 
-                memcg = lock_page_memcg(page);
+                lock_page_memcg(page);
                 if (!TestSetPageDirty(page)) {
                         mapping = page_mapping(page);
                         if (mapping)
-                                __set_page_dirty(page, mapping, memcg, 0);
+                                __set_page_dirty(page, mapping, 0);
                 }
-                unlock_page_memcg(memcg);
+                unlock_page_memcg(page);
                 if (mapping)
                         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
         }

fs/xfs/xfs_aops.c

Lines changed: 3 additions & 4 deletions
@@ -1957,7 +1957,6 @@ xfs_vm_set_page_dirty(
         loff_t end_offset;
         loff_t offset;
         int newly_dirty;
-        struct mem_cgroup *memcg;
 
         if (unlikely(!mapping))
                 return !TestSetPageDirty(page);
@@ -1981,7 +1980,7 @@ xfs_vm_set_page_dirty(
          * Lock out page->mem_cgroup migration to keep PageDirty
          * synchronized with per-memcg dirty page counters.
          */
-        memcg = lock_page_memcg(page);
+        lock_page_memcg(page);
         newly_dirty = !TestSetPageDirty(page);
         spin_unlock(&mapping->private_lock);
 
@@ -1992,13 +1991,13 @@ xfs_vm_set_page_dirty(
                 spin_lock_irqsave(&mapping->tree_lock, flags);
                 if (page->mapping) {    /* Race with truncate? */
                         WARN_ON_ONCE(!PageUptodate(page));
-                        account_page_dirtied(page, mapping, memcg);
+                        account_page_dirtied(page, mapping);
                         radix_tree_tag_set(&mapping->page_tree,
                                         page_index(page), PAGECACHE_TAG_DIRTY);
                 }
                 spin_unlock_irqrestore(&mapping->tree_lock, flags);
         }
-        unlock_page_memcg(memcg);
+        unlock_page_memcg(page);
         if (newly_dirty)
                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
         return newly_dirty;

include/linux/memcontrol.h

Lines changed: 17 additions & 18 deletions
@@ -455,42 +455,42 @@ bool mem_cgroup_oom_synchronize(bool wait);
 extern int do_swap_account;
 #endif
 
-struct mem_cgroup *lock_page_memcg(struct page *page);
-void unlock_page_memcg(struct mem_cgroup *memcg);
+void lock_page_memcg(struct page *page);
+void unlock_page_memcg(struct page *page);
 
 /**
  * mem_cgroup_update_page_stat - update page state statistics
- * @memcg: memcg to account against
+ * @page: the page
  * @idx: page state item to account
  * @val: number of pages (positive or negative)
  *
  * Callers must use lock_page_memcg() to prevent double accounting
  * when the page is concurrently being moved to another memcg:
  *
- *   memcg = lock_page_memcg(page);
+ *   lock_page_memcg(page);
  *   if (TestClearPageState(page))
- *     mem_cgroup_update_page_stat(memcg, state, -1);
- *   unlock_page_memcg(memcg);
+ *     mem_cgroup_update_page_stat(page, state, -1);
+ *   unlock_page_memcg(page);
  */
-static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_update_page_stat(struct page *page,
                                  enum mem_cgroup_stat_index idx, int val)
 {
         VM_BUG_ON(!rcu_read_lock_held());
 
-        if (memcg)
-                this_cpu_add(memcg->stat->count[idx], val);
+        if (page->mem_cgroup)
+                this_cpu_add(page->mem_cgroup->stat->count[idx], val);
 }
 
-static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_inc_page_stat(struct page *page,
                                             enum mem_cgroup_stat_index idx)
 {
-        mem_cgroup_update_page_stat(memcg, idx, 1);
+        mem_cgroup_update_page_stat(page, idx, 1);
 }
 
-static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_dec_page_stat(struct page *page,
                                             enum mem_cgroup_stat_index idx)
 {
-        mem_cgroup_update_page_stat(memcg, idx, -1);
+        mem_cgroup_update_page_stat(page, idx, -1);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -661,12 +661,11 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+static inline void lock_page_memcg(struct page *page)
 {
-        return NULL;
 }
 
-static inline void unlock_page_memcg(struct mem_cgroup *memcg)
+static inline void unlock_page_memcg(struct page *page)
 {
 }
 
@@ -692,12 +691,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
         return false;
 }
 
-static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_inc_page_stat(struct page *page,
                                             enum mem_cgroup_stat_index idx)
 {
 }
 
-static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_dec_page_stat(struct page *page,
                                             enum mem_cgroup_stat_index idx)
 {
 }

include/linux/mm.h

Lines changed: 2 additions & 3 deletions
@@ -1291,10 +1291,9 @@ int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
                         struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping,
-                          struct mem_cgroup *memcg);
+void account_page_dirtied(struct page *page, struct address_space *mapping);
 void account_page_cleaned(struct page *page, struct address_space *mapping,
-                          struct mem_cgroup *memcg, struct bdi_writeback *wb);
+                          struct bdi_writeback *wb);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 void cancel_dirty_page(struct page *page);

include/linux/pagemap.h

Lines changed: 1 addition & 2 deletions
@@ -663,8 +663,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                 pgoff_t index, gfp_t gfp_mask);
 extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow,
-                                     struct mem_cgroup *memcg);
+extern void __delete_from_page_cache(struct page *page, void *shadow);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 
 /*

mm/filemap.c

Lines changed: 8 additions & 12 deletions
@@ -179,8 +179,7 @@ static void page_cache_tree_delete(struct address_space *mapping,
  * is safe. The caller must hold the mapping's tree_lock and
  * lock_page_memcg().
  */
-void __delete_from_page_cache(struct page *page, void *shadow,
-                              struct mem_cgroup *memcg)
+void __delete_from_page_cache(struct page *page, void *shadow)
 {
         struct address_space *mapping = page->mapping;
 
@@ -239,8 +238,7 @@ void __delete_from_page_cache(struct page *page, void *shadow,
          * anyway will be cleared before returning page into buddy allocator.
          */
         if (WARN_ON_ONCE(PageDirty(page)))
-                account_page_cleaned(page, mapping, memcg,
-                                     inode_to_wb(mapping->host));
+                account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
 }
 
 /**
@@ -254,7 +252,6 @@ void __delete_from_page_cache(struct page *page, void *shadow,
 void delete_from_page_cache(struct page *page)
 {
         struct address_space *mapping = page->mapping;
-        struct mem_cgroup *memcg;
         unsigned long flags;
 
         void (*freepage)(struct page *);
@@ -263,11 +260,11 @@ void delete_from_page_cache(struct page *page)
 
         freepage = mapping->a_ops->freepage;
 
-        memcg = lock_page_memcg(page);
+        lock_page_memcg(page);
         spin_lock_irqsave(&mapping->tree_lock, flags);
-        __delete_from_page_cache(page, NULL, memcg);
+        __delete_from_page_cache(page, NULL);
         spin_unlock_irqrestore(&mapping->tree_lock, flags);
-        unlock_page_memcg(memcg);
+        unlock_page_memcg(page);
 
         if (freepage)
                 freepage(page);
@@ -551,7 +548,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
         if (!error) {
                 struct address_space *mapping = old->mapping;
                 void (*freepage)(struct page *);
-                struct mem_cgroup *memcg;
                 unsigned long flags;
 
                 pgoff_t offset = old->index;
@@ -561,9 +557,9 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                 new->mapping = mapping;
                 new->index = offset;
 
-                memcg = lock_page_memcg(old);
+                lock_page_memcg(old);
                 spin_lock_irqsave(&mapping->tree_lock, flags);
-                __delete_from_page_cache(old, NULL, memcg);
+                __delete_from_page_cache(old, NULL);
                 error = radix_tree_insert(&mapping->page_tree, offset, new);
                 BUG_ON(error);
                 mapping->nrpages++;
@@ -576,7 +572,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                 if (PageSwapBacked(new))
                         __inc_zone_page_state(new, NR_SHMEM);
                 spin_unlock_irqrestore(&mapping->tree_lock, flags);
-                unlock_page_memcg(memcg);
+                unlock_page_memcg(old);
                 mem_cgroup_migrate(old, new);
                 radix_tree_preload_end();
                 if (freepage)

mm/memcontrol.c

Lines changed: 9 additions & 14 deletions
@@ -1690,7 +1690,7 @@ bool mem_cgroup_oom_synchronize(bool handle)
  * This function protects unlocked LRU pages from being moved to
  * another cgroup and stabilizes their page->mem_cgroup binding.
  */
-struct mem_cgroup *lock_page_memcg(struct page *page)
+void lock_page_memcg(struct page *page)
 {
         struct mem_cgroup *memcg;
         unsigned long flags;
@@ -1699,25 +1699,18 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
          * The RCU lock is held throughout the transaction. The fast
          * path can get away without acquiring the memcg->move_lock
          * because page moving starts with an RCU grace period.
-         *
-         * The RCU lock also protects the memcg from being freed when
-         * the page state that is going to change is the only thing
-         * preventing the page from being uncharged.
-         * E.g. end-writeback clearing PageWriteback(), which allows
-         * migration to go ahead and uncharge the page before the
-         * account transaction might be complete.
          */
         rcu_read_lock();
 
         if (mem_cgroup_disabled())
-                return NULL;
+                return;
 again:
         memcg = page->mem_cgroup;
         if (unlikely(!memcg))
-                return NULL;
+                return;
 
         if (atomic_read(&memcg->moving_account) <= 0)
-                return memcg;
+                return;
 
         spin_lock_irqsave(&memcg->move_lock, flags);
         if (memcg != page->mem_cgroup) {
@@ -1733,16 +1726,18 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
         memcg->move_lock_task = current;
         memcg->move_lock_flags = flags;
 
-        return memcg;
+        return;
 }
 EXPORT_SYMBOL(lock_page_memcg);
 
 /**
  * unlock_page_memcg - unlock a page->mem_cgroup binding
- * @memcg: the memcg returned by lock_page_memcg()
+ * @page: the page
  */
-void unlock_page_memcg(struct mem_cgroup *memcg)
+void unlock_page_memcg(struct page *page)
 {
+        struct mem_cgroup *memcg = page->mem_cgroup;
+
         if (memcg && memcg->move_lock_task == current) {
                 unsigned long flags = memcg->move_lock_flags;
 
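The capture of this last hunk is cut off above, but the crucial change is
already visible: unlock_page_memcg() now takes the page and re-reads
page->mem_cgroup itself to find the move_lock that lock_page_memcg() may
have taken. That lookup is only safe because, per the commit message,
migration no longer clears page->mem_cgroup for live pages. The caller-side
contract is simply that both calls name the same page -- a sketch, not code
from this commit:

        /* The same page must be passed to both calls: the unlock side
         * recovers the memcg, and thus the move_lock and the saved IRQ
         * flags, from page->mem_cgroup, then drops the RCU read lock
         * taken on entry.
         */
        lock_page_memcg(page);
        /* ... modify page state mirrored in per-memcg counters ... */
        unlock_page_memcg(page);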