Skip to content

Commit 6a93ca8

Browse files
hnaz authored and torvalds committed
mm: migrate: do not touch page->mem_cgroup of live pages
Changing a page's memcg association complicates dealing with the page, so we want to limit this as much as possible. Page migration e.g. does not have to do that. Just like page cache replacement, it can forcibly charge a replacement page, and then uncharge the old page when it gets freed. Temporarily overcharging the cgroup by a single page is not an issue in practice, and charging is so cheap nowadays that this is much preferrable to the headache of messing with live pages. The only place that still changes the page->mem_cgroup binding of live pages is when pages move along with a task to another cgroup. But that path isolates the page from the LRU, takes the page lock, and the move lock (lock_page_memcg()). That means page->mem_cgroup is always stable in callers that have the page isolated from the LRU or locked. Lighter unlocked paths, like writeback accounting, can use lock_page_memcg(). [akpm@linux-foundation.org: fix build] [vdavydov@virtuozzo.com: fix lockdep splat] Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@suse.cz> Cc: Greg Thelen <gthelen@google.com> Cc: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 23047a9 commit 6a93ca8

File tree

6 files changed

+20
-25
lines changed

6 files changed

+20
-25
lines changed

include/linux/memcontrol.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -300,7 +300,7 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
300300
void mem_cgroup_uncharge(struct page *page);
301301
void mem_cgroup_uncharge_list(struct list_head *page_list);
302302

303-
void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage);
303+
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
304304

305305
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
306306
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
@@ -580,7 +580,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
580580
{
581581
}
582582

583-
static inline void mem_cgroup_replace_page(struct page *old, struct page *new)
583+
static inline void mem_cgroup_migrate(struct page *old, struct page *new)
584584
{
585585
}
586586

include/linux/mm.h

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -905,20 +905,11 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
905905
{
906906
return page->mem_cgroup;
907907
}
908-
909-
static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
910-
{
911-
page->mem_cgroup = memcg;
912-
}
913908
#else
914909
static inline struct mem_cgroup *page_memcg(struct page *page)
915910
{
916911
return NULL;
917912
}
918-
919-
static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
920-
{
921-
}
922913
#endif
923914

924915
/*

mm/filemap.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -577,7 +577,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
577577
__inc_zone_page_state(new, NR_SHMEM);
578578
spin_unlock_irqrestore(&mapping->tree_lock, flags);
579579
unlock_page_memcg(memcg);
580-
mem_cgroup_replace_page(old, new);
580+
mem_cgroup_migrate(old, new);
581581
radix_tree_preload_end();
582582
if (freepage)
583583
freepage(old);

mm/memcontrol.c

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -4457,7 +4457,7 @@ static int mem_cgroup_move_account(struct page *page,
44574457
VM_BUG_ON(compound && !PageTransHuge(page));
44584458

44594459
/*
4460-
* Prevent mem_cgroup_replace_page() from looking at
4460+
* Prevent mem_cgroup_migrate() from looking at
44614461
* page->mem_cgroup of its source page while we change it.
44624462
*/
44634463
ret = -EBUSY;
@@ -5486,16 +5486,17 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
54865486
}
54875487

54885488
/**
5489-
* mem_cgroup_replace_page - migrate a charge to another page
5490-
* @oldpage: currently charged page
5491-
* @newpage: page to transfer the charge to
5489+
* mem_cgroup_migrate - charge a page's replacement
5490+
* @oldpage: currently circulating page
5491+
* @newpage: replacement page
54925492
*
5493-
* Migrate the charge from @oldpage to @newpage.
5493+
* Charge @newpage as a replacement page for @oldpage. @oldpage will
5494+
* be uncharged upon free.
54945495
*
54955496
* Both pages must be locked, @newpage->mapping must be set up.
54965497
* Either or both pages might be on the LRU already.
54975498
*/
5498-
void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
5499+
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
54995500
{
55005501
struct mem_cgroup *memcg;
55015502
unsigned int nr_pages;

mm/migrate.c

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -326,12 +326,13 @@ int migrate_page_move_mapping(struct address_space *mapping,
326326
return -EAGAIN;
327327

328328
/* No turning back from here */
329-
set_page_memcg(newpage, page_memcg(page));
330329
newpage->index = page->index;
331330
newpage->mapping = page->mapping;
332331
if (PageSwapBacked(page))
333332
SetPageSwapBacked(newpage);
334333

334+
mem_cgroup_migrate(page, newpage);
335+
335336
return MIGRATEPAGE_SUCCESS;
336337
}
337338

@@ -373,7 +374,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
373374
* Now we know that no one else is looking at the page:
374375
* no turning back from here.
375376
*/
376-
set_page_memcg(newpage, page_memcg(page));
377377
newpage->index = page->index;
378378
newpage->mapping = page->mapping;
379379
if (PageSwapBacked(page))
@@ -428,6 +428,8 @@ int migrate_page_move_mapping(struct address_space *mapping,
428428
}
429429
local_irq_enable();
430430

431+
mem_cgroup_migrate(page, newpage);
432+
431433
return MIGRATEPAGE_SUCCESS;
432434
}
433435

@@ -458,16 +460,19 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
458460
return -EAGAIN;
459461
}
460462

461-
set_page_memcg(newpage, page_memcg(page));
462463
newpage->index = page->index;
463464
newpage->mapping = page->mapping;
465+
464466
get_page(newpage);
465467

466468
radix_tree_replace_slot(pslot, newpage);
467469

468470
page_unfreeze_refs(page, expected_count - 1);
469471

470472
spin_unlock_irq(&mapping->tree_lock);
473+
474+
mem_cgroup_migrate(page, newpage);
475+
471476
return MIGRATEPAGE_SUCCESS;
472477
}
473478

@@ -775,7 +780,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
775780
* page is freed; but stats require that PageAnon be left as PageAnon.
776781
*/
777782
if (rc == MIGRATEPAGE_SUCCESS) {
778-
set_page_memcg(page, NULL);
779783
if (!PageAnon(page))
780784
page->mapping = NULL;
781785
}
@@ -1842,8 +1846,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
18421846
}
18431847

18441848
mlock_migrate_page(new_page, page);
1845-
set_page_memcg(new_page, page_memcg(page));
1846-
set_page_memcg(page, NULL);
1849+
mem_cgroup_migrate(page, new_page);
18471850
page_remove_rmap(page, true);
18481851
set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
18491852

mm/shmem.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1116,7 +1116,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
11161116
*/
11171117
oldpage = newpage;
11181118
} else {
1119-
mem_cgroup_replace_page(oldpage, newpage);
1119+
mem_cgroup_migrate(oldpage, newpage);
11201120
lru_cache_add_anon(newpage);
11211121
*pagep = newpage;
11221122
}

0 commit comments

Comments
 (0)