Commit d13d144

hkamezawa authored and torvalds committed
memcg: handle swap caches
SwapCache support for the memory resource controller (memcg).

Before the mem+swap controller, memcg itself should handle SwapCache
properly; this patch is cut out from that work. In current memcg, SwapCache
is simply leaked and the user can create tons of SwapCache. This is a leak
of accounting and should be handled.

SwapCache accounting is done as follows:

  charge (anon)
	- charged when it's mapped. (Because of readahead, charging at
	  add_to_swap_cache() is not sane.)
  uncharge (anon)
	- uncharged when it's dropped from swapcache and fully unmapped,
	  meaning it's not uncharged at unmap.
	  Note: deletion from swap cache at swap-in is done after rmap
	  information is established.
  charge (shmem)
	- charged at swap-in. This prevents charging at add_to_page_cache().
  uncharge (shmem)
	- uncharged when it's dropped from swapcache and not on shmem's
	  radix-tree.

At migration, the check against the 'old page' is modified to handle shmem.

Compared to the old version discussed (which caused trouble), we have the
advantages of:
  - the PCG_USED bit.
  - simple migration handling.

So the situation is much easier than several months ago, maybe.

[hugh@veritas.com: memcg: handle swap caches build fix]
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Tested-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent c1e862c commit d13d144
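
To make the charge/uncharge rules in the commit message easier to scan, here
is a condensed restatement as a single predicate. This is an illustrative
sketch only, not code from the patch: the helper name and its boolean
parameters are hypothetical stand-ins for the PageAnon(), page_mapped() and
shmem radix-tree checks that the real code performs.

#include <stdbool.h>

/*
 * Hypothetical summary of the rules above -- NOT part of this patch.
 * Evaluated when a page leaves the swap cache: returns true if the
 * page must keep its memcg charge for now.
 */
static bool swapcache_keeps_charge(bool is_anon, bool still_mapped,
				   bool on_shmem_radix_tree)
{
	if (is_anon)
		/* anon: uncharge only once fully unmapped as well as
		 * dropped from the swap cache */
		return still_mapped;
	/* shmem: uncharge only once dropped from the swap cache and
	 * no longer on shmem's radix-tree */
	return on_shmem_radix_tree;
}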

File tree: 5 files changed, 105 insertions(+), 8 deletions(-)


Documentation/controllers/memory.txt

Lines changed: 5 additions & 0 deletions
@@ -137,6 +137,11 @@ behind this approach is that a cgroup that aggressively uses a shared
 page will eventually get charged for it (once it is uncharged from
 the cgroup that brought it in -- this will happen on memory pressure).

+Exception: When you do swapoff and make swapped-out pages of shmem(tmpfs) to
+be backed into memory in force, charges for pages are accounted against the
+caller of swapoff rather than the users of shmem.
+
+
 2.4 Reclaim

 Each cgroup maintains a per cgroup LRU that consists of an active

include/linux/swap.h

Lines changed: 22 additions & 0 deletions
@@ -333,6 +333,22 @@ static inline void disable_swap_token(void)
 	put_swap_token(swap_token_mm);
 }

+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+extern int mem_cgroup_cache_charge_swapin(struct page *page,
+		struct mm_struct *mm, gfp_t mask, bool locked);
+extern void mem_cgroup_uncharge_swapcache(struct page *page);
+#else
+static inline
+int mem_cgroup_cache_charge_swapin(struct page *page,
+		struct mm_struct *mm, gfp_t mask, bool locked)
+{
+	return 0;
+}
+static inline void mem_cgroup_uncharge_swapcache(struct page *page)
+{
+}
+#endif
+
 #else /* CONFIG_SWAP */

 #define nr_swap_pages 0L

@@ -409,6 +425,12 @@ static inline swp_entry_t get_swap_page(void)
 #define has_swap_token(x) 0
 #define disable_swap_token() do { } while(0)

+static inline int mem_cgroup_cache_charge_swapin(struct page *page,
+			struct mm_struct *mm, gfp_t mask, bool locked)
+{
+	return 0;
+}
+
 #endif /* CONFIG_SWAP */
 #endif /* __KERNEL__*/
 #endif /* _LINUX_SWAP_H */
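
For context, the calling convention of the new hook looks roughly like this.
The fragment below is a sketch modeled on the two mm/shmem.c call sites added
later in this commit, not a verbatim excerpt; the surrounding declarations
and labels (error, out, failed) are assumed from those callers.

/* Case 1: the caller already holds the page lock (shmem_unuse_inode). */
error = mem_cgroup_cache_charge_swapin(page, current->mm,
				GFP_HIGHUSER_MOVABLE, true /* locked */);
if (error)
	goto out;

/*
 * Case 2: the caller does not hold the page lock (shmem_getpage).
 * The helper takes and drops the lock itself and re-checks
 * PageSwapCache() under it, since the page may have been dropped
 * from the swap cache in the meantime.
 */
if (mem_cgroup_cache_charge_swapin(swappage, current->mm,
				gfp, false /* not locked */)) {
	page_cache_release(swappage);
	error = -ENOMEM;
	goto failed;
}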

mm/memcontrol.c

Lines changed: 61 additions & 6 deletions
@@ -21,6 +21,7 @@
 #include <linux/memcontrol.h>
 #include <linux/cgroup.h>
 #include <linux/mm.h>
+#include <linux/pagemap.h>
 #include <linux/smp.h>
 #include <linux/page-flags.h>
 #include <linux/backing-dev.h>

@@ -139,6 +140,7 @@ enum charge_type {
 	MEM_CGROUP_CHARGE_TYPE_MAPPED,
 	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
 	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
+	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
 	NR_CHARGE_TYPE,
 };

@@ -780,6 +782,33 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
 }

+#ifdef CONFIG_SWAP
+int mem_cgroup_cache_charge_swapin(struct page *page,
+			struct mm_struct *mm, gfp_t mask, bool locked)
+{
+	int ret = 0;
+
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+	if (unlikely(!mm))
+		mm = &init_mm;
+	if (!locked)
+		lock_page(page);
+	/*
+	 * If not locked, the page can be dropped from SwapCache until
+	 * we reach here.
+	 */
+	if (PageSwapCache(page)) {
+		ret = mem_cgroup_charge_common(page, mm, mask,
+				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
+	}
+	if (!locked)
+		unlock_page(page);
+
+	return ret;
+}
+#endif
+
 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 {
 	struct page_cgroup *pc;

@@ -817,6 +846,9 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	if (mem_cgroup_subsys.disabled)
 		return;

+	if (PageSwapCache(page))
+		return;
+
 	/*
 	 * Check if our page_cgroup is valid
 	 */

@@ -825,12 +857,26 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 		return;

 	lock_page_cgroup(pc);
-	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page))
-	    || !PageCgroupUsed(pc)) {
-		/* This happens at race in zap_pte_range() and do_swap_page()*/
-		unlock_page_cgroup(pc);
-		return;
+
+	if (!PageCgroupUsed(pc))
+		goto unlock_out;
+
+	switch (ctype) {
+	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+		if (page_mapped(page))
+			goto unlock_out;
+		break;
+	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
+		if (!PageAnon(page)) {	/* Shared memory */
+			if (page->mapping && !page_is_file_cache(page))
+				goto unlock_out;
+		} else if (page_mapped(page)) /* Anon */
+			goto unlock_out;
+		break;
+	default:
+		break;
 	}
+
 	ClearPageCgroupUsed(pc);
 	mem = pc->mem_cgroup;

@@ -844,6 +890,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	css_put(&mem->css);

 	return;
+
+unlock_out:
+	unlock_page_cgroup(pc);
+	return;
 }

 void mem_cgroup_uncharge_page(struct page *page)

@@ -863,6 +913,11 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
 }

+void mem_cgroup_uncharge_swapcache(struct page *page)
+{
+	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
+}
+
 /*
  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
  * page belongs to.

@@ -920,7 +975,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;

 	/* unused page is not on radix-tree now. */
-	if (unused && ctype != MEM_CGROUP_CHARGE_TYPE_MAPPED)
+	if (unused)
 		__mem_cgroup_uncharge_common(unused, ctype);

 	pc = lookup_page_cgroup(target);
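
The MEM_CGROUP_CHARGE_TYPE_SWAPOUT case in __mem_cgroup_uncharge_common() is
the subtle part of this change. As a decision table (an annotated summary of
the switch above, not patch code):

/*
 * Summary of the SWAPOUT case -- annotation only, not patch code.
 *
 *   anon,  page_mapped()          -> keep charge (still in use)
 *   anon,  !page_mapped()         -> uncharge
 *   shmem, page->mapping != NULL  -> keep charge (still on shmem's
 *                                    radix-tree)
 *   shmem, page->mapping == NULL  -> uncharge
 *
 * page_is_file_cache() distinguishes swap-backed shmem pages from
 * ordinary file cache, which never reaches the SWAPOUT path.
 */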

mm/shmem.c

Lines changed: 16 additions & 2 deletions
@@ -928,8 +928,12 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
 	error = 1;
 	if (!inode)
 		goto out;
-	/* Charge page using GFP_HIGHUSER_MOVABLE while we can wait */
-	error = mem_cgroup_cache_charge(page, current->mm, GFP_HIGHUSER_MOVABLE);
+	/*
+	 * Charge page using GFP_HIGHUSER_MOVABLE while we can wait.
+	 * charged back to the user(not to caller) when swap account is used.
+	 */
+	error = mem_cgroup_cache_charge_swapin(page,
+			current->mm, GFP_HIGHUSER_MOVABLE, true);
 	if (error)
 		goto out;
 	error = radix_tree_preload(GFP_KERNEL);

@@ -1266,6 +1270,16 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 				goto repeat;
 			}
 			wait_on_page_locked(swappage);
+			/*
+			 * We want to avoid charge at add_to_page_cache().
+			 * charge against this swap cache here.
+			 */
+			if (mem_cgroup_cache_charge_swapin(swappage,
+						current->mm, gfp, false)) {
+				page_cache_release(swappage);
+				error = -ENOMEM;
+				goto failed;
+			}
 			page_cache_release(swappage);
 			goto repeat;
 		}

mm/swap_state.c

Lines changed: 1 addition & 0 deletions
@@ -118,6 +118,7 @@ void __delete_from_swap_cache(struct page *page)
 	total_swapcache_pages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	INC_CACHE_INFO(del_total);
+	mem_cgroup_uncharge_swapcache(page);
 }

 /**
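
One ordering detail makes this hook work with the PageSwapCache() early
return added in __mem_cgroup_uncharge_common() above: __delete_from_swap_cache()
clears the SwapCache flag a few lines before the context shown in this hunk,
so by the time the new call runs the early return no longer fires. An
abbreviated sketch; the ClearPageSwapCache() line is assumed from the
surrounding function body, which this diff does not show:

void __delete_from_swap_cache(struct page *page)
{
	/* ... radix-tree removal ... */
	ClearPageSwapCache(page);	/* flag cleared BEFORE the hook */
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
	mem_cgroup_uncharge_swapcache(page);	/* SWAPOUT uncharge can
						 * now proceed */
}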
