
Commit 4526477

Vladimir Davydov authored and torvalds committed
mm: memcontrol: cleanup kmem charge functions
- Handle memcg_kmem_enabled check out to the caller. This reduces the
  number of function definitions making the code easier to follow. At
  the same time it doesn't result in code bloat, because all of these
  functions are used only in one or two places.

- Move __GFP_ACCOUNT check to the caller as well so that one wouldn't
  have to dive deep into memcg implementation to see which allocations
  are charged and which are not.

- Refresh comments.

Link: http://lkml.kernel.org/r/52882a28b542c1979fd9a033b4dc8637fc347399.1464079537.git.vdavydov@virtuozzo.com
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 632c0a1 commit 4526477

4 files changed, +80 -123 lines changed

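The memcg_kmem_enabled() and __GFP_ACCOUNT checks now live in the callers rather than in inline wrappers, as the mm/page_alloc.c and mm/slab.h hunks below show. As a rough illustration of the resulting convention, here is a minimal sketch of an accounted page allocation; the helper name alloc_accounted_pages() is hypothetical and simply mirrors what alloc_kmem_pages() looks like after this patch.

/*
 * Hypothetical helper (not part of the patch) mirroring alloc_kmem_pages():
 * the caller decides whether accounting applies by checking
 * memcg_kmem_enabled() and __GFP_ACCOUNT before calling memcg_kmem_charge().
 */
static struct page *alloc_accounted_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *page = alloc_pages(gfp_mask, order);

        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
            page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
                __free_pages(page, order);
                page = NULL;
        }
        return page;
}

Freeing is symmetric: __free_kmem_pages() below calls memcg_kmem_uncharge() only when memcg_kmem_enabled() is true.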

include/linux/memcontrol.h

Lines changed: 7 additions & 96 deletions
@@ -749,6 +749,13 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 }
 #endif
 
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
+void memcg_kmem_put_cache(struct kmem_cache *cachep);
+int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+                            struct mem_cgroup *memcg);
+int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
+void memcg_kmem_uncharge(struct page *page, int order);
+
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 extern struct static_key_false memcg_kmem_enabled_key;
 
@@ -769,22 +776,6 @@ static inline bool memcg_kmem_enabled(void)
         return static_branch_unlikely(&memcg_kmem_enabled_key);
 }
 
-/*
- * In general, we'll do everything in our power to not incur in any overhead
- * for non-memcg users for the kmem functions. Not even a function call, if we
- * can avoid it.
- *
- * Therefore, we'll inline all those functions so that in the best case, we'll
- * see that kmemcg is off for everybody and proceed quickly. If it is on,
- * we'll still do most of the flag checking inline. We check a lot of
- * conditions, but because they are pretty simple, they are expected to be
- * fast.
- */
-int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
-                              struct mem_cgroup *memcg);
-int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void __memcg_kmem_uncharge(struct page *page, int order);
-
 /*
  * helper for accessing a memcg's index. It will be used as an index in the
  * child cache array in kmem_cache, and also to derive its name. This function
@@ -795,67 +786,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
         return memcg ? memcg->kmemcg_id : -1;
 }
 
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
-void __memcg_kmem_put_cache(struct kmem_cache *cachep);
-
-static inline bool __memcg_kmem_bypass(void)
-{
-        if (!memcg_kmem_enabled())
-                return true;
-        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
-                return true;
-        return false;
-}
-
-/**
- * memcg_kmem_charge: charge a kmem page
- * @page: page to charge
- * @gfp: reclaim mode
- * @order: allocation order
- *
- * Returns 0 on success, an error code on failure.
- */
-static __always_inline int memcg_kmem_charge(struct page *page,
-                                             gfp_t gfp, int order)
-{
-        if (__memcg_kmem_bypass())
-                return 0;
-        if (!(gfp & __GFP_ACCOUNT))
-                return 0;
-        return __memcg_kmem_charge(page, gfp, order);
-}
-
-/**
- * memcg_kmem_uncharge: uncharge a kmem page
- * @page: page to uncharge
- * @order: allocation order
- */
-static __always_inline void memcg_kmem_uncharge(struct page *page, int order)
-{
-        if (memcg_kmem_enabled())
-                __memcg_kmem_uncharge(page, order);
-}
-
-/**
- * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
- * @cachep: the original global kmem cache
- *
- * All memory allocated from a per-memcg cache is charged to the owner memcg.
- */
-static __always_inline struct kmem_cache *
-memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
-{
-        if (__memcg_kmem_bypass())
-                return cachep;
-        return __memcg_kmem_get_cache(cachep, gfp);
-}
-
-static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
-{
-        if (memcg_kmem_enabled())
-                __memcg_kmem_put_cache(cachep);
-}
-
 /**
  * memcg_kmem_update_page_stat - update kmem page state statistics
  * @page: the page
@@ -878,15 +808,6 @@ static inline bool memcg_kmem_enabled(void)
         return false;
 }
 
-static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
-{
-        return 0;
-}
-
-static inline void memcg_kmem_uncharge(struct page *page, int order)
-{
-}
-
 static inline int memcg_cache_id(struct mem_cgroup *memcg)
 {
         return -1;
@@ -900,16 +821,6 @@ static inline void memcg_put_cache_ids(void)
 {
 }
 
-static inline struct kmem_cache *
-memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
-{
-        return cachep;
-}
-
-static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
-{
-}
-
 static inline void memcg_kmem_update_page_stat(struct page *page,
                                                enum mem_cgroup_stat_index idx, int val)
 {

mm/memcontrol.c

Lines changed: 55 additions & 20 deletions
@@ -2273,31 +2273,38 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
         current->memcg_kmem_skip_account = 0;
 }
 
-/*
+static inline bool memcg_kmem_bypass(void)
+{
+        if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
+                return true;
+        return false;
+}
+
+/**
+ * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
+ * @cachep: the original global kmem cache
+ *
  * Return the kmem_cache we're supposed to use for a slab allocation.
  * We try to use the current memcg's version of the cache.
  *
- * If the cache does not exist yet, if we are the first user of it,
- * we either create it immediately, if possible, or create it asynchronously
- * in a workqueue.
- * In the latter case, we will let the current allocation go through with
- * the original cache.
+ * If the cache does not exist yet, if we are the first user of it, we
+ * create it asynchronously in a workqueue and let the current allocation
+ * go through with the original cache.
  *
- * Can't be called in interrupt context or from kernel threads.
- * This function needs to be called with rcu_read_lock() held.
+ * This function takes a reference to the cache it returns to assure it
+ * won't get destroyed while we are working with it. Once the caller is
+ * done with it, memcg_kmem_put_cache() must be called to release the
+ * reference.
  */
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 {
         struct mem_cgroup *memcg;
         struct kmem_cache *memcg_cachep;
         int kmemcg_id;
 
         VM_BUG_ON(!is_root_cache(cachep));
 
-        if (cachep->flags & SLAB_ACCOUNT)
-                gfp |= __GFP_ACCOUNT;
-
-        if (!(gfp & __GFP_ACCOUNT))
+        if (memcg_kmem_bypass())
                 return cachep;
 
         if (current->memcg_kmem_skip_account)
@@ -2330,14 +2337,27 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
         return cachep;
 }
 
-void __memcg_kmem_put_cache(struct kmem_cache *cachep)
+/**
+ * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
+ * @cachep: the cache returned by memcg_kmem_get_cache
+ */
+void memcg_kmem_put_cache(struct kmem_cache *cachep)
 {
         if (!is_root_cache(cachep))
                 css_put(&cachep->memcg_params.memcg->css);
 }
 
-int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
-                              struct mem_cgroup *memcg)
+/**
+ * memcg_kmem_charge: charge a kmem page
+ * @page: page to charge
+ * @gfp: reclaim mode
+ * @order: allocation order
+ * @memcg: memory cgroup to charge
+ *
+ * Returns 0 on success, an error code on failure.
+ */
+int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+                            struct mem_cgroup *memcg)
 {
         unsigned int nr_pages = 1 << order;
         struct page_counter *counter;
@@ -2358,19 +2378,34 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
         return 0;
 }
 
-int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+/**
+ * memcg_kmem_charge: charge a kmem page to the current memory cgroup
+ * @page: page to charge
+ * @gfp: reclaim mode
+ * @order: allocation order
+ *
+ * Returns 0 on success, an error code on failure.
+ */
+int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
 {
         struct mem_cgroup *memcg;
         int ret = 0;
 
+        if (memcg_kmem_bypass())
+                return 0;
+
         memcg = get_mem_cgroup_from_mm(current->mm);
         if (!mem_cgroup_is_root(memcg))
-                ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
+                ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
         css_put(&memcg->css);
         return ret;
 }
-
-void __memcg_kmem_uncharge(struct page *page, int order)
+/**
+ * memcg_kmem_uncharge: uncharge a kmem page
+ * @page: page to uncharge
+ * @order: allocation order
+ */
+void memcg_kmem_uncharge(struct page *page, int order)
 {
         struct mem_cgroup *memcg = page->mem_cgroup;
         unsigned int nr_pages = 1 << order;
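
The new kernel-doc above spells out the reference-counting contract: memcg_kmem_get_cache() pins the per-memcg cache it returns with a css reference, and the caller must balance it with memcg_kmem_put_cache(). Below is a minimal sketch of that pairing, modeled on slab_pre_alloc_hook()/slab_post_alloc_hook() in the mm/slab.h hunk further down; the pre_alloc()/post_alloc() helpers are hypothetical.

/* Hypothetical helpers illustrating the get/put pairing. */
static inline struct kmem_cache *pre_alloc(struct kmem_cache *s, gfp_t flags)
{
        /* Only root caches may be passed to memcg_kmem_get_cache(). */
        if (memcg_kmem_enabled() &&
            ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
                return memcg_kmem_get_cache(s); /* takes a css reference */
        return s;
}

static inline void post_alloc(struct kmem_cache *s)
{
        /* Safe even if pre_alloc() returned the root cache: put ignores root caches. */
        if (memcg_kmem_enabled())
                memcg_kmem_put_cache(s);
}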

mm/page_alloc.c

Lines changed: 6 additions & 3 deletions
@@ -4009,7 +4009,8 @@ struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
         struct page *page;
 
         page = alloc_pages(gfp_mask, order);
-        if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
+        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
+            page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
                 __free_pages(page, order);
                 page = NULL;
         }
@@ -4021,7 +4022,8 @@ struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
         struct page *page;
 
         page = alloc_pages_node(nid, gfp_mask, order);
-        if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
+        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
+            page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
                 __free_pages(page, order);
                 page = NULL;
         }
@@ -4034,7 +4036,8 @@ struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
  */
 void __free_kmem_pages(struct page *page, unsigned int order)
 {
-        memcg_kmem_uncharge(page, order);
+        if (memcg_kmem_enabled())
+                memcg_kmem_uncharge(page, order);
         __free_pages(page, order);
 }
 
mm/slab.h

Lines changed: 12 additions & 4 deletions
@@ -254,8 +254,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
         if (is_root_cache(s))
                 return 0;
 
-        ret = __memcg_kmem_charge_memcg(page, gfp, order,
-                                        s->memcg_params.memcg);
+        ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
         if (ret)
                 return ret;
 
@@ -269,6 +268,9 @@ static __always_inline int memcg_charge_slab(struct page *page,
 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
                                                 struct kmem_cache *s)
 {
+        if (!memcg_kmem_enabled())
+                return;
+
         memcg_kmem_update_page_stat(page,
                                     (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                                     MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
@@ -391,7 +393,11 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
         if (should_failslab(s, flags))
                 return NULL;
 
-        return memcg_kmem_get_cache(s, flags);
+        if (memcg_kmem_enabled() &&
+            ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
+                return memcg_kmem_get_cache(s);
+
+        return s;
 }
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
@@ -408,7 +414,9 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
                                          s->flags, flags);
                 kasan_slab_alloc(s, object, flags);
         }
-        memcg_kmem_put_cache(s);
+
+        if (memcg_kmem_enabled())
+                memcg_kmem_put_cache(s);
 }
 
 #ifndef CONFIG_SLOB
