
Commit 03ec0ed

netoptimizer authored and torvalds committed
slub: fix kmem cgroup bug in kmem_cache_alloc_bulk
The call slab_pre_alloc_hook() interacts with kmemcg and is not allowed to be called several times inside the bulk alloc for loop, due to the call to memcg_kmem_get_cache(). Doing so would hit the VM_BUG_ON in __memcg_kmem_get_cache.

As suggested by Vladimir Davydov, change slab_post_alloc_hook() to be able to handle an array of objects.

A subtle detail: the loop iterator "i" in slab_post_alloc_hook() must have the same type (size_t) as the size argument. This helps the compiler realize that it can remove the loop when all the debug statements inside the loop evaluate to nothing. Note, this is only an issue because the kernel is compiled with the GCC option -fno-strict-overflow.

In slab_alloc_node() the compiler inlines and optimizes the invocation of slab_post_alloc_hook(s, flags, 1, &object) by removing the loop and accessing the object directly.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reported-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
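
The iterator-type detail is easier to see in isolation. Below is a minimal sketch (an editor's illustration, not kernel code; "hook" and "debug_check" are hypothetical names) of the pattern the commit message describes: because "i" and "size" are both size_t, a call such as hook(1, &obj) lets the compiler prove the loop body runs exactly once, drop the loop, and access the object directly, even under -fno-strict-overflow.

	#include <stddef.h>

	/* Hypothetical stand-in for the per-object debug work; when it
	 * evaluates to nothing, the loop below should vanish too. */
	static inline void debug_check(void *object) { (void)object; }

	/* Iterator "i" has the same type (size_t) as "size", matching
	 * the commit's note: this helps the compiler remove the loop
	 * entirely for single-object calls such as hook(1, &obj). */
	static inline void hook(size_t size, void **p)
	{
		size_t i;

		for (i = 0; i < size; i++)
			debug_check(p[i]);
	}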
1 parent d0ecd89 commit 03ec0ed

File tree

1 file changed: +22 −18 lines changed


mm/slub.c

Lines changed: 22 additions & 18 deletions
@@ -1292,14 +1292,21 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 	return memcg_kmem_get_cache(s, flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s,
-					gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
+					size_t size, void **p)
 {
+	size_t i;
+
 	flags &= gfp_allowed_mask;
-	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
+	for (i = 0; i < size; i++) {
+		void *object = p[i];
+
+		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+		kmemleak_alloc_recursive(object, s->object_size, 1,
+					 s->flags, flags);
+		kasan_slab_alloc(s, object);
+	}
 	memcg_kmem_put_cache(s);
-	kasan_slab_alloc(s, object);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)

@@ -2475,7 +2482,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		gfp_t gfpflags, int node, unsigned long addr)
 {
-	void **object;
+	void *object;
 	struct kmem_cache_cpu *c;
 	struct page *page;
 	unsigned long tid;

@@ -2554,7 +2561,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->object_size);
 
-	slab_post_alloc_hook(s, gfpflags, object);
+	slab_post_alloc_hook(s, gfpflags, 1, &object);
 
 	return object;
 }

@@ -2904,6 +2911,10 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	struct kmem_cache_cpu *c;
 	int i;
 
+	/* memcg and kmem_cache debug support */
+	s = slab_pre_alloc_hook(s, flags);
+	if (unlikely(!s))
+		return false;
 	/*
 	 * Drain objects in the per cpu slab, while disabling local
 	 * IRQs, which protects against PREEMPT and interrupts

@@ -2928,17 +2939,8 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			c = this_cpu_ptr(s->cpu_slab);
 			continue; /* goto for-loop */
 		}
-
-		/* kmem_cache debug support */
-		s = slab_pre_alloc_hook(s, flags);
-		if (unlikely(!s))
-			goto error;
-
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
-
-		/* kmem_cache debug support */
-		slab_post_alloc_hook(s, flags, object);
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();

@@ -2951,11 +2953,13 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			memset(p[j], 0, s->object_size);
 	}
 
+	/* memcg and kmem_cache debug support */
+	slab_post_alloc_hook(s, flags, size, p);
 	return true;
-
 error:
-	__kmem_cache_free_bulk(s, i, p);
 	local_irq_enable();
+	slab_post_alloc_hook(s, flags, i, p);
+	__kmem_cache_free_bulk(s, i, p);
 	return false;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
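
For context, a minimal sketch of how a caller uses the bulk API whose hooks this commit fixes (assuming a struct kmem_cache *s created elsewhere with kmem_cache_create()):

	void *objs[16];

	/* Allocate 16 objects in one call; kmem_cache_alloc_bulk()
	 * returns true only if the whole array was populated. */
	if (kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
		/* ... use the objects ... */
		kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
	}

With the fix, slab_pre_alloc_hook() and slab_post_alloc_hook() run once per bulk call rather than once per object, so memcg_kmem_get_cache() is entered only once.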
