
Commit 19c7ff9

Christoph Lameter authored and Pekka Enberg committed
slub: Take node lock during object free checks
Only applies to scenarios where debugging is on:

Validation of slabs can currently occur while debugging information is updated from the fast paths of the allocator. This results in various races where we get false reports about slab metadata not being in order.

This patch makes the fast paths take the node lock so that serialization with slab validation will occur. Causes additional slowdown in debug scenarios.

Reported-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
1 parent 455ce9e commit 19c7ff9


mm/slub.c

Lines changed: 18 additions & 12 deletions
@@ -1069,13 +1069,13 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *pa
         return 0;
 }
 
-static noinline int free_debug_processing(struct kmem_cache *s,
-                struct page *page, void *object, unsigned long addr)
+static noinline struct kmem_cache_node *free_debug_processing(
+        struct kmem_cache *s, struct page *page, void *object,
+        unsigned long addr, unsigned long *flags)
 {
-        unsigned long flags;
-        int rc = 0;
+        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
-        local_irq_save(flags);
+        spin_lock_irqsave(&n->list_lock, *flags);
         slab_lock(page);
 
         if (!check_slab(s, page))
@@ -1113,15 +1113,19 @@ static noinline int free_debug_processing(struct kmem_cache *s,
         set_track(s, object, TRACK_FREE, addr);
         trace(s, page, object, 0);
         init_object(s, object, SLUB_RED_INACTIVE);
-        rc = 1;
 out:
         slab_unlock(page);
-        local_irq_restore(flags);
-        return rc;
+        /*
+         * Keep node_lock to preserve integrity
+         * until the object is actually freed
+         */
+        return n;
 
 fail:
+        slab_unlock(page);
+        spin_unlock_irqrestore(&n->list_lock, *flags);
         slab_fix(s, "Object at 0x%p not freed", object);
-        goto out;
+        return NULL;
 }
 
 static int __init setup_slub_debug(char *str)
@@ -1214,8 +1218,9 @@ static inline void setup_object_debug(struct kmem_cache *s,
 static inline int alloc_debug_processing(struct kmem_cache *s,
         struct page *page, void *object, unsigned long addr) { return 0; }
 
-static inline int free_debug_processing(struct kmem_cache *s,
-        struct page *page, void *object, unsigned long addr) { return 0; }
+static inline struct kmem_cache_node *free_debug_processing(
+        struct kmem_cache *s, struct page *page, void *object,
+        unsigned long addr, unsigned long *flags) { return NULL; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
         { return 1; }
@@ -2452,7 +2457,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
         stat(s, FREE_SLOWPATH);
 
-        if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
+        if (kmem_cache_debug(s) &&
+                !(n = free_debug_processing(s, page, x, addr, &flags)))
         return;
 
         do {
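
The hunks above end before __slab_free's unlock site, so the lock handoff is easiest to see in isolation: on success, free_debug_processing now returns the node with n->list_lock still held, and the caller releases it only after the object is actually freed; on failure, the callee drops the lock itself and returns NULL. Below is a minimal userspace sketch of that callee-locks/caller-unlocks pattern. It uses a plain pthread mutex in place of spin_lock_irqsave, and the names free_checks and slow_free are illustrative, not from the patch.

#include <pthread.h>
#include <stdio.h>

struct node {
        pthread_mutex_t list_lock;      /* stands in for n->list_lock */
        int nr_partial;                 /* stands in for the partial lists */
};

/*
 * Run the free-time checks under the node lock. On success, return the
 * node with list_lock still held so the caller stays serialized against
 * validation until the free completes. On failure, drop the lock here
 * and return NULL, mirroring the new fail: path.
 */
static struct node *free_checks(struct node *n, int object_ok)
{
        pthread_mutex_lock(&n->list_lock);
        if (!object_ok) {
                pthread_mutex_unlock(&n->list_lock);
                return NULL;
        }
        return n;               /* lock intentionally kept across return */
}

static void slow_free(struct node *n, int object_ok)
{
        struct node *locked = free_checks(n, object_ok);

        if (!locked)
                return;
        locked->nr_partial++;   /* list update, still under the lock */
        pthread_mutex_unlock(&locked->list_lock);
}

int main(void)
{
        struct node n = { PTHREAD_MUTEX_INITIALIZER, 0 };

        slow_free(&n, 1);       /* checks pass: lock handed to the caller */
        slow_free(&n, 0);       /* checks fail: callee drops the lock */
        printf("nr_partial = %d\n", n.nr_partial);
        return 0;
}

The cost of this shape is visible in the diff: because the lock is held across a successful return, every early exit inside free_debug_processing must release it explicitly, which is why the fail: path gains both slab_unlock() and spin_unlock_irqrestore() in place of the old goto out.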
