Commit 41bec7c

mm/slub: remove slab_lock() usage for debug operations
All alloc and free operations on debug caches are now serialized by
n->list_lock, so we can remove slab_lock() usage in validate_slab()
and list_slab_objects() as those also happen under n->list_lock.

Note the usage in list_slab_objects() could happen even on non-debug
caches, but only during cache shutdown time, so there should not be any
parallel freeing activity anymore. Except for buggy slab users, but in
that case the slab_lock() would not help against the common cmpxchg
based fast paths (in non-debug caches) anyway.

Also adjust documentation comments accordingly.

Suggested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: David Rientjes <rientjes@google.com>
1 parent c7323a5 commit 41bec7c
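For context, validate_slab() is only reached from validate_slab_node(), which takes n->list_lock around every call, so the per-slab lock no longer adds protection there. A simplified sketch of that caller in mm/slub.c (error reporting and the n->full list walk trimmed):

/* Simplified sketch of the caller context; details omitted. */
static int validate_slab_node(struct kmem_cache *s,
			      struct kmem_cache_node *n,
			      unsigned long *obj_map)
{
	unsigned long count = 0;
	struct slab *slab;
	unsigned long flags;

	/* Every validate_slab() call below runs under n->list_lock. */
	spin_lock_irqsave(&n->list_lock, flags);

	list_for_each_entry(slab, &n->partial, slab_list) {
		validate_slab(s, slab, obj_map);
		count++;
	}
	/* The n->full list is walked the same way (not shown). */

	spin_unlock_irqrestore(&n->list_lock, flags);
	return count;
}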

mm/slub.c

Lines changed: 8 additions & 11 deletions
@@ -50,7 +50,7 @@
  *   1. slab_mutex (Global Mutex)
  *   2. node->list_lock (Spinlock)
  *   3. kmem_cache->cpu_slab->lock (Local lock)
- *   4. slab_lock(slab) (Only on some arches or for debugging)
+ *   4. slab_lock(slab) (Only on some arches)
  *   5. object_map_lock (Only for debugging)
  *
  *   slab_mutex
@@ -64,8 +64,9 @@
  *   The slab_lock is a wrapper around the page lock, thus it is a bit
  *   spinlock.
  *
- *   The slab_lock is only used for debugging and on arches that do not
- *   have the ability to do a cmpxchg_double. It only protects:
+ *   The slab_lock is only used on arches that do not have the ability
+ *   to do a cmpxchg_double. It only protects:
+ *
  *	A. slab->freelist	-> List of free objects in a slab
  *	B. slab->inuse		-> Number of objects in use
  *	C. slab->objects	-> Number of objects in slab
@@ -94,6 +95,9 @@
  *   allocating a long series of objects that fill up slabs does not require
  *   the list lock.
  *
+ *   For debug caches, all allocations are forced to go through a list_lock
+ *   protected region to serialize against concurrent validation.
+ *
  *   cpu_slab->lock local lock
  *
  *   This locks protect slowpath manipulation of all kmem_cache_cpu fields
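As a concrete illustration of the paragraph added above: on a debug cache, an allocation from a node partial slab manipulates the freelist and runs its consistency checks entirely under n->list_lock, so it cannot race with a concurrent validate_slab(). This is a rough sketch only, with a hypothetical helper name; the real logic lives in the debug slow path added earlier in this series:

/*
 * Rough sketch only: hypothetical helper, not the kernel's actual code.
 * It shows the pattern the comment above describes.
 */
static void *debug_alloc_from_partial(struct kmem_cache *s,
				      struct kmem_cache_node *n,
				      struct slab *slab)
{
	unsigned long flags;
	void *object;

	spin_lock_irqsave(&n->list_lock, flags);

	/* Pop one object off the slab freelist while holding list_lock. */
	object = slab->freelist;
	slab->freelist = get_freepointer(s, object);
	slab->inuse++;

	/*
	 * The debug consistency checks (alloc_debug_processing() in the
	 * real code) run under the same lock, so a concurrent
	 * validate_slab(), whose caller also takes n->list_lock, always
	 * sees a stable freelist and object counters.
	 */

	spin_unlock_irqrestore(&n->list_lock, flags);
	return object;
}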
@@ -4369,7 +4373,6 @@ static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
 	void *p;
 
 	slab_err(s, slab, text, s->name);
-	slab_lock(slab, &flags);
 
 	map = get_map(s, slab);
 	for_each_object(p, s, addr, slab->objects) {
@@ -4380,7 +4383,6 @@ static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
 		}
 	}
 	put_map(map);
-	slab_unlock(slab, &flags);
 #endif
 }
 
@@ -5108,12 +5110,9 @@ static void validate_slab(struct kmem_cache *s, struct slab *slab,
 {
 	void *p;
 	void *addr = slab_address(slab);
-	unsigned long flags;
-
-	slab_lock(slab, &flags);
 
 	if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
-		goto unlock;
+		return;
 
 	/* Now we know that a valid freelist exists */
 	__fill_map(obj_map, s, slab);
@@ -5124,8 +5123,6 @@ static void validate_slab(struct kmem_cache *s, struct slab *slab,
 		if (!check_object(s, slab, p, val))
 			break;
 	}
-unlock:
-	slab_unlock(slab, &flags);
 }
 
 static int validate_slab_node(struct kmem_cache *s,
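Similarly, the list_slab_objects() hunks above rely on the shutdown path already holding n->list_lock. Roughly, simplified from free_partial() in mm/slub.c:

/* Simplified sketch of the shutdown-time caller; error paths trimmed. */
static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
{
	LIST_HEAD(discard);
	struct slab *slab, *h;

	/* list_slab_objects() below already runs under n->list_lock. */
	spin_lock_irq(&n->list_lock);
	list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
		if (!slab->inuse) {
			remove_partial(n, slab);
			list_add(&slab->slab_list, &discard);
		} else {
			list_slab_objects(s, slab,
				"Objects remaining in %s on __kmem_cache_shutdown()");
		}
	}
	spin_unlock_irq(&n->list_lock);

	list_for_each_entry_safe(slab, h, &discard, slab_list)
		discard_slab(s, slab);
}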
