
Commit c65c187

Peter Zijlstra authored and penberg committed
slub: use lockdep_assert_held
Instead of using comments in an attempt at getting the locking right,
use proper assertions that actively warn you if you got it wrong.

Also add extra braces in a few sites to comply with coding-style.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
1 parent 8afb147 commit c65c187
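
What makes an assertion stronger than a comment: with CONFIG_LOCKDEP enabled,
lockdep_assert_held() emits a WARN the moment any code path reaches the
annotated function without actually holding the lock, and with lockdep
disabled it compiles away entirely, so production builds pay nothing. A rough
sketch of the macro as defined in include/linux/lockdep.h in kernels of this
vintage (consult the tree for the exact form):

#ifdef CONFIG_LOCKDEP
# define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
#else
# define lockdep_assert_held(l)	do { } while (0)
#endif

A comment can go stale or simply be ignored; this check is exercised on every
call in a lockdep-enabled debug kernel.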

File tree

1 file changed: +20 -20 lines changed

mm/slub.c

Lines changed: 20 additions & 20 deletions
@@ -985,23 +985,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
  */
 static void add_full(struct kmem_cache *s,
 	struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
 	list_add(&page->lru, &n->full);
 }
 
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
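Note why remove_full() gains a struct kmem_cache_node * parameter while
add_full() does not: add_full() already received n, but remove_full() only had
the cache and the page, leaving the assertion no lock to name. Every caller
must now supply the node, deriving it from the page when it is not already at
hand. A hypothetical caller sketch (example_drop_from_full is illustrative,
not from this patch; get_node() and page_to_nid() are the real helpers the
hunks below use):

/*
 * Illustrative only: derive the node from the page, take its
 * list_lock, and only then touch the full list.
 */
static void example_drop_from_full(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);
	remove_full(s, n, page);	/* assertion passes: lock is held */
	spin_unlock_irqrestore(&n->list_lock, flags);
}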
@@ -1250,7 +1249,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
@@ -1504,25 +1504,24 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 
 /*
  * Management of partially allocated slabs.
- *
- * list_lock must be held.
  */
 static inline void add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
 	else
 		list_add(&page->lru, &n->partial);
 }
 
-/*
- * list_lock must be held.
- */
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	list_del(&page->lru);
 	n->nr_partial--;
 }
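The partial-list helpers already took n, so only their bodies change. For
contrast with the locked caller sketched above, this is the kind of bug the
assertion now catches (example_buggy_add is hypothetical):

/*
 * Hypothetical buggy caller. Before this patch the "list_lock must
 * be held" comment was the only guard, and this would corrupt
 * n->partial silently under concurrency; now a lockdep-enabled
 * kernel WARNs on the very first call.
 */
static void example_buggy_add(struct kmem_cache_node *n, struct page *page)
{
	/* missing spin_lock_irqsave(&n->list_lock, flags) */
	add_partial(n, page, DEACTIVATE_TO_TAIL);
}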
@@ -1532,8 +1531,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
@@ -1543,6 +1540,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	unsigned long counters;
 	struct page new;
 
+	lockdep_assert_held(&n->list_lock);
+
 	/*
 	 * Zap the freelist and set the frozen bit.
 	 * The old freelist is the list of objects for the
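acquire_slab() places the assertion at the top of the function, ahead of the
freelist zap, so every path through it is covered. Its caller walks the
partial list with the lock held; a condensed, approximate sketch of that
caller, get_partial_node(), in this era (elisions and local names are not
verbatim):

	spin_lock(&n->list_lock);
	list_for_each_entry_safe(page, page2, &n->partial, lru) {
		void *t = acquire_slab(s, n, page, object == NULL, &objects);
		if (!t)
			break;
		/* ... account the acquired objects ... */
	}
	spin_unlock(&n->list_lock);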
@@ -1887,7 +1886,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 
 			else if (l == M_FULL)
 
-				remove_full(s, page);
+				remove_full(s, n, page);
 
 			if (m == M_PARTIAL) {
 
@@ -2541,7 +2540,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		new.inuse--;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
-			if (kmem_cache_has_cpu_partial(s) && !prior)
+			if (kmem_cache_has_cpu_partial(s) && !prior) {
 
 				/*
 				 * Slab was on no list before and will be
@@ -2551,7 +2550,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 				 */
 				new.frozen = 1;
 
-			else { /* Needs to be taken off a list */
+			} else { /* Needs to be taken off a list */
 
 				n = get_node(s, page_to_nid(page));
 				/*
@@ -2600,7 +2599,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 */
 		if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
 			if (kmem_cache_debug(s))
-				remove_full(s, page);
+				remove_full(s, n, page);
 			add_partial(n, page, DEACTIVATE_TO_TAIL);
 			stat(s, FREE_ADD_PARTIAL);
 		}
@@ -2614,9 +2613,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 */
 		remove_partial(n, page);
 		stat(s, FREE_REMOVE_PARTIAL);
-	} else
+	} else {
 		/* Slab must be on the full list */
-		remove_full(s, page);
+		remove_full(s, n, page);
+	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
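
The brace changes in __slab_free() are the coding-style half of the commit
message: per Documentation/CodingStyle, when any branch of a conditional
needs braces, every branch gets them, even a branch holding a single
statement. In miniature (condition and the helpers are placeholders):

	/* One multi-statement branch forces braces on all branches. */
	if (condition) {
		do_this();
		do_that();
	} else {
		otherwise();	/* single statement, braced anyway */
	}

The arms touched here each carried one statement plus a comment opposite a
brace-bearing branch, hence the added { }.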
