@@ -985,23 +985,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
  */
 static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
	if (!(s->flags & SLAB_STORE_USER))
		return;
 
	list_add(&page->lru, &n->full);
 }
 
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
	if (!(s->flags & SLAB_STORE_USER))
		return;
 
@@ -1250,7 +1249,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
@@ -1504,25 +1504,24 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 
 /*
  * Management of partially allocated slabs.
- *
- * list_lock must be held.
  */
 static inline void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
 {
+	lockdep_assert_held(&n->list_lock);
+
	n->nr_partial++;
	if (tail == DEACTIVATE_TO_TAIL)
		list_add_tail(&page->lru, &n->partial);
	else
		list_add(&page->lru, &n->partial);
 }
 
-/*
- * list_lock must be held.
- */
 static inline void remove_partial(struct kmem_cache_node *n,
					struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
	list_del(&page->lru);
	n->nr_partial--;
 }
@@ -1532,8 +1531,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
		struct kmem_cache_node *n, struct page *page,
@@ -1543,6 +1540,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
	unsigned long counters;
	struct page new;
 
+	lockdep_assert_held(&n->list_lock);
+
	/*
	 * Zap the freelist and set the frozen bit.
	 * The old freelist is the list of objects for the
@@ -1887,7 +1886,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 
			else if (l == M_FULL)
 
-				remove_full(s, page);
+				remove_full(s, n, page);
 
			if (m == M_PARTIAL) {
 
@@ -2541,7 +2540,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
			new.inuse--;
			if ((!new.inuse || !prior) && !was_frozen) {
 
-				if (kmem_cache_has_cpu_partial(s) && !prior)
+				if (kmem_cache_has_cpu_partial(s) && !prior) {
 
					/*
					 * Slab was on no list before and will be
@@ -2551,7 +2550,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
					 */
					new.frozen = 1;
 
-				else { /* Needs to be taken off a list */
+				} else { /* Needs to be taken off a list */
 
					n = get_node(s, page_to_nid(page));
					/*
@@ -2600,7 +2599,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
	 */
	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
		if (kmem_cache_debug(s))
-			remove_full(s, page);
+			remove_full(s, n, page);
		add_partial(n, page, DEACTIVATE_TO_TAIL);
		stat(s, FREE_ADD_PARTIAL);
	}
@@ -2614,9 +2613,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
		 */
		remove_partial(n, page);
		stat(s, FREE_REMOVE_PARTIAL);
-	} else
+	} else {
		/* Slab must be on the full list */
-		remove_full(s, page);
+		remove_full(s, n, page);
+	}
 
	spin_unlock_irqrestore(&n->list_lock, flags);
	stat(s, FREE_SLAB);
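
The change is mechanical: every list-manipulation helper that used to document its locking requirement with a "list_lock must be held" comment now asserts it with lockdep_assert_held(), and remove_full() gains a struct kmem_cache_node argument so the assertion has a lock to name. A minimal sketch of the same idiom outside slub.c, using hypothetical names (my_node, my_add_partial, my_caller) and assuming a lockdep-enabled kernel, could look like this:

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

/* Sketch only: hypothetical structure mirroring the SLUB per-node layout. */
struct my_node {
	spinlock_t list_lock;		/* protects @partial and @nr_partial */
	struct list_head partial;
	unsigned long nr_partial;
};

/* Caller must hold n->list_lock; lockdep verifies this at runtime. */
static void my_add_partial(struct my_node *n, struct list_head *entry)
{
	lockdep_assert_held(&n->list_lock);

	n->nr_partial++;
	list_add(entry, &n->partial);
}

static void my_caller(struct my_node *n, struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);
	my_add_partial(n, entry);	/* assertion satisfied: lock is held */
	spin_unlock_irqrestore(&n->list_lock, flags);
}

With CONFIG_LOCKDEP=y, reaching my_add_partial() without the lock held produces a warning and a stack trace; on kernels built without lockdep the assertion compiles away, so turning the comment into a check costs nothing in production.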