@@ -1000,23 +1000,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
  */
 static void add_full(struct kmem_cache *s,
 	struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
 	list_add(&page->lru, &n->full);
 }
 
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
@@ -1265,7 +1264,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
@@ -1519,25 +1519,24 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 
 /*
  * Management of partially allocated slabs.
- *
- * list_lock must be held.
  */
 static inline void add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
 	else
 		list_add(&page->lru, &n->partial);
 }
 
-/*
- * list_lock must be held.
- */
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	list_del(&page->lru);
 	n->nr_partial--;
 }
@@ -1547,8 +1546,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
@@ -1558,6 +1555,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	unsigned long counters;
 	struct page new;
 
+	lockdep_assert_held(&n->list_lock);
+
 	/*
 	 * Zap the freelist and set the frozen bit.
 	 * The old freelist is the list of objects for the
@@ -1902,7 +1901,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 
 		else if (l == M_FULL)
 
-			remove_full(s, page);
+			remove_full(s, n, page);
 
 		if (m == M_PARTIAL) {
 
@@ -2556,7 +2555,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2556
2555
new .inuse -- ;
2557
2556
if ((!new .inuse || !prior ) && !was_frozen ) {
2558
2557
2559
- if (kmem_cache_has_cpu_partial (s ) && !prior )
2558
+ if (kmem_cache_has_cpu_partial (s ) && !prior ) {
2560
2559
2561
2560
/*
2562
2561
* Slab was on no list before and will be
@@ -2566,7 +2565,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 				 */
 				new.frozen = 1;
 
-			else { /* Needs to be taken off a list */
+			} else { /* Needs to be taken off a list */
 
 				n = get_node(s, page_to_nid(page));
 				/*
@@ -2615,7 +2614,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 */
 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
 		if (kmem_cache_debug(s))
-			remove_full(s, page);
+			remove_full(s, n, page);
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -2629,9 +2628,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 */
 		remove_partial(n, page);
 		stat(s, FREE_REMOVE_PARTIAL);
-	} else
+	} else {
 		/* Slab must be on the full list */
-		remove_full(s, page);
+		remove_full(s, n, page);
+	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
@@ -2905,7 +2905,13 @@ static void early_kmem_cache_node_alloc(int node)
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
+	/*
+	 * the lock is for lockdep's sake, not for any actual
+	 * race protection
+	 */
+	spin_lock(&n->list_lock);
 	add_partial(n, page, DEACTIVATE_TO_HEAD);
+	spin_unlock(&n->list_lock);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
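
The hunks up to this point all apply one pattern: a "list_lock must be held" comment becomes a lockdep_assert_held() call at the top of the function, and remove_full() gains a kmem_cache_node argument so it has a lock to assert on; the spin_lock()/spin_unlock() pair added in early_kmem_cache_node_alloc() exists only to keep that assertion happy, as the added comment says. A rough userspace sketch of the same idea follows. It is not kernel code: struct node, add_partial() and the pthread_mutex_trylock() check are illustrative stand-ins for struct kmem_cache_node, the slub helpers and lockdep_assert_held().

/*
 * Minimal userspace analogue of the pattern in this patch: instead of a
 * "list_lock must be held" comment, assert at function entry that the
 * caller already holds the lock. The kernel uses lockdep_assert_held();
 * this sketch approximates it with pthread_mutex_trylock(), which returns
 * EBUSY when a default (non-recursive) mutex is already locked.
 */
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct node {				/* stand-in for struct kmem_cache_node */
	pthread_mutex_t list_lock;
	int nr_partial;
};

static void add_partial(struct node *n)
{
	/* equivalent in spirit to lockdep_assert_held(&n->list_lock) */
	assert(pthread_mutex_trylock(&n->list_lock) == EBUSY);

	n->nr_partial++;
}

int main(void)
{
	struct node n = { .list_lock = PTHREAD_MUTEX_INITIALIZER, .nr_partial = 0 };

	pthread_mutex_lock(&n.list_lock);	/* like spin_lock(&n->list_lock) */
	add_partial(&n);
	pthread_mutex_unlock(&n.list_lock);

	printf("nr_partial = %d\n", n.nr_partial);
	return 0;
}

The point of the executable assertion, as with lockdep, is that a violated locking rule shows up as a loud failure during testing rather than as a silently stale comment.
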
@@ -4314,7 +4320,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
 		page = ACCESS_ONCE(c->partial);
 		if (page) {
-			x = page->pobjects;
+			node = page_to_nid(page);
+			if (flags & SO_TOTAL)
+				WARN_ON_ONCE(1);
+			else if (flags & SO_OBJECTS)
+				WARN_ON_ONCE(1);
+			else
+				x = page->pages;
 			total += x;
 			nodes[node] += x;
 		}
@@ -5178,7 +5190,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	}
 
 	s->kobj.kset = slab_kset;
-	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
+	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
 	if (err) {
 		kobject_put(&s->kobj);
 		return err;
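
The last hunk is a separate fix: sysfs_slab_add() passed the cache name directly as the format argument of kobject_init_and_add(), so any '%' in the name would be parsed as a conversion specifier; passing a literal "%s" with the name as an ordinary argument closes that hole. A minimal userspace illustration of the same bug class using printf (the cache_name value here is made up for illustration):

/*
 * Userspace sketch of the format-string fix in sysfs_slab_add(): never
 * feed a variable string to a printf-style format parameter directly.
 */
#include <stdio.h>

int main(void)
{
	const char *cache_name = "bad%sname";	/* hypothetical cache name */

	/* Risky: any '%' in cache_name is treated as a format specifier.
	 *   printf(cache_name);
	 */

	/* Safe: the literal "%s" is the format and the name is just data,
	 * mirroring kobject_init_and_add(..., "%s", name) in the patch. */
	printf("%s\n", cache_name);
	return 0;
}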