@@ -122,6 +122,15 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
+static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+	return !kmem_cache_debug(s);
+#else
+	return false;
+#endif
+}
+
 /*
  * Issues still to be resolved:
  *
@@ -1572,7 +1581,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 			put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
-		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+		if (!kmem_cache_has_cpu_partial(s)
+			|| available > s->cpu_partial / 2)
 			break;
 
 	}
@@ -1883,6 +1893,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freel
 static void unfreeze_partials(struct kmem_cache *s,
 		struct kmem_cache_cpu *c)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct kmem_cache_node *n = NULL, *n2 = NULL;
 	struct page *page, *discard_page = NULL;
 
@@ -1937,6 +1948,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 		discard_slab(s, page);
 		stat(s, FREE_SLAB);
 	}
+#endif
 }
 
 /*
@@ -1950,6 +1962,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  */
 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct page *oldpage;
 	int pages;
 	int pobjects;
@@ -1989,6 +2002,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->next = oldpage;
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+#endif
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2497,7 +2511,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		new.inuse--;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
-			if (!kmem_cache_debug(s) && !prior)
+			if (kmem_cache_has_cpu_partial(s) && !prior)
 
 				/*
 				 * Slab was on no list before and will be partially empty
@@ -2552,8 +2566,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 * Objects left in the slab. If it was not on the partial list before
 	 * then add it.
 	 */
-	if (kmem_cache_debug(s) && unlikely(!prior)) {
-		remove_full(s, page);
+	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
+		if (kmem_cache_debug(s))
+			remove_full(s, page);
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -3061,7 +3076,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	 *    per node list when we run out of per cpu objects. We only fetch 50%
 	 *    to keep some capacity around for frees.
 	 */
-	if (kmem_cache_debug(s))
+	if (!kmem_cache_has_cpu_partial(s))
 		s->cpu_partial = 0;
 	else if (s->size >= PAGE_SIZE)
 		s->cpu_partial = 2;
@@ -4456,7 +4471,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
 	err = strict_strtoul(buf, 10, &objects);
 	if (err)
 		return err;
-	if (objects && !kmem_cache_has_cpu_partial(s))
 		return -EINVAL;
 
 	s->cpu_partial = objects;
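Note: the pattern the patch introduces — one inline predicate that hides both the Kconfig switch and the runtime debug check, so every call site stays a plain `if` with no preprocessor conditionals of its own — can be reproduced in a small standalone program. The sketch below is a hypothetical mock-up for illustration only (the `cache`, `cache_debug()`, `cache_has_cpu_partial()` and `DEMO_CPU_PARTIAL` names are invented stand-ins, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Toggle the feature at build time, e.g. cc -DDEMO_CPU_PARTIAL demo.c */

	struct cache {
		unsigned long flags;		/* stand-in for kmem_cache->flags */
		unsigned int cpu_partial;	/* stand-in for s->cpu_partial    */
	};

	#define DEBUG_FLAGS 0x1UL

	static inline bool cache_debug(const struct cache *s)
	{
		return s->flags & DEBUG_FLAGS;
	}

	/* Mirrors the shape of kmem_cache_has_cpu_partial(): compiled-out
	 * option => always false, otherwise "has per-cpu partials unless
	 * debugging is enabled for this cache". */
	static inline bool cache_has_cpu_partial(const struct cache *s)
	{
	#ifdef DEMO_CPU_PARTIAL
		return !cache_debug(s);
	#else
		return false;
	#endif
	}

	int main(void)
	{
		struct cache s = { .flags = 0, .cpu_partial = 30 };

		/* Call sites test the single predicate, the way the patch does
		 * in kmem_cache_open() and cpu_partial_store(). */
		if (!cache_has_cpu_partial(&s))
			s.cpu_partial = 0;

		printf("cpu_partial = %u\n", s.cpu_partial);
		return 0;
	}

Built with -DDEMO_CPU_PARTIAL the cache keeps its per-cpu partial budget; built without it, the budget collapses to 0 — the same effect the diff achieves by folding CONFIG_SLUB_CPU_PARTIAL and kmem_cache_debug() into kmem_cache_has_cpu_partial(), so none of the call sites shown above need their own #ifdef blocks.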