@@ -1329,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > MIN_PARTIAL) {
+				n->nr_partial > n->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1381,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < MIN_PARTIAL) {
+		if (n->nr_partial < n->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1913,9 +1913,21 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 #endif
 }
 
-static void init_kmem_cache_node(struct kmem_cache_node *n)
+static void
+init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
+
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	n->min_partial = ilog2(s->size);
+	if (n->min_partial < MIN_PARTIAL)
+		n->min_partial = MIN_PARTIAL;
+	else if (n->min_partial > MAX_PARTIAL)
+		n->min_partial = MAX_PARTIAL;
+
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2087,7 +2099,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	init_object(kmalloc_caches, n, 1);
 	init_tracking(kmalloc_caches, n);
 #endif
-	init_kmem_cache_node(n);
+	init_kmem_cache_node(n, kmalloc_caches);
 	inc_slabs_node(kmalloc_caches, node, page->objects);
 
 	/*
@@ -2144,7 +2156,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 
 		}
 		s->node[node] = n;
-		init_kmem_cache_node(n);
+		init_kmem_cache_node(n, s);
 	}
 	return 1;
 }
@@ -2155,7 +2167,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 
 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
-	init_kmem_cache_node(&s->local_node);
+	init_kmem_cache_node(&s->local_node, s);
 	return 1;
 }
 #endif
@@ -2715,7 +2727,6 @@ size_t ksize(const void *object)
 	 */
 	return s->size;
 }
-EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
@@ -2890,7 +2901,7 @@ static int slab_mem_going_online_callback(void *arg)
 			ret = -ENOMEM;
 			goto out;
 		}
-		init_kmem_cache_node(n, s);
+		init_kmem_cache_node(n, s);
 		s->node[nid] = n;
 	}
 out:
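
Note (not part of the patch): the heuristic added in init_kmem_cache_node() above derives each node's min_partial from ilog2(s->size) and clamps it to the [MIN_PARTIAL, MAX_PARTIAL] range, so caches with larger objects keep more partially-empty slabs per node before returning pages to the page allocator. The following minimal user-space sketch illustrates that behavior; the bounds MIN_PARTIAL = 5 and MAX_PARTIAL = 10 and the open-coded ilog2() helper are assumptions for illustration only, not taken from this diff.

/*
 * Stand-alone sketch of the per-node min_partial heuristic.
 * MIN_PARTIAL/MAX_PARTIAL values are assumed; ilog2() is open-coded
 * because the kernel's <linux/log2.h> is not available in user space.
 */
#include <stdio.h>

#define MIN_PARTIAL	5	/* assumed lower bound on partial slabs kept per node */
#define MAX_PARTIAL	10	/* assumed upper bound */

static unsigned int ilog2(unsigned long x)
{
	unsigned int log = 0;

	while (x >>= 1)
		log++;
	return log;
}

/* Mirrors: n->min_partial = ilog2(s->size), clamped to [MIN_PARTIAL, MAX_PARTIAL] */
static unsigned long min_partial_for_size(unsigned long size)
{
	unsigned long min_partial = ilog2(size);

	if (min_partial < MIN_PARTIAL)
		min_partial = MIN_PARTIAL;
	else if (min_partial > MAX_PARTIAL)
		min_partial = MAX_PARTIAL;
	return min_partial;
}

int main(void)
{
	unsigned long sizes[] = { 8, 64, 192, 1024, 4096, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("object size %6lu -> min_partial %lu\n",
		       sizes[i], min_partial_for_size(sizes[i]));
	return 0;
}

With these assumed bounds, an 8-byte cache gets min_partial = 5, a 192-byte cache gets 7, and any cache of 1024 bytes or larger is capped at 10.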