Commit 345c905 (parent e7efa61)
Joonsoo Kim authored; Pekka Enberg committed

slub: Make cpu partial slab support configurable

CPU partial support can introduce a level of indeterminism that is not wanted
in certain contexts (like a realtime kernel). Make it configurable.

This patch is based on Christoph Lameter's "slub: Make cpu partial slab
support configurable V2".

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>

2 files changed, 32 insertions(+), 6 deletions(-)

init/Kconfig (11 additions, 0 deletions)

@@ -1511,6 +1511,17 @@ config SLOB
 
 endchoice
 
+config SLUB_CPU_PARTIAL
+        default y
+        depends on SLUB
+        bool "SLUB per cpu partial cache"
+        help
+          Per cpu partial caches accellerate objects allocation and freeing
+          that is local to a processor at the price of more indeterminism
+          in the latency of the free. On overflow these caches will be cleared
+          which requires the taking of locks that may cause latency spikes.
+          Typically one would choose no for a realtime system.
+
 config MMAP_ALLOW_UNINITIALIZED
         bool "Allow mmapped anonymous memory to be uninitialized"
         depends on EXPERT && !MMU
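The help text above is the whole user-visible surface of the option: it only decides whether the per-cpu partial machinery is compiled in at all. The mm/slub.c changes that follow hang off a single idiom, a small predicate that folds to a constant false when the option is off. As a rough standalone illustration of that idiom (plain userspace C, not kernel code; the demo_* names are invented for this sketch and merely mirror the shape of kmem_cache_has_cpu_partial() in the diff below):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a cache descriptor; not the kernel's struct kmem_cache. */
struct demo_cache {
        int debug_flags;
        unsigned int cpu_partial;
};

/*
 * Mirrors the shape of kmem_cache_has_cpu_partial() from the diff below:
 * with the config option off it is a compile-time constant false, so call
 * sites stay unconditional and the partial-cache branches become dead code.
 */
static inline bool demo_has_cpu_partial(const struct demo_cache *c)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
        return !c->debug_flags;
#else
        return false;
#endif
}

int main(void)
{
        struct demo_cache cache = { .debug_flags = 0, .cpu_partial = 2 };

        if (demo_has_cpu_partial(&cache))
                printf("per-cpu partial caching enabled, cpu_partial=%u\n",
                       cache.cpu_partial);
        else
                printf("per-cpu partial caching compiled out or disabled\n");
        return 0;
}

Compiling the sketch with cc -DCONFIG_SLUB_CPU_PARTIAL models CONFIG_SLUB_CPU_PARTIAL=y; omitting the define models =n, in which case the compiler can drop every branch guarded by the predicate, which is exactly how the kernel call sites stay unconditional.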

mm/slub.c (21 additions, 6 deletions)

@@ -122,6 +122,15 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
+static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+        return !kmem_cache_debug(s);
+#else
+        return false;
+#endif
+}
+
 /*
  * Issues still to be resolved:
  *
@@ -1572,7 +1581,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
                         put_cpu_partial(s, page, 0);
                         stat(s, CPU_PARTIAL_NODE);
                 }
-                if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+                if (!kmem_cache_has_cpu_partial(s)
+                        || available > s->cpu_partial / 2)
                         break;
 
         }
@@ -1883,6 +1893,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
 static void unfreeze_partials(struct kmem_cache *s,
                 struct kmem_cache_cpu *c)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
         struct kmem_cache_node *n = NULL, *n2 = NULL;
         struct page *page, *discard_page = NULL;
 
@@ -1937,6 +1948,7 @@ static void unfreeze_partials(struct kmem_cache *s,
                 discard_slab(s, page);
                 stat(s, FREE_SLAB);
         }
+#endif
 }
 
 /*
@@ -1950,6 +1962,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  */
 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
         struct page *oldpage;
         int pages;
         int pobjects;
@@ -1989,6 +2002,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                 page->next = oldpage;
 
         } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+#endif
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2497,7 +2511,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 new.inuse--;
                 if ((!new.inuse || !prior) && !was_frozen) {
 
-                        if (!kmem_cache_debug(s) && !prior)
+                        if (kmem_cache_has_cpu_partial(s) && !prior)
 
                                 /*
                                  * Slab was on no list before and will be partially empty
@@ -2552,8 +2566,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
          * Objects left in the slab. If it was not on the partial list before
          * then add it.
          */
-        if (kmem_cache_debug(s) && unlikely(!prior)) {
-                remove_full(s, page);
+        if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
+                if (kmem_cache_debug(s))
+                        remove_full(s, page);
                 add_partial(n, page, DEACTIVATE_TO_TAIL);
                 stat(s, FREE_ADD_PARTIAL);
         }
@@ -3061,7 +3076,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
          * per node list when we run out of per cpu objects. We only fetch 50%
          * to keep some capacity around for frees.
          */
-        if (kmem_cache_debug(s))
+        if (!kmem_cache_has_cpu_partial(s))
                 s->cpu_partial = 0;
         else if (s->size >= PAGE_SIZE)
                 s->cpu_partial = 2;
@@ -4456,7 +4471,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
         err = strict_strtoul(buf, 10, &objects);
         if (err)
                 return err;
-        if (objects && kmem_cache_debug(s))
+        if (objects && !kmem_cache_has_cpu_partial(s))
                 return -EINVAL;
 
         s->cpu_partial = objects;
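Two things in the mm/slub.c hunks are worth spelling out. The call-site changes in get_partial_node(), __slab_free(), kmem_cache_open() and cpu_partial_store() replace kmem_cache_debug() tests with the new predicate, so a cache goes without a per-cpu partial list either because the option is compiled out or because debugging is enabled for that cache; cpu_partial_store() now also rejects nonzero values when the support is compiled out, not only for debug caches. The two functions that exist purely to service the per-cpu partial list, unfreeze_partials() and put_cpu_partial(), instead keep their signatures and have only their bodies wrapped in #ifdef CONFIG_SLUB_CPU_PARTIAL. A minimal sketch of that second shape (again plain userspace C with invented demo_* names, not the kernel functions):

#include <stdio.h>

/*
 * Stand-in for a per-cpu cache structure; the kernel analogue is
 * struct kmem_cache_cpu with its ->partial list.
 */
struct demo_cpu_cache {
        int partial_pages;
};

/*
 * Same shape as unfreeze_partials()/put_cpu_partial() after this patch:
 * the function is always defined, but its body is compiled out when
 * CONFIG_SLUB_CPU_PARTIAL is not set, so every caller stays #ifdef-free.
 */
static void demo_unfreeze_partials(struct demo_cpu_cache *c)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
        printf("returning %d partial pages to the shared lists\n",
               c->partial_pages);
        c->partial_pages = 0;
#else
        (void)c;        /* no per-cpu partial list to drain */
#endif
}

int main(void)
{
        struct demo_cpu_cache c = { .partial_pages = 3 };

        demo_unfreeze_partials(&c); /* a no-op unless the option is defined */
        printf("partial_pages now: %d\n", c.partial_pages);
        return 0;
}

The empty variant compiles to nothing, and every caller stays free of preprocessor conditionals.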
