Commit 45530c4

Christoph Lameter authored and penberg committed
mm, sl[au]b: create common functions for boot slab creation
Use a special function to create kmalloc caches and use that function in SLAB and SLUB.

Acked-by: Joonsoo Kim <js1304@gmail.com>
Reviewed-by: Glauber Costa <glommer@parallels.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
1 parent 3c58346 commit 45530c4
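
For orientation, here is a minimal sketch (not part of the commit) of the call pattern the new helper gives boot-time callers. The caller name, cache name, and size below are illustrative; create_kmalloc_cache() and ARCH_KMALLOC_FLAGS are the identifiers from the diff.

/*
 * Hypothetical early-boot caller. create_kmalloc_cache() now does what
 * SLAB and SLUB used to open-code: allocate a struct kmem_cache from the
 * kmem_cache cache, set name/size/alignment via create_boot_cache(),
 * run __kmem_cache_create(), add the new cache to slab_caches, and
 * panic if any step fails.
 */
static struct kmem_cache *example_cachep;	/* illustrative */

void __init example_boot_init(void)		/* illustrative */
{
	example_cachep = create_kmalloc_cache("example-192", 192,
					      ARCH_KMALLOC_FLAGS);
}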

File tree

4 files changed (+60, -66 lines)

mm/slab.c

Lines changed: 14 additions & 34 deletions
@@ -1659,23 +1659,13 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */

-	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
-	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
-	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
-	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
-
-	if (INDEX_AC != INDEX_L3) {
-		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
-		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
-		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
-		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
-	}
+	sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
+					sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
+
+	if (INDEX_AC != INDEX_L3)
+		sizes[INDEX_L3].cs_cachep =
+			create_kmalloc_cache(names[INDEX_L3].name,
+				sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);

 	slab_early_init = 0;

@@ -1687,24 +1677,14 @@ void __init kmem_cache_init(void)
 	 * Note for systems short on memory removing the alignment will
 	 * allow tighter packing of the smaller caches.
 	 */
-	if (!sizes->cs_cachep) {
-		sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		sizes->cs_cachep->name = names->name;
-		sizes->cs_cachep->size = sizes->cs_size;
-		sizes->cs_cachep->object_size = sizes->cs_size;
-		sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-		__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-		list_add(&sizes->cs_cachep->list, &slab_caches);
-	}
+	if (!sizes->cs_cachep)
+		sizes->cs_cachep = create_kmalloc_cache(names->name,
+					sizes->cs_size, ARCH_KMALLOC_FLAGS);
+
 #ifdef CONFIG_ZONE_DMA
-	sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-	sizes->cs_dmacachep->name = names->name_dma;
-	sizes->cs_dmacachep->size = sizes->cs_size;
-	sizes->cs_dmacachep->object_size = sizes->cs_size;
-	sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
-	__kmem_cache_create(sizes->cs_dmacachep,
-			   ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
-	list_add(&sizes->cs_dmacachep->list, &slab_caches);
+	sizes->cs_dmacachep = create_kmalloc_cache(
+		names->name_dma, sizes->cs_size,
+		SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
 #endif
 	sizes++;
 	names++;

mm/slab.h

Lines changed: 5 additions & 0 deletions
@@ -35,6 +35,11 @@ extern struct kmem_cache *kmem_cache;
 /* Functions provided by the slab allocators */
 extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

+extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
+			unsigned long flags);
+extern void create_boot_cache(struct kmem_cache *, const char *name,
+			size_t size, unsigned long flags);
+
 #ifdef CONFIG_SLUB
 struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 			size_t align, unsigned long flags, void (*ctor)(void *));

mm/slab_common.c

Lines changed: 36 additions & 0 deletions
@@ -202,6 +202,42 @@ int slab_is_available(void)
 	return slab_state >= UP;
 }

+#ifndef CONFIG_SLOB
+/* Create a cache during boot when no slab services are available yet */
+void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
+		unsigned long flags)
+{
+	int err;
+
+	s->name = name;
+	s->size = s->object_size = size;
+	s->align = ARCH_KMALLOC_MINALIGN;
+	err = __kmem_cache_create(s, flags);
+
+	if (err)
+		panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
+					name, size, err);
+
+	s->refcount = -1;	/* Exempt from merging for now */
+}
+
+struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
+				unsigned long flags)
+{
+	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+
+	if (!s)
+		panic("Out of memory when creating slab %s\n", name);
+
+	create_boot_cache(s, name, size, flags);
+	list_add(&s->list, &slab_caches);
+	s->refcount = 1;
+	return s;
+}
+
+#endif /* !CONFIG_SLOB */
+
+
 #ifdef CONFIG_SLABINFO
 static void print_slabinfo_header(struct seq_file *m)
 {
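
Presumably create_boot_cache() is exported on its own (see the mm/slab.h hunk) so that a cache whose struct kmem_cache is not itself slab-allocated, such as a statically allocated bootstrap cache, can reuse the same initialization path. A hedged sketch of such a caller, not taken from this commit; the names and flags are illustrative:

/* Hypothetical: a statically allocated descriptor initialized through
 * the common helper and then linked onto slab_caches by hand. */
static struct kmem_cache example_boot_cache;

void __init example_bootstrap_init(void)
{
	create_boot_cache(&example_boot_cache, "example-boot",
			  sizeof(struct kmem_cache), SLAB_HWCACHE_ALIGN);
	list_add(&example_boot_cache.list, &slab_caches);
}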

mm/slub.c

Lines changed: 5 additions & 32 deletions
@@ -3245,32 +3245,6 @@ static int __init setup_slub_nomerge(char *str)

 __setup("slub_nomerge", setup_slub_nomerge);

-static struct kmem_cache *__init create_kmalloc_cache(const char *name,
-						int size, unsigned int flags)
-{
-	struct kmem_cache *s;
-
-	s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-
-	s->name = name;
-	s->size = s->object_size = size;
-	s->align = ARCH_KMALLOC_MINALIGN;
-
-	/*
-	 * This function is called with IRQs disabled during early-boot on
-	 * single CPU so there's no need to take slab_mutex here.
-	 */
-	if (kmem_cache_open(s, flags))
-		goto panic;
-
-	list_add(&s->list, &slab_caches);
-	return s;
-
-panic:
-	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
-	return NULL;
-}
-
 /*
  * Conversion table for small slabs sizes / 8 to the index in the
  * kmalloc array. This is necessary for slabs < 192 since we have non power

@@ -3948,6 +3922,10 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 	if (err)
 		return err;

+	/* Mutex is not taken during early boot */
+	if (slab_state <= UP)
+		return 0;
+
 	mutex_unlock(&slab_mutex);
 	err = sysfs_slab_add(s);
 	mutex_lock(&slab_mutex);

@@ -5249,13 +5227,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
 {
 	int err;
 	const char *name;
-	int unmergeable;
-
-	if (slab_state < FULL)
-		/* Defer until later */
-		return 0;
+	int unmergeable = slab_unmergeable(s);

-	unmergeable = slab_unmergeable(s);
 	if (unmergeable) {
 		/*
 		 * Slabcache can never be merged so we can use the name proper.
