Commit dffb4d6

Christoph Lameter authored and penberg committed
slub: Use statically allocated kmem_cache boot structure for bootstrap
Simplify bootstrap by statically allocating two kmem_cache structures. These
are freed after bootup is complete. This allows us to no longer worry about
calculating the sizes of kmem_cache structures during bootstrap.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
1 parent 45530c4
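The idiom the commit message describes — keep the chicken-and-egg descriptors in static storage, bring the allocator up, then migrate them into properly allocated memory — is sketched below in plain userspace C. This is an illustration of the pattern only, not kernel code; struct cache, bootstrap(), and the other names are hypothetical.

/*
 * Minimal sketch of the boot idiom, assuming all we need is
 * "static first, migrate later". Names are hypothetical, not kernel API.
 */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct cache {
	const char *name;
	size_t object_size;
};

/* Phase 1: the descriptor lives in static storage; nothing has to be
 * sized or allocated before the allocator itself exists. */
static struct cache boot_meta = { "meta", sizeof(struct cache) };

/* Phase 2: allocation works now, so copy the static descriptor into
 * memory the allocator owns; the static copy can then be discarded
 * (in the kernel, __initdata is freed after boot). */
static struct cache *bootstrap(struct cache *static_cache)
{
	struct cache *s = calloc(1, sizeof(*s));

	memcpy(s, static_cache, sizeof(*s));
	return s;
}

int main(void)
{
	struct cache *meta = bootstrap(&boot_meta);

	printf("%s: %zu bytes\n", meta->name, meta->object_size);
	free(meta);
	return 0;
}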

File tree

1 file changed

mm/slub.c

Lines changed: 20 additions & 47 deletions
@@ -176,8 +176,6 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #define __OBJECT_POISON		0x80000000UL /* Poison object */
 #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
 
-static int kmem_size = sizeof(struct kmem_cache);
-
 #ifdef CONFIG_SMP
 static struct notifier_block slab_notifier;
 #endif
@@ -3634,15 +3632,16 @@ static int slab_memory_callback(struct notifier_block *self,
 
 /*
  * Used for early kmem_cache structures that were allocated using
- * the page allocator
+ * the page allocator. Allocate them properly then fix up the pointers
+ * that may be pointing to the wrong kmem_cache structure.
  */
 
-static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
+static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 {
 	int node;
+	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 
-	list_add(&s->list, &slab_caches);
-	s->refcount = -1;
+	memcpy(s, static_cache, kmem_cache->object_size);
 
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
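The loop body elided between this hunk and the next is untouched context; per the new comment above ("fix up the pointers that may be pointing to the wrong kmem_cache structure"), bootstrap() walks each node's slab pages and repoints their back-pointers from the static descriptor to the freshly allocated one. A minimal userspace sketch of that repointing, with hypothetical names (struct page, repoint(), and the list layout here are stand-ins, not kernel API):

#include <stdio.h>

struct cache;

struct page {
	struct cache *slab;	/* back-pointer, like page->slab in slub */
	struct page *next;
};

struct cache {
	struct page *partial;	/* stand-in for a per-node partial list */
};

static void repoint(struct cache *from, struct cache *to)
{
	for (struct page *p = to->partial; p; p = p->next)
		if (p->slab == from)
			p->slab = to;	/* fix the stale back-pointer */
}

int main(void)
{
	static struct cache boot;	/* the static bootstrap descriptor */
	struct page a = { &boot, NULL };
	struct page b = { &boot, &a };
	struct cache real = { &b };	/* the properly allocated stand-in */

	repoint(&boot, &real);
	printf("repointed: %d %d\n", a.slab == &real, b.slab == &real);
	return 0;
}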
@@ -3658,70 +3657,44 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
 #endif
 		}
 	}
+	list_add(&s->list, &slab_caches);
+	return s;
 }
 
 void __init kmem_cache_init(void)
 {
+	static __initdata struct kmem_cache boot_kmem_cache,
+		boot_kmem_cache_node;
 	int i;
-	int caches = 0;
-	struct kmem_cache *temp_kmem_cache;
-	int order;
-	struct kmem_cache *temp_kmem_cache_node;
-	unsigned long kmalloc_size;
+	int caches = 2;
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
 
-	kmem_size = offsetof(struct kmem_cache, node) +
-			nr_node_ids * sizeof(struct kmem_cache_node *);
-
-	/* Allocate two kmem_caches from the page allocator */
-	kmalloc_size = ALIGN(kmem_size, cache_line_size());
-	order = get_order(2 * kmalloc_size);
-	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);
+	kmem_cache_node = &boot_kmem_cache_node;
+	kmem_cache = &boot_kmem_cache;
 
-	/*
-	 * Must first have the slab cache available for the allocations of the
-	 * struct kmem_cache_node's. There is special bootstrap code in
-	 * kmem_cache_open for slab_state == DOWN.
-	 */
-	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
-
-	kmem_cache_node->name = "kmem_cache_node";
-	kmem_cache_node->size = kmem_cache_node->object_size =
-		sizeof(struct kmem_cache_node);
-	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+	create_boot_cache(kmem_cache_node, "kmem_cache_node",
+		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
 
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 
 	/* Able to allocate the per node structures */
 	slab_state = PARTIAL;
 
-	temp_kmem_cache = kmem_cache;
-	kmem_cache->name = "kmem_cache";
-	kmem_cache->size = kmem_cache->object_size = kmem_size;
-	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+	create_boot_cache(kmem_cache, "kmem_cache",
+			offsetof(struct kmem_cache, node) +
+				nr_node_ids * sizeof(struct kmem_cache_node *),
+		SLAB_HWCACHE_ALIGN);
 
-	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
-	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
+	kmem_cache = bootstrap(&boot_kmem_cache);
 
 	/*
 	 * Allocate kmem_cache_node properly from the kmem_cache slab.
 	 * kmem_cache_node is separately allocated so no need to
 	 * update any list pointers.
 	 */
-	temp_kmem_cache_node = kmem_cache_node;
-
-	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
-	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
-
-	kmem_cache_bootstrap_fixup(kmem_cache_node);
-
-	caches++;
-	kmem_cache_bootstrap_fixup(kmem_cache);
-	caches++;
-	/* Free temporary boot structure */
-	free_pages((unsigned long)temp_kmem_cache, order);
+	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
 
 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
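A side note on the create_boot_cache(kmem_cache, ...) call above: the kmem_cache descriptor ends in a per-node pointer array, so its boot cache is sized with offsetof(struct kmem_cache, node) plus nr_node_ids pointer slots rather than sizeof(), and bootstrap()'s memcpy of kmem_cache->object_size then copies exactly that trimmed size. A hedged userspace sketch of the same trailing-array sizing (struct descriptor, MAX_NODES, and the value of nr_node_ids here are illustrative):

#include <stddef.h>
#include <stdio.h>

#define MAX_NODES 64

struct descriptor {
	int id;
	void *node[MAX_NODES];	/* only the first nr_node_ids slots are used */
};

int main(void)
{
	size_t nr_node_ids = 2;	/* e.g. a two-node machine */
	size_t object_size = offsetof(struct descriptor, node) +
			     nr_node_ids * sizeof(void *);

	printf("full struct: %zu bytes, trimmed object: %zu bytes\n",
	       sizeof(struct descriptor), object_size);
	return 0;
}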
