@@ -176,8 +176,6 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #define __OBJECT_POISON		0x80000000UL /* Poison object */
 #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
 
-static int kmem_size = sizeof(struct kmem_cache);
-
 #ifdef CONFIG_SMP
 static struct notifier_block slab_notifier;
 #endif
@@ -3634,15 +3632,16 @@ static int slab_memory_callback(struct notifier_block *self,
 
 /*
  * Used for early kmem_cache structures that were allocated using
- * the page allocator
+ * the page allocator. Allocate them properly then fix up the pointers
+ * that may be pointing to the wrong kmem_cache structure.
  */
 
-static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
+static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 {
 	int node;
+	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 
-	list_add(&s->list, &slab_caches);
-	s->refcount = -1;
+	memcpy(s, static_cache, kmem_cache->object_size);
 
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
@@ -3658,70 +3657,44 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
 #endif
 		}
 	}
+	list_add(&s->list, &slab_caches);
+	return s;
 }
 
 void __init kmem_cache_init(void)
 {
+	static __initdata struct kmem_cache boot_kmem_cache,
+		boot_kmem_cache_node;
 	int i;
-	int caches = 0;
-	struct kmem_cache *temp_kmem_cache;
-	int order;
-	struct kmem_cache *temp_kmem_cache_node;
-	unsigned long kmalloc_size;
+	int caches = 2;
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
 
-	kmem_size = offsetof(struct kmem_cache, node) +
-			nr_node_ids * sizeof(struct kmem_cache_node *);
-
-	/* Allocate two kmem_caches from the page allocator */
-	kmalloc_size = ALIGN(kmem_size, cache_line_size());
-	order = get_order(2 * kmalloc_size);
-	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);
+	kmem_cache_node = &boot_kmem_cache_node;
+	kmem_cache = &boot_kmem_cache;
 
-	/*
-	 * Must first have the slab cache available for the allocations of the
-	 * struct kmem_cache_node's. There is special bootstrap code in
-	 * kmem_cache_open for slab_state == DOWN.
-	 */
-	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
-
-	kmem_cache_node->name = "kmem_cache_node";
-	kmem_cache_node->size = kmem_cache_node->object_size =
-		sizeof(struct kmem_cache_node);
-	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+	create_boot_cache(kmem_cache_node, "kmem_cache_node",
+		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
 
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 
 	/* Able to allocate the per node structures */
 	slab_state = PARTIAL;
 
-	temp_kmem_cache = kmem_cache;
-	kmem_cache->name = "kmem_cache";
-	kmem_cache->size = kmem_cache->object_size = kmem_size;
-	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+	create_boot_cache(kmem_cache, "kmem_cache",
+			offsetof(struct kmem_cache, node) +
+				nr_node_ids * sizeof(struct kmem_cache_node *),
+			SLAB_HWCACHE_ALIGN);
 
-	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
-	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
+	kmem_cache = bootstrap(&boot_kmem_cache);
 
 	/*
 	 * Allocate kmem_cache_node properly from the kmem_cache slab.
 	 * kmem_cache_node is separately allocated so no need to
 	 * update any list pointers.
 	 */
-	temp_kmem_cache_node = kmem_cache_node;
-
-	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
-	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
-
-	kmem_cache_bootstrap_fixup(kmem_cache_node);
-
-	caches++;
-	kmem_cache_bootstrap_fixup(kmem_cache);
-	caches++;
-	/* Free temporary boot structure */
-	free_pages((unsigned long)temp_kmem_cache, order);
+	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
 
 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
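The change replaces SLUB's old page-allocator bootstrap with two statically allocated __initdata structures that are later migrated into the slab allocator itself. The chicken-and-egg problem being solved: kmem_cache describes the cache that kmem_cache structures are allocated from, so it has to exist before it can be allocated. Below is a minimal userspace sketch of the same pattern; names such as cache_cache and cache_alloc are illustrative stand-ins rather than kernel API, and calloc() models slab allocation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cache {
	const char *name;
	size_t object_size;
};

static struct cache boot_cache;		/* stand-in for boot_kmem_cache */
static struct cache *cache_cache;	/* stand-in for the global kmem_cache */

/* calloc() stands in for allocating one object from a slab cache. */
static void *cache_alloc(struct cache *c)
{
	return calloc(1, c->object_size);
}

/* Mirror of bootstrap() in the patch: allocate the descriptor from the
 * now-working allocator and copy the static boot copy into it. */
static struct cache *bootstrap(struct cache *static_cache)
{
	struct cache *s = cache_alloc(cache_cache);

	memcpy(s, static_cache, cache_cache->object_size);
	return s;
}

int main(void)
{
	/* Phase 1: the cache of cache descriptors exists only as static
	 * data, because nothing can allocate memory yet. */
	cache_cache = &boot_cache;
	cache_cache->name = "cache";
	cache_cache->object_size = sizeof(struct cache);

	/* Phase 2: the allocator works, so the descriptor can be moved
	 * into storage it manages itself; the static copy is abandoned. */
	cache_cache = bootstrap(&boot_cache);

	printf("%s now lives at %p\n", cache_cache->name, (void *)cache_cache);
	free(cache_cache);
	return 0;
}

Note that bootstrap() in the patch also performs list_add() only after the per-node fixup, on the freshly allocated copy, so slab_caches never holds a pointer to the doomed static structure; the __initdata statics are then discarded with the rest of init memory once boot completes.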
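The patch also leans on create_boot_cache(), which the same series introduces in mm/slab_common.c. As a hedged sketch reconstructed from the call sites above rather than quoted from this commit, it fills in a caller-provided (here statically allocated) kmem_cache, runs the common creation path, and panics on failure, since boot cannot continue without these caches:

/* Hedged sketch, not verbatim kernel source: set up a boot-time cache
 * in place instead of allocating its descriptor. */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
			size_t size, unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	err = __kmem_cache_create(s, flags);
	if (err)
		panic("Creation of slab %s size=%zd failed. Reason %d\n",
			name, size, err);
	s->refcount = -1;	/* boot caches are exempt from merging */
}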