  */
 static bool pfmemalloc_active __read_mostly;
 
-/* Legal flag mask for kmem_cache_create(). */
-#if DEBUG
-# define CREATE_MASK	(SLAB_RED_ZONE | \
-			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | \
-			 SLAB_STORE_USER | \
-			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#else
-# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | \
-			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#endif
-
 /*
  * kmem_bufctl_t:
  *
@@ -564,15 +547,11 @@ static struct cache_names __initdata cache_names[] = {
 #undef CACHE
 };
 
-static struct arraycache_init initarray_cache __initdata =
-    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
 static struct kmem_cache kmem_cache_boot = {
-	.nodelists = kmem_cache_nodelists,
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
@@ -1576,29 +1555,34 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
 	}
 }
 
+/*
+ * The memory after the last cpu cache pointer is used for the
+ * nodelists pointer.
+ */
+static void setup_nodelists_pointer(struct kmem_cache *cachep)
+{
+	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+}
+
 /*
  * Initialisation. Called after the page allocator has been initialised and
  * before smp_init().
  */
 void __init kmem_cache_init(void)
 {
-	size_t left_over;
 	struct cache_sizes *sizes;
 	struct cache_names *names;
 	int i;
-	int order;
-	int node;
 
 	kmem_cache = &kmem_cache_boot;
+	setup_nodelists_pointer(kmem_cache);
 
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
 
-	for (i = 0; i < NUM_INIT_LISTS; i++) {
+	for (i = 0; i < NUM_INIT_LISTS; i++)
 		kmem_list3_init(&initkmem_list3[i]);
-		if (i < MAX_NUMNODES)
-			kmem_cache->nodelists[i] = NULL;
-	}
+
 	set_up_list3s(kmem_cache, CACHE_CACHE);
 
 	/*
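
The helper added above relies on the boot-time layout of struct kmem_cache: the per-cpu array_cache pointers are the last member, and the per-node list pointers are carved out of the memory immediately after the slots that are actually used. A rough sketch of that layout assumption (abbreviated; the real definition in include/linux/slab_def.h has many more fields, and the static boot cache is assumed to reserve enough trailing slots for this to be safe before any dynamic allocation):

struct kmem_cache {
	/* ... tunables, geometry, name, list head, statistics ... */
	struct kmem_list3 **nodelists;	/* aimed at the tail below */
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Only array[0..nr_cpu_ids-1] hold per-cpu caches.  Each cache
	 * object is sized so that nr_node_ids pointers fit right after
	 * them, which is where setup_nodelists_pointer() points:
	 *
	 *   size = offsetof(struct kmem_cache, array[nr_cpu_ids])
	 *          + nr_node_ids * sizeof(struct kmem_list3 *);
	 */
};
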
@@ -1629,37 +1613,16 @@ void __init kmem_cache_init(void)
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
-	node = numa_mem_id();
-
 	/* 1) create the kmem_cache */
-	INIT_LIST_HEAD(&slab_caches);
-	list_add(&kmem_cache->list, &slab_caches);
-	kmem_cache->colour_off = cache_line_size();
-	kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
-	kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
 	 */
-	kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
-				  nr_node_ids * sizeof(struct kmem_list3 *);
-	kmem_cache->object_size = kmem_cache->size;
-	kmem_cache->size = ALIGN(kmem_cache->object_size,
-					cache_line_size());
-	kmem_cache->reciprocal_buffer_size =
-		reciprocal_value(kmem_cache->size);
-
-	for (order = 0; order < MAX_ORDER; order++) {
-		cache_estimate(order, kmem_cache->size,
-			cache_line_size(), 0, &left_over, &kmem_cache->num);
-		if (kmem_cache->num)
-			break;
-	}
-	BUG_ON(!kmem_cache->num);
-	kmem_cache->gfporder = order;
-	kmem_cache->colour = left_over / kmem_cache->colour_off;
-	kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
-				      sizeof(struct slab), cache_line_size());
+	create_boot_cache(kmem_cache, "kmem_cache",
+		offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+				  nr_node_ids * sizeof(struct kmem_list3 *),
+				  SLAB_HWCACHE_ALIGN);
+	list_add(&kmem_cache->list, &slab_caches);
 
 	/* 2+3) create the kmalloc caches */
 	sizes = malloc_sizes;
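
create_boot_cache() is the common helper in mm/slab_common.c that now fills in the name, sizes and alignment and runs the cache through __kmem_cache_create() (which also performs the page-order search that used to be open-coded above), panicking if the boot cache cannot be set up. Hedging on the exact body, it amounts to roughly:

void __init create_boot_cache(struct kmem_cache *s, const char *name,
			      size_t size, unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = ARCH_KMALLOC_MINALIGN;	/* boot-time default alignment */
	err = __kmem_cache_create(s, flags);
	if (err)
		panic("Creation of kmalloc slab %s size=%zd failed\n",
		      name, size);
	s->refcount = -1;	/* boot caches are never merged or destroyed */
}
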
@@ -1671,23 +1634,13 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */
 
-	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
-	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
-	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
-	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
-
-	if (INDEX_AC != INDEX_L3) {
-		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
-		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
-		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
-		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
-	}
+	sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
+					sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
+
+	if (INDEX_AC != INDEX_L3)
+		sizes[INDEX_L3].cs_cachep =
+			create_kmalloc_cache(names[INDEX_L3].name,
+				sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
 
 	slab_early_init = 0;
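
create_kmalloc_cache() is the companion helper used for the kmalloc caches: it allocates a fresh descriptor from kmem_cache (which is functional by this point), initialises it via create_boot_cache() and registers it on slab_caches, replacing the open-coded sequence that each cache needed before. Sketched, with the same caveat that the authoritative body lives in mm/slab_common.c:

struct kmem_cache *__init create_kmalloc_cache(const char *name,
					       size_t size, unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}
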
@@ -1699,24 +1652,14 @@ void __init kmem_cache_init(void)
 	 * Note for systems short on memory removing the alignment will
 	 * allow tighter packing of the smaller caches.
 	 */
-	if (!sizes->cs_cachep) {
-		sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		sizes->cs_cachep->name = names->name;
-		sizes->cs_cachep->size = sizes->cs_size;
-		sizes->cs_cachep->object_size = sizes->cs_size;
-		sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-		__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-		list_add(&sizes->cs_cachep->list, &slab_caches);
-	}
+	if (!sizes->cs_cachep)
+		sizes->cs_cachep = create_kmalloc_cache(names->name,
+					sizes->cs_size, ARCH_KMALLOC_FLAGS);
+
 #ifdef CONFIG_ZONE_DMA
-	sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-	sizes->cs_dmacachep->name = names->name_dma;
-	sizes->cs_dmacachep->size = sizes->cs_size;
-	sizes->cs_dmacachep->object_size = sizes->cs_size;
-	sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
-	__kmem_cache_create(sizes->cs_dmacachep,
-			    ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|SLAB_PANIC);
-	list_add(&sizes->cs_dmacachep->list, &slab_caches);
+	sizes->cs_dmacachep = create_kmalloc_cache(
+		names->name_dma, sizes->cs_size,
+		SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
 #endif
 	sizes++;
 	names++;
@@ -1727,7 +1670,6 @@ void __init kmem_cache_init(void)
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
 		memcpy(ptr, cpu_cache_get(kmem_cache),
 		       sizeof(struct arraycache_init));
 		/*
@@ -2282,15 +2224,23 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 
 	if (slab_state == DOWN) {
 		/*
-		 * Note: the first kmem_cache_create must create the cache
+		 * Note: Creation of the first cache (kmem_cache).
+		 * The setting up of its list3s is taken care of
+		 * by the caller of __kmem_cache_create.
+		 */
+		cachep->array[smp_processor_id()] = &initarray_generic.cache;
+		slab_state = PARTIAL;
+	} else if (slab_state == PARTIAL) {
+		/*
+		 * Note: the second kmem_cache_create must create the cache
 		 * that's used by kmalloc(24), otherwise the creation of
 		 * further caches will BUG().
 		 */
 		cachep->array[smp_processor_id()] = &initarray_generic.cache;
 
 		/*
 		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
-		 * the first cache, then we need to set up all its list3s,
+		 * the second cache, then we need to set up all its list3s,
 		 * otherwise the creation of further caches will BUG().
 		 */
 		set_up_list3s(cachep, SIZE_AC);
@@ -2299,6 +2249,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		else
 			slab_state = PARTIAL_ARRAYCACHE;
 	} else {
+		/* Remaining boot caches */
 		cachep->array[smp_processor_id()] =
 			kmalloc(sizeof(struct arraycache_init), gfp);
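
The branches above encode the bootstrap ordering of the allocator. As an illustration only (the enum lives in mm/slab.h for this kernel generation), the states walked through by setup_cpu_cache() are roughly:

/*
 *   DOWN                 nothing works yet; the first cache created is
 *                        kmem_cache itself, using the static array cache
 *   PARTIAL              kmem_cache is usable; the cache backing
 *                        struct arraycache_init is created next
 *   PARTIAL_ARRAYCACHE   kmalloc(sizeof(struct arraycache_init)) works
 *   PARTIAL_L3           kmalloc(sizeof(struct kmem_list3)) works, so the
 *                        remaining boot caches can kmalloc() their
 *                        per-cpu arrays (the final else branch above)
 *   UP, FULL             all kmalloc caches, and later sysfs, available
 */
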
@@ -2331,11 +2282,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 
 /**
  * __kmem_cache_create - Create a cache.
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
+ * @cachep: cache management descriptor
  * @flags: SLAB flags
- * @ctor: A constructor for the objects.
  *
  * Returns a ptr to the cache on success, NULL on failure.
  * Cannot be called within an interrupt, but can be interrupted.
@@ -2378,11 +2326,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (flags & SLAB_DESTROY_BY_RCU)
 		BUG_ON(flags & SLAB_POISON);
 #endif
-	/*
-	 * Always checks flags, a caller might be expecting debug support which
-	 * isn't available.
-	 */
-	BUG_ON(flags & ~CREATE_MASK);
 
 	/*
 	 * Check that size is in terms of words. This is needed to avoid
@@ -2394,22 +2337,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size &= ~(BYTES_PER_WORD - 1);
 	}
 
-	/* calculate the final buffer alignment: */
-
-	/* 1) arch recommendation: can be overridden for debug */
-	if (flags & SLAB_HWCACHE_ALIGN) {
-		/*
-		 * Default alignment: as specified by the arch code. Except if
-		 * an object is really small, then squeeze multiple objects into
-		 * one cacheline.
-		 */
-		ralign = cache_line_size();
-		while (size <= ralign / 2)
-			ralign /= 2;
-	} else {
-		ralign = BYTES_PER_WORD;
-	}
-
 	/*
 	 * Redzoning and user store require word alignment or possibly larger.
 	 * Note this will be overridden by architecture or caller mandated
@@ -2426,10 +2353,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size &= ~(REDZONE_ALIGN - 1);
 	}
 
-	/* 2) arch mandated alignment */
-	if (ralign < ARCH_SLAB_MINALIGN) {
-		ralign = ARCH_SLAB_MINALIGN;
-	}
 	/* 3) caller mandated alignment */
 	if (ralign < cachep->align) {
 		ralign = cachep->align;
2447
2370
else
2448
2371
gfp = GFP_NOWAIT ;
2449
2372
2450
- cachep -> nodelists = ( struct kmem_list3 * * ) & cachep -> array [ nr_cpu_ids ] ;
2373
+ setup_nodelists_pointer ( cachep ) ;
2451
2374
#if DEBUG
2452
2375
2453
2376
/*
@@ -3969,12 +3892,6 @@ void kfree(const void *objp)
 }
 EXPORT_SYMBOL(kfree);
 
-unsigned int kmem_cache_size(struct kmem_cache *cachep)
-{
-	return cachep->object_size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-
 /*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
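
kmem_cache_size() can be dropped from this file because the same accessor is provided once for all slab allocators in the common code. Assuming object_size remains the field being reported, the shared definition amounts to:

unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);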