
Commit 08afe22

Merge branch 'slab/next' into slab/for-linus

Fix up a trivial merge conflict with commit baaf1dd ("mm/slob: use min_t() to
compare ARCH_SLAB_MINALIGN") that did not go through the slab tree.

Conflicts:
	mm/slob.c

Signed-off-by: Pekka Enberg <penberg@kernel.org>

2 parents: a304f83 + 4590685

8 files changed: +236 −333 lines changed

include/linux/mm_types.h (2 additions, 5 deletions)

@@ -128,10 +128,7 @@ struct page {
 		};
 
 		struct list_head list;	/* slobs list of pages */
-		struct {		/* slab fields */
-			struct kmem_cache *slab_cache;
-			struct slab *slab_page;
-		};
+		struct slab *slab_page; /* slab fields */
 	};
 
 	/* Remainder is not double word aligned */
@@ -146,7 +143,7 @@ struct page {
 #if USE_SPLIT_PTLOCKS
 		spinlock_t ptl;
 #endif
-		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
 		struct page *first_page;	/* Compound tail pages */
 	};
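
The hunks above collapse SLAB's private pair of fields into the shared union slot (only slab_page remains there) and rename the old SLUB-only "slab" pointer to slab_cache, so SLAB and SLUB now share one field name. A minimal user-space sketch of the union-overlay idea follows; struct page_model and its members are simplified stand-ins for illustration, not the kernel layout.

/* Simplified model: union members overlay the same storage, so keeping the
 * slab_page pointer alongside other users costs no extra space per page. */
#include <stdio.h>

struct kmem_cache;			/* opaque stand-in for the cache descriptor */
struct slab;				/* opaque stand-in for the slab descriptor */

struct page_model {
	union {				/* one slot, reused per subsystem */
		void *freelist;		/* e.g. SLUB's first-free-object pointer */
		struct slab *slab_page;	/* SLAB: slab descriptor (this commit) */
	};
	struct kmem_cache *slab_cache;	/* SL[AU]B: owning cache */
};

int main(void)
{
	/* The union is as large as its biggest member, not the sum of them. */
	printf("sizeof(struct page_model) = %zu\n", sizeof(struct page_model));
	return 0;
}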

include/linux/slab.h (8 additions, 1 deletion)

@@ -128,7 +128,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
-unsigned int kmem_cache_size(struct kmem_cache *);
 
 /*
  * Please use this macro to create slab caches. Simply specify the
@@ -388,6 +387,14 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
 	return kmalloc_node(size, flags | __GFP_ZERO, node);
 }
 
+/*
+ * Determine the size of a slab object
+ */
+static inline unsigned int kmem_cache_size(struct kmem_cache *s)
+{
+	return s->object_size;
+}
+
 void __init kmem_cache_init_late(void);
 
 #endif	/* _LINUX_SLAB_H */
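
With object_size available in every allocator's struct kmem_cache, the exported prototype above is replaced by a single inline accessor, and the out-of-line SLAB definition is dropped at the end of the mm/slab.c diff below. A small user-space model of the accessor pattern, with a deliberately stripped-down struct that is only a stand-in:

/* Model only: the real struct kmem_cache has many more fields, and
 * object_size is filled in by the allocator when the cache is created. */
#include <stdio.h>

struct kmem_cache {
	unsigned int object_size;	/* size of each object in the cache */
	/* ... allocator-specific fields omitted ... */
};

static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}

int main(void)
{
	struct kmem_cache example = { .object_size = 192 };

	printf("object size: %u\n", kmem_cache_size(&example));
	return 0;
}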

include/linux/slab_def.h (5 additions, 1 deletion)

@@ -89,9 +89,13 @@ struct kmem_cache {
 	 * (see kmem_cache_init())
 	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
 	 * is statically defined, so we reserve the max number of cpus.
+	 *
+	 * We also need to guarantee that the list is able to accomodate a
+	 * pointer for each node since "nodelists" uses the remainder of
+	 * available pointers.
 	 */
 	struct kmem_list3 **nodelists;
-	struct array_cache *array[NR_CPUS];
+	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
 	/*
 	 * Do not add fields after array[]
 	 */
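
Reserving NR_CPUS + MAX_NUMNODES slots lets the per-node list pointers live in the tail of array[] beyond the nr_cpu_ids per-cpu entries actually in use, which is exactly where the new setup_nodelists_pointer() helper in the mm/slab.c diff below aims nodelists. A user-space sketch of that carve-out; the constants and the struct are stand-ins chosen for illustration:

/* Layout sketch: nodelists points at the spare pointer slots after the
 * per-cpu entries, so the boot cache needs no separate allocation for it. */
#include <stdio.h>
#include <stddef.h>

#define NR_CPUS      8
#define MAX_NUMNODES 4

struct array_cache;			/* opaque stand-ins */
struct kmem_list3;

struct kmem_cache_model {
	struct kmem_list3 **nodelists;
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/* Do not add fields after array[] */
};

int main(void)
{
	struct kmem_cache_model c = { 0 };
	unsigned int nr_cpu_ids = 4;	/* CPUs actually possible on this boot */

	/* Same pointer arithmetic as setup_nodelists_pointer() in mm/slab.c. */
	c.nodelists = (struct kmem_list3 **)&c.array[nr_cpu_ids];

	printf("nodelists starts %zu bytes into array[]\n",
	       (char *)c.nodelists - (char *)c.array);
	return 0;
}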

mm/slab.c (44 additions, 127 deletions)
@@ -162,23 +162,6 @@
  */
 static bool pfmemalloc_active __read_mostly;
 
-/* Legal flag mask for kmem_cache_create(). */
-#if DEBUG
-# define CREATE_MASK	(SLAB_RED_ZONE | \
-			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | \
-			 SLAB_STORE_USER | \
-			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#else
-# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | \
-			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#endif
-
 /*
  * kmem_bufctl_t:
  *
@@ -564,15 +547,11 @@ static struct cache_names __initdata cache_names[] = {
 #undef CACHE
 };
 
-static struct arraycache_init initarray_cache __initdata =
-    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
 static struct kmem_cache kmem_cache_boot = {
-	.nodelists = kmem_cache_nodelists,
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
@@ -1576,29 +1555,34 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
 	}
 }
 
+/*
+ * The memory after the last cpu cache pointer is used for the
+ * the nodelists pointer.
+ */
+static void setup_nodelists_pointer(struct kmem_cache *cachep)
+{
+	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+}
+
 /*
  * Initialisation. Called after the page allocator have been initialised and
  * before smp_init().
  */
 void __init kmem_cache_init(void)
 {
-	size_t left_over;
 	struct cache_sizes *sizes;
 	struct cache_names *names;
 	int i;
-	int order;
-	int node;
 
 	kmem_cache = &kmem_cache_boot;
+	setup_nodelists_pointer(kmem_cache);
 
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
 
-	for (i = 0; i < NUM_INIT_LISTS; i++) {
+	for (i = 0; i < NUM_INIT_LISTS; i++)
 		kmem_list3_init(&initkmem_list3[i]);
-		if (i < MAX_NUMNODES)
-			kmem_cache->nodelists[i] = NULL;
-	}
+
 	set_up_list3s(kmem_cache, CACHE_CACHE);
 
 	/*
@@ -1629,37 +1613,16 @@ void __init kmem_cache_init(void)
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
-	node = numa_mem_id();
-
 	/* 1) create the kmem_cache */
-	INIT_LIST_HEAD(&slab_caches);
-	list_add(&kmem_cache->list, &slab_caches);
-	kmem_cache->colour_off = cache_line_size();
-	kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
-	kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
 	 */
-	kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
-				  nr_node_ids * sizeof(struct kmem_list3 *);
-	kmem_cache->object_size = kmem_cache->size;
-	kmem_cache->size = ALIGN(kmem_cache->object_size,
-					cache_line_size());
-	kmem_cache->reciprocal_buffer_size =
-		reciprocal_value(kmem_cache->size);
-
-	for (order = 0; order < MAX_ORDER; order++) {
-		cache_estimate(order, kmem_cache->size,
-			cache_line_size(), 0, &left_over, &kmem_cache->num);
-		if (kmem_cache->num)
-			break;
-	}
-	BUG_ON(!kmem_cache->num);
-	kmem_cache->gfporder = order;
-	kmem_cache->colour = left_over / kmem_cache->colour_off;
-	kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
-				      sizeof(struct slab), cache_line_size());
+	create_boot_cache(kmem_cache, "kmem_cache",
+				  offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+				  nr_node_ids * sizeof(struct kmem_list3 *),
+				  SLAB_HWCACHE_ALIGN);
+	list_add(&kmem_cache->list, &slab_caches);
 
 	/* 2+3) create the kmalloc caches */
 	sizes = malloc_sizes;
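
The size handed to create_boot_cache() above has to cover the fixed head of struct kmem_cache, nr_cpu_ids per-cpu array slots, and the nr_node_ids node-list pointers tucked in after them (see the slab_def.h change earlier). A quick user-space illustration of that arithmetic; the struct and the counts are stand-ins, not the kernel's:

#include <stdio.h>
#include <stddef.h>

struct array_cache;			/* opaque stand-ins */
struct kmem_list3;

struct kmem_cache_model {
	unsigned int batchcount, limit, shared, size;
	struct kmem_list3 **nodelists;
	struct array_cache *array[];	/* flexible tail in this model */
};

int main(void)
{
	unsigned int nr_cpu_ids = 4, nr_node_ids = 2;

	/* Mirrors offsetof(struct kmem_cache, array[nr_cpu_ids]) + node pointers */
	size_t boot_size = offsetof(struct kmem_cache_model, array) +
			   nr_cpu_ids * sizeof(struct array_cache *) +
			   nr_node_ids * sizeof(struct kmem_list3 *);

	printf("boot kmem_cache object size: %zu bytes\n", boot_size);
	return 0;
}
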
@@ -1671,23 +1634,13 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */
 
-	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
-	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
-	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
-	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
-
-	if (INDEX_AC != INDEX_L3) {
-		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
-		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
-		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
-		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
-	}
+	sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
+					sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
+
+	if (INDEX_AC != INDEX_L3)
+		sizes[INDEX_L3].cs_cachep =
+			create_kmalloc_cache(names[INDEX_L3].name,
+				sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
 
 	slab_early_init = 0;
 
@@ -1699,24 +1652,14 @@ void __init kmem_cache_init(void)
 		 * Note for systems short on memory removing the alignment will
 		 * allow tighter packing of the smaller caches.
 		 */
-		if (!sizes->cs_cachep) {
-			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-			sizes->cs_cachep->name = names->name;
-			sizes->cs_cachep->size = sizes->cs_size;
-			sizes->cs_cachep->object_size = sizes->cs_size;
-			sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
-			__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
-			list_add(&sizes->cs_cachep->list, &slab_caches);
-		}
+		if (!sizes->cs_cachep)
+			sizes->cs_cachep = create_kmalloc_cache(names->name,
+						sizes->cs_size, ARCH_KMALLOC_FLAGS);
+
 #ifdef CONFIG_ZONE_DMA
-		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		sizes->cs_dmacachep->name = names->name_dma;
-		sizes->cs_dmacachep->size = sizes->cs_size;
-		sizes->cs_dmacachep->object_size = sizes->cs_size;
-		sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
-		__kmem_cache_create(sizes->cs_dmacachep,
-			       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
-		list_add(&sizes->cs_dmacachep->list, &slab_caches);
+		sizes->cs_dmacachep = create_kmalloc_cache(
+			names->name_dma, sizes->cs_size,
+			SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
 #endif
 		sizes++;
 		names++;
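
The two hunks above replace the repeated per-cache boilerplate (zalloc a descriptor, fill in name/size/align, call __kmem_cache_create(), add it to slab_caches) with single calls to the new create_kmalloc_cache() helper from the common slab code. A user-space approximation of what that helper bundles together; the names, list handling, and error behaviour here are stand-ins, not the kernel implementation:

#include <stdio.h>
#include <stdlib.h>

struct kmem_cache_model {
	const char *name;
	size_t size;
	size_t object_size;
	unsigned long flags;
	struct kmem_cache_model *next;	/* stand-in for list_add(&s->list, ...) */
};

static struct kmem_cache_model *slab_caches;

static struct kmem_cache_model *create_kmalloc_cache_model(const char *name,
					size_t size, unsigned long flags)
{
	struct kmem_cache_model *s = calloc(1, sizeof(*s));

	if (!s)
		abort();		/* boot caches must not fail to create */
	s->name = name;
	s->size = s->object_size = size;
	s->flags = flags;
	/* ... the __kmem_cache_create() equivalent would run here ... */
	s->next = slab_caches;		/* register on the global cache list */
	slab_caches = s;
	return s;
}

int main(void)
{
	struct kmem_cache_model *ac = create_kmalloc_cache_model("size-32", 32, 0);

	printf("created %s, object size %zu\n", ac->name, ac->object_size);
	return 0;
}
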
@@ -1727,7 +1670,6 @@ void __init kmem_cache_init(void)
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
 		memcpy(ptr, cpu_cache_get(kmem_cache),
 		       sizeof(struct arraycache_init));
 		/*
@@ -2282,15 +2224,23 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 
 	if (slab_state == DOWN) {
 		/*
-		 * Note: the first kmem_cache_create must create the cache
+		 * Note: Creation of first cache (kmem_cache).
+		 * The setup_list3s is taken care
+		 * of by the caller of __kmem_cache_create
+		 */
+		cachep->array[smp_processor_id()] = &initarray_generic.cache;
+		slab_state = PARTIAL;
+	} else if (slab_state == PARTIAL) {
+		/*
+		 * Note: the second kmem_cache_create must create the cache
 		 * that's used by kmalloc(24), otherwise the creation of
 		 * further caches will BUG().
 		 */
 		cachep->array[smp_processor_id()] = &initarray_generic.cache;
 
 		/*
 		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
-		 * the first cache, then we need to set up all its list3s,
+		 * the second cache, then we need to set up all its list3s,
 		 * otherwise the creation of further caches will BUG().
 		 */
 		set_up_list3s(cachep, SIZE_AC);
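
With kmem_cache itself now built through create_boot_cache(), the DOWN branch above handles cache #1 and a new PARTIAL branch takes over the role DOWN used to play for the first kmalloc cache. A rough user-space skeleton of that progression follows; the state names mirror the kernel's enum slab_state, but the transitions shown are a simplification of one path through setup_cpu_cache():

#include <stdio.h>

/* Names follow the kernel's enum slab_state; this is only a model. */
enum slab_state { DOWN, PARTIAL, PARTIAL_ARRAYCACHE, PARTIAL_L3, UP, FULL };

static enum slab_state slab_state = DOWN;

static void setup_cpu_cache_model(const char *cache)
{
	switch (slab_state) {
	case DOWN:			/* cache #1: kmem_cache itself */
		slab_state = PARTIAL;
		break;
	case PARTIAL:			/* cache #2: the kmalloc array cache */
		slab_state = PARTIAL_ARRAYCACHE;
		break;
	default:			/* remaining boot caches */
		break;
	}
	printf("%-22s -> state %d\n", cache, slab_state);
}

int main(void)
{
	setup_cpu_cache_model("kmem_cache");
	setup_cpu_cache_model("kmalloc (INDEX_AC)");
	setup_cpu_cache_model("kmalloc (INDEX_L3)");
	return 0;
}
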
@@ -2299,6 +2249,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		else
 			slab_state = PARTIAL_ARRAYCACHE;
 	} else {
+		/* Remaining boot caches */
 		cachep->array[smp_processor_id()] =
 			kmalloc(sizeof(struct arraycache_init), gfp);
 
@@ -2331,11 +2282,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 
 /**
  * __kmem_cache_create - Create a cache.
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
+ * @cachep: cache management descriptor
  * @flags: SLAB flags
- * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within a int, but can be interrupted.
@@ -2378,11 +2326,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (flags & SLAB_DESTROY_BY_RCU)
 		BUG_ON(flags & SLAB_POISON);
 #endif
-	/*
-	 * Always checks flags, a caller might be expecting debug support which
-	 * isn't available.
-	 */
-	BUG_ON(flags & ~CREATE_MASK);
 
 	/*
 	 * Check that size is in terms of words. This is needed to avoid
@@ -2394,22 +2337,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size &= ~(BYTES_PER_WORD - 1);
 	}
 
-	/* calculate the final buffer alignment: */
-
-	/* 1) arch recommendation: can be overridden for debug */
-	if (flags & SLAB_HWCACHE_ALIGN) {
-		/*
-		 * Default alignment: as specified by the arch code. Except if
-		 * an object is really small, then squeeze multiple objects into
-		 * one cacheline.
-		 */
-		ralign = cache_line_size();
-		while (size <= ralign / 2)
-			ralign /= 2;
-	} else {
-		ralign = BYTES_PER_WORD;
-	}
-
 	/*
 	 * Redzoning and user store require word alignment or possibly larger.
 	 * Note this will be overridden by architecture or caller mandated
@@ -2426,10 +2353,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size &= ~(REDZONE_ALIGN - 1);
 	}
 
-	/* 2) arch mandated alignment */
-	if (ralign < ARCH_SLAB_MINALIGN) {
-		ralign = ARCH_SLAB_MINALIGN;
-	}
 	/* 3) caller mandated alignment */
 	if (ralign < cachep->align) {
 		ralign = cachep->align;
@@ -2447,7 +2370,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	else
 		gfp = GFP_NOWAIT;
 
-	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+	setup_nodelists_pointer(cachep);
 #if DEBUG
 
 	/*
@@ -3969,12 +3892,6 @@ void kfree(const void *objp)
 }
 EXPORT_SYMBOL(kfree);
 
-unsigned int kmem_cache_size(struct kmem_cache *cachep)
-{
-	return cachep->object_size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-
 /*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
