
Commit 3c58346

Christoph Lameter authored and penberg committed
slab: Simplify bootstrap
The nodelists field in kmem_cache points to the first unused object in the array field once bootstrap is complete. A problem with the current approach is that the statically sized kmem_cache structure used on boot can only contain NR_CPUS entries. If the number of nodes plus the number of cpus is greater, we would overwrite memory following the kmem_cache_boot definition.

Increase the size of the array field so that the node pointers also fit into it. Once we do that we no longer need the kmem_cache_nodelists array, and we can then also use that structure elsewhere.

Acked-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
1 parent 59a0991 commit 3c58346
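The heart of the change is a memory-layout question. For dynamically created caches, nodelists already points just past the last per-cpu entry of array[] (the __kmem_cache_create hunk below shows that cast). This patch makes the statically defined boot cache use the same scheme, which is why array[] grows from NR_CPUS to NR_CPUS + MAX_NUMNODES entries so the node pointers are guaranteed to fit. Below is a minimal standalone sketch of that layout; the types and constants are stand-ins for illustration, not the kernel's real definitions.

/*
 * Layout sketch only -- stand-in types and constants, not the kernel's
 * real definitions.  It mimics the trick from the patch: one statically
 * sized pointer array whose first nr_cpu_ids slots hold per-cpu caches
 * and whose remaining slots are reused as the per-node list pointers.
 */
#include <stdio.h>

#define NR_CPUS      8   /* stand-in for the kernel config value */
#define MAX_NUMNODES 4   /* stand-in for the kernel config value */

struct array_cache { int dummy; };
struct kmem_list3  { int dummy; };

struct kmem_cache {
	struct kmem_list3 **nodelists;
	/* Sized for cpus *and* nodes, as in the patch. */
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/* Do not add fields after array[] */
};

int main(void)
{
	static struct kmem_cache boot;   /* like kmem_cache_boot: static, zeroed */
	int nr_cpu_ids = NR_CPUS;        /* the kernel uses the runtime cpu count */

	/* Equivalent of setup_nodelists_pointer(): the node pointers
	 * begin right after the last cpu cache pointer. */
	boot.nodelists = (struct kmem_list3 **)&boot.array[nr_cpu_ids];

	printf("array[] has %zu slots: %d for cpus, %d reused for nodes\n",
	       sizeof(boot.array) / sizeof(boot.array[0]),
	       NR_CPUS, MAX_NUMNODES);
	return 0;
}

If array[] kept only NR_CPUS slots, writing MAX_NUMNODES node pointers through the aliased nodelists base would run past the end of the structure, which is exactly the overwrite the commit message warns about.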

2 files changed: +18, -9 lines

include/linux/slab_def.h

Lines changed: 5 additions & 1 deletion
@@ -89,9 +89,13 @@ struct kmem_cache {
 	 * (see kmem_cache_init())
 	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
 	 * is statically defined, so we reserve the max number of cpus.
+	 *
+	 * We also need to guarantee that the list is able to accomodate a
+	 * pointer for each node since "nodelists" uses the remainder of
+	 * available pointers.
 	 */
 	struct kmem_list3 **nodelists;
-	struct array_cache *array[NR_CPUS];
+	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
 	/*
 	 * Do not add fields after array[]
 	 */

mm/slab.c

Lines changed: 13 additions & 8 deletions
@@ -553,9 +553,7 @@ static struct arraycache_init initarray_generic =
 	{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
 static struct kmem_cache kmem_cache_boot = {
-	.nodelists = kmem_cache_nodelists,
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
@@ -1559,6 +1557,15 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
 	}
 }
 
+/*
+ * The memory after the last cpu cache pointer is used for the
+ * the nodelists pointer.
+ */
+static void setup_nodelists_pointer(struct kmem_cache *cachep)
+{
+	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+}
+
 /*
  * Initialisation. Called after the page allocator have been initialised and
  * before smp_init().
@@ -1573,15 +1580,14 @@ void __init kmem_cache_init(void)
 	int node;
 
 	kmem_cache = &kmem_cache_boot;
+	setup_nodelists_pointer(kmem_cache);
 
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
 
-	for (i = 0; i < NUM_INIT_LISTS; i++) {
+	for (i = 0; i < NUM_INIT_LISTS; i++)
 		kmem_list3_init(&initkmem_list3[i]);
-		if (i < MAX_NUMNODES)
-			kmem_cache->nodelists[i] = NULL;
-	}
+
 	set_up_list3s(kmem_cache, CACHE_CACHE);
 
 	/*
@@ -1619,7 +1625,6 @@ void __init kmem_cache_init(void)
 	list_add(&kmem_cache->list, &slab_caches);
 	kmem_cache->colour_off = cache_line_size();
 	kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
-	kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
@@ -2422,7 +2427,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	else
 		gfp = GFP_NOWAIT;
 
-	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+	setup_nodelists_pointer(cachep);
 #if DEBUG
 
 	/*
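One detail the kmem_cache_init() hunk appears to lean on: kmem_cache_boot has static storage duration and a partial initializer, so every array[] slot (and therefore every slot the new nodelists base aliases) already starts out as NULL under the C initialization rules, which is presumably why the explicit kmem_cache->nodelists[i] = NULL assignments in the loop can simply be dropped. A small standalone illustration of that language guarantee, again with stand-in types rather than the kernel's:

/*
 * Illustration only, with stand-in types.  Objects with static storage
 * duration and a partial initializer have all remaining members
 * zero-initialized by the C standard, so the boot cache's pointer slots
 * start out NULL without any explicit loop.
 */
#include <assert.h>
#include <stddef.h>

#define NSLOTS 12	/* stand-in for NR_CPUS + MAX_NUMNODES */

struct kmem_cache_like {
	int batchcount;
	void *array[NSLOTS];
};

/* Partial initializer, as with kmem_cache_boot: only some fields named. */
static struct kmem_cache_like boot = {
	.batchcount = 1,
};

int main(void)
{
	for (size_t i = 0; i < NSLOTS; i++)
		assert(boot.array[i] == NULL);	/* guaranteed by the language */
	return 0;
}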
