Commit 54be820
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull slab update from Pekka Enberg:
 "Highlights:

  - Fix for boot-time problems on some architectures due to
    init_lock_keys() not respecting kmalloc_caches boundaries
    (Christoph Lameter)

  - CONFIG_SLUB_CPU_PARTIAL requested by RT folks (Joonsoo Kim)

  - Fix for excessive slab freelist draining (Wanpeng Li)

  - SLUB and SLOB cleanups and fixes (various people)"

I ended up editing the branch, and this avoids two commits at the end
that were immediately reverted, and I instead just applied the oneliner
fix in between myself.

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  slub: Check for page NULL before doing the node_match check
  mm/slab: Give s_next and s_stop slab-specific names
  slob: Check for NULL pointer before calling ctor()
  slub: Make cpu partial slab support configurable
  slab: add kmalloc() to kernel API documentation
  slab: fix init_lock_keys
  slob: use DIV_ROUND_UP where possible
  slub: do not put a slab to cpu partial list when cpu_partial is 0
  mm/slub: Use node_nr_slabs and node_nr_objs in get_slabinfo
  mm/slub: Drop unnecessary nr_partials
  mm/slab: Fix /proc/slabinfo unwriteable for slab
  mm/slab: Sharing s_next and s_stop between slab and slub
  mm/slab: Fix drain freelist excessively
  slob: Rework #ifdeffery in slab.h
  mm, slab: moved kmem_cache_alloc_node comment to correct place
2 parents: 41d9884 + c25f195

8 files changed: +121 additions, -69 deletions
include/linux/slab.h

Lines changed: 42 additions & 15 deletions
@@ -169,11 +169,7 @@ struct kmem_cache {
         struct list_head list;  /* List of all slab caches on the system */
 };
 
-#define KMALLOC_MAX_SIZE (1UL << 30)
-
-#include <linux/slob_def.h>
-
-#else /* CONFIG_SLOB */
+#endif /* CONFIG_SLOB */
 
 /*
  * Kmalloc array related definitions
@@ -195,7 +191,9 @@ struct kmem_cache {
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW 5
 #endif
-#else
+#endif
+
+#ifdef CONFIG_SLUB
 /*
  * SLUB allocates up to order 2 pages directly and otherwise
  * passes the request to the page allocator.
@@ -207,6 +205,19 @@ struct kmem_cache {
 #endif
 #endif
 
+#ifdef CONFIG_SLOB
+/*
+ * SLOB passes all page size and larger requests to the page allocator.
+ * No kmalloc array is necessary since objects of different sizes can
+ * be allocated from the same page.
+ */
+#define KMALLOC_SHIFT_MAX 30
+#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW 3
+#endif
+#endif
+
 /* Maximum allocatable size */
 #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
 /* Maximum size for which we actually use a slab cache */
@@ -221,6 +232,7 @@ struct kmem_cache {
 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
 #endif
 
+#ifndef CONFIG_SLOB
 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 #ifdef CONFIG_ZONE_DMA
 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
@@ -275,13 +287,18 @@ static __always_inline int kmalloc_index(size_t size)
         /* Will never be reached. Needed because the compiler may complain */
         return -1;
 }
+#endif /* !CONFIG_SLOB */
 
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
-#elif defined(CONFIG_SLUB)
+#endif
+
+#ifdef CONFIG_SLUB
 #include <linux/slub_def.h>
-#else
-#error "Unknown slab allocator"
+#endif
+
+#ifdef CONFIG_SLOB
+#include <linux/slob_def.h>
 #endif
 
 /*
@@ -291,6 +308,7 @@ static __always_inline int kmalloc_size(int n)
  */
 static __always_inline int kmalloc_size(int n)
 {
+#ifndef CONFIG_SLOB
         if (n > 2)
                 return 1 << n;
 
@@ -299,10 +317,9 @@ static __always_inline int kmalloc_size(int n)
 
         if (n == 2 && KMALLOC_MIN_SIZE <= 64)
                 return 192;
-
+#endif
         return 0;
 }
-#endif /* !CONFIG_SLOB */
 
 /*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
@@ -356,9 +373,8 @@ int cache_show(struct kmem_cache *s, struct seq_file *m);
 void print_slabinfo_header(struct seq_file *m);
 
 /**
- * kmalloc_array - allocate memory for an array.
- * @n: number of elements.
- * @size: element size.
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate.
  *
  * The @flags argument may be one of:
@@ -405,6 +421,17 @@ void print_slabinfo_header(struct seq_file *m);
  * There are other flags available as well, but these are not intended
  * for general use, and so are not documented here. For a full list of
  * potential flags, always refer to linux/gfp.h.
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags);
+
+/**
+ * kmalloc_array - allocate memory for an array.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate (see kmalloc).
  */
 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
@@ -428,7 +455,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 /**
  * kmalloc_node - allocate memory from a specific node
  * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
+ * @flags: the type of memory to allocate (see kmalloc).
  * @node: node to allocate from.
 *
 * kmalloc() for non-local nodes, used to allocate from a specific node
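The kmalloc_size() logic above maps a kmalloc array index to its object size: indices above 2 are plain powers of two, while indices 1 and 2 are the odd 96- and 192-byte caches (the 96-byte branch sits in the elided middle of the function, so treat it as an assumption here). A standalone user-space sketch of the same mapping, assuming KMALLOC_SHIFT_LOW of 3 as in the SLOB block above; kmalloc_size_demo is an illustrative name, not kernel API:

#include <stdio.h>

#define KMALLOC_MIN_SIZE 8   /* assumed: KMALLOC_SHIFT_LOW == 3 */

/* Mirror of the header's kmalloc_size(): kmalloc array index -> object size. */
static int kmalloc_size_demo(int n)
{
        if (n > 2)
                return 1 << n;          /* indices 3.. are powers of two */
        if (n == 1 && KMALLOC_MIN_SIZE <= 32)
                return 96;              /* off-power cache between 64 and 128 */
        if (n == 2 && KMALLOC_MIN_SIZE <= 64)
                return 192;             /* off-power cache between 128 and 256 */
        return 0;
}

int main(void)
{
        for (int n = 1; n <= 12; n++)
                printf("index %2d -> %4d bytes\n", n, kmalloc_size_demo(n));
        return 0;
}

Running it prints 96, 192, then 8, 16, 32, ... 4096, which is why the #ifdeffery above only needs to pick the shift bounds per allocator: the index-to-size rule itself is shared.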

include/linux/slob_def.h

Lines changed: 0 additions & 8 deletions
@@ -18,14 +18,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
         return __kmalloc_node(size, flags, node);
 }
 
-/**
- * kmalloc - allocate memory
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
- *
- * kmalloc is the normal method of allocating memory
- * in the kernel.
- */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
         return __kmalloc_node(size, flags, NUMA_NO_NODE);
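With the comment moved to slab.h, what remains here is the bare delegation: SLOB's kmalloc() is kmalloc_node() with NUMA_NO_NODE, i.e. "no placement preference". A user-space sketch of that wrapper pattern; the demo_* names are assumptions, and only NUMA_NO_NODE's value of -1 mirrors the kernel:

#include <stdlib.h>

#define NUMA_NO_NODE (-1)   /* same "any node" sentinel the kernel uses */

/* Stand-in for __kmalloc_node(): the node-aware worker does the real work. */
static void *demo_kmalloc_node(size_t size, int node)
{
        (void)node;          /* a real allocator would prefer this node */
        return malloc(size);
}

/* kmalloc() is just the node-agnostic front end, the same shape as above. */
static void *demo_kmalloc(size_t size)
{
        return demo_kmalloc_node(size, NUMA_NO_NODE);
}

int main(void)
{
        void *p = demo_kmalloc(64);
        free(p);
        return 0;
}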

init/Kconfig

Lines changed: 11 additions & 0 deletions
@@ -1596,6 +1596,17 @@ config SLOB
 
 endchoice
 
+config SLUB_CPU_PARTIAL
+        default y
+        depends on SLUB
+        bool "SLUB per cpu partial cache"
+        help
+          Per cpu partial caches accellerate objects allocation and freeing
+          that is local to a processor at the price of more indeterminism
+          in the latency of the free. On overflow these caches will be cleared
+          which requires the taking of locks that may cause latency spikes.
+          Typically one would choose no for a realtime system.
+
 config MMAP_ALLOW_UNINITIALIZED
         bool "Allow mmapped anonymous memory to be uninitialized"
         depends on EXPERT && !MMU
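The new Kconfig symbol surfaces in C code as the CONFIG_SLUB_CPU_PARTIAL macro, which SLUB uses to compile the per-cpu partial paths in or out. A schematic, compilable sketch of that gating; the struct and helper below are illustrative stand-ins, not copied from mm/slub.c:

#include <stdio.h>

/* Schematic: how the CONFIG_SLUB_CPU_PARTIAL macro gates a field and its use. */
struct demo_kmem_cache_cpu {
        void **freelist;                 /* always present */
#ifdef CONFIG_SLUB_CPU_PARTIAL
        struct page *partial;            /* built only when the option is set */
#endif
};

static int demo_has_cpu_partial(void)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
        return 1;
#else
        return 0;                        /* an RT config takes this branch */
#endif
}

int main(void)
{
        printf("cpu partial support: %d\n", demo_has_cpu_partial());
        printf("sizeof(cpu struct):  %zu\n", sizeof(struct demo_kmem_cache_cpu));
        return 0;
}

Building with -DCONFIG_SLUB_CPU_PARTIAL flips the branch, which is what "default y" does for a normal kernel build; a realtime configuration would leave the option unset to avoid the latency spikes the help text describes.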

mm/slab.c

Lines changed: 23 additions & 28 deletions
@@ -565,7 +565,7 @@ static void init_node_lock_keys(int q)
         if (slab_state < UP)
                 return;
 
-        for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+        for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
                 struct kmem_cache_node *n;
                 struct kmem_cache *cache = kmalloc_caches[i];
 
@@ -1180,6 +1180,12 @@ static int init_cache_node_node(int node)
         return 0;
 }
 
+static inline int slabs_tofree(struct kmem_cache *cachep,
+                                struct kmem_cache_node *n)
+{
+        return (n->free_objects + cachep->num - 1) / cachep->num;
+}
+
 static void __cpuinit cpuup_canceled(long cpu)
 {
         struct kmem_cache *cachep;
@@ -1241,7 +1247,7 @@ static void __cpuinit cpuup_canceled(long cpu)
                 n = cachep->node[node];
                 if (!n)
                         continue;
-                drain_freelist(cachep, n, n->free_objects);
+                drain_freelist(cachep, n, slabs_tofree(cachep, n));
         }
 }
 
@@ -1408,7 +1414,7 @@ static int __meminit drain_cache_node_node(int node)
                 if (!n)
                         continue;
 
-                drain_freelist(cachep, n, n->free_objects);
+                drain_freelist(cachep, n, slabs_tofree(cachep, n));
 
                 if (!list_empty(&n->slabs_full) ||
                     !list_empty(&n->slabs_partial)) {
@@ -2532,7 +2538,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
                 if (!n)
                         continue;
 
-                drain_freelist(cachep, n, n->free_objects);
+                drain_freelist(cachep, n, slabs_tofree(cachep, n));
 
                 ret += !list_empty(&n->slabs_full) ||
                         !list_empty(&n->slabs_partial);
@@ -3338,18 +3344,6 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
         return obj;
 }
 
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
 static __always_inline void *
 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
                 unsigned long caller)
@@ -3643,6 +3637,17 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 #ifdef CONFIG_NUMA
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
         void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
@@ -4431,20 +4436,10 @@ static int leaks_show(struct seq_file *m, void *p)
         return 0;
 }
 
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
-        return seq_list_next(p, &slab_caches, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-{
-        mutex_unlock(&slab_mutex);
-}
-
 static const struct seq_operations slabstats_op = {
         .start = leaks_start,
-        .next = s_next,
-        .stop = s_stop,
+        .next = slab_next,
+        .stop = slab_stop,
         .show = leaks_show,
 };
mm/slab.h

Lines changed: 3 additions & 0 deletions
@@ -271,3 +271,6 @@ struct kmem_cache_node {
 #endif
 
 };
+
+void *slab_next(struct seq_file *m, void *p, loff_t *pos);
+void slab_stop(struct seq_file *m, void *p);
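These two prototypes are the whole interface of the refactor: mm/slab_common.c keeps the single iterator implementation, and mm/slab.c's leak report reuses it instead of carrying an identical static copy. A minimal, self-contained model of sharing one next/stop pair between two seq-style operation tables; every demo_* type and name here is a stand-in, not the kernel's seq_file API:

#include <stdio.h>

/* Model of the refactor: one next/stop pair shared by two operation tables. */
struct demo_seq_ops {
        void *(*next)(int *pos, int len);
        void (*stop)(void);
};

static void *shared_next(int *pos, int len)   /* plays the role of slab_next() */
{
        return (++*pos < len) ? (void *)pos : NULL;
}

static void shared_stop(void)                 /* plays the role of slab_stop() */
{
        puts("unlock");   /* the kernel version drops slab_mutex here */
}

/* Both tables point at the same helpers, as slabinfo_op and slabstats_op do. */
static const struct demo_seq_ops slabinfo_demo  = { shared_next, shared_stop };
static const struct demo_seq_ops slabstats_demo = { shared_next, shared_stop };

int main(void)
{
        int pos = -1;
        while (slabinfo_demo.next(&pos, 3))
                printf("row %d\n", pos);
        slabinfo_demo.stop();
        (void)slabstats_demo;
        return 0;
}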

mm/slab_common.c

Lines changed: 13 additions & 5 deletions
@@ -497,6 +497,13 @@ void __init create_kmalloc_caches(unsigned long flags)
 
 
 #ifdef CONFIG_SLABINFO
+
+#ifdef CONFIG_SLAB
+#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
+#else
+#define SLABINFO_RIGHTS S_IRUSR
+#endif
+
 void print_slabinfo_header(struct seq_file *m)
 {
         /*
@@ -531,12 +538,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
         return seq_list_start(&slab_caches, *pos);
 }
 
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+void *slab_next(struct seq_file *m, void *p, loff_t *pos)
 {
         return seq_list_next(p, &slab_caches, pos);
 }
 
-static void s_stop(struct seq_file *m, void *p)
+void slab_stop(struct seq_file *m, void *p)
 {
         mutex_unlock(&slab_mutex);
 }
@@ -613,8 +620,8 @@ static int s_show(struct seq_file *m, void *p)
  */
 static const struct seq_operations slabinfo_op = {
         .start = s_start,
-        .next = s_next,
-        .stop = s_stop,
+        .next = slab_next,
+        .stop = slab_stop,
         .show = s_show,
 };
 
@@ -633,7 +640,8 @@ static const struct file_operations proc_slabinfo_operations = {
 
 static int __init slab_proc_init(void)
 {
-        proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
+        proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
+                        &proc_slabinfo_operations);
         return 0;
 }
 module_init(slab_proc_init);
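SLABINFO_RIGHTS grants owner write permission only under CONFIG_SLAB, because only the SLAB allocator accepts tuning writes to /proc/slabinfo (this is the "/proc/slabinfo unwriteable for slab" fix); SLUB's view stays read-only. In octal, S_IWUSR | S_IRUSR is 0600 and S_IRUSR alone is 0400, which this small user-space check confirms:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        /* Mode when CONFIG_SLAB: owner may read and write (tune) slabinfo. */
        printf("SLAB:  %04o\n", (unsigned)(S_IWUSR | S_IRUSR));  /* 0600 */
        /* Mode otherwise (e.g. SLUB): owner read-only. */
        printf("other: %04o\n", (unsigned)S_IRUSR);              /* 0400 */
        return 0;
}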

mm/slob.c

Lines changed: 2 additions & 2 deletions
@@ -122,7 +122,7 @@ static inline void clear_slob_page_free(struct page *sp)
 }
 
 #define SLOB_UNIT sizeof(slob_t)
-#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
+#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
 
 /*
  * struct slob_rcu is inserted at the tail of allocated slob blocks, which
@@ -554,7 +554,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
                                             flags, node);
         }
 
-        if (c->ctor)
+        if (b && c->ctor)
                 c->ctor(b);
 
         kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
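Two independent fixes here. DIV_ROUND_UP(n, d) in include/linux/kernel.h expands to ((n) + (d) - 1) / (d), exactly the ceiling division SLOB_UNITS spelled out by hand, so that hunk changes no behavior. And since the allocation a few lines up can return NULL under memory pressure, the constructor must only run on a non-NULL object. A standalone sketch of both points; the demo_* names are assumptions:

#include <stdio.h>
#include <stddef.h>

/* Same definition as the kernel's DIV_ROUND_UP in include/linux/kernel.h. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct demo_cache {
        size_t size;
        void (*ctor)(void *);    /* optional constructor, may be NULL */
};

/* The fix: run the constructor only if the allocation succeeded. */
static void *demo_ctor_call(const struct demo_cache *c, void *b)
{
        if (b && c->ctor)
                c->ctor(b);
        return b;
}

int main(void)
{
        /* A 10-byte request with 4-byte SLOB units occupies 3 units. */
        printf("units: %d\n", DIV_ROUND_UP(10, 4));

        struct demo_cache c = { .size = 10, .ctor = NULL };
        demo_ctor_call(&c, NULL);   /* safe even when the allocation failed */
        return 0;
}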
