Skip to content

Commit 4077985

Browse files
committed
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  SLAB: Record actual last user of freed objects.
  slub: always align cpu_slab to honor cmpxchg_double requirement
2 parents ffdb8f1 + a947eb9 commit 4077985

File tree

3 files changed

+12
-12
lines changed

3 files changed

+12
-12
lines changed

include/linux/percpu.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -259,6 +259,9 @@ extern void __bad_size_call_parameter(void);
259259
* Special handling for cmpxchg_double. cmpxchg_double is passed two
260260
* percpu variables. The first has to be aligned to a double word
261261
* boundary and the second has to follow directly thereafter.
262+
* We enforce this on all architectures even if they don't support
263+
* a double cmpxchg instruction, since it's a cheap requirement, and it
264+
* avoids breaking the requirement for architectures with the instruction.
262265
*/
263266
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
264267
({ \

mm/slab.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3604,13 +3604,14 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
36043604
* Release an obj back to its cache. If the obj has a constructed state, it must
36053605
* be in this state _before_ it is released. Called with disabled ints.
36063606
*/
3607-
static inline void __cache_free(struct kmem_cache *cachep, void *objp)
3607+
static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3608+
void *caller)
36083609
{
36093610
struct array_cache *ac = cpu_cache_get(cachep);
36103611

36113612
check_irq_off();
36123613
kmemleak_free_recursive(objp, cachep->flags);
3613-
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3614+
objp = cache_free_debugcheck(cachep, objp, caller);
36143615

36153616
kmemcheck_slab_free(cachep, objp, obj_size(cachep));
36163617

@@ -3801,7 +3802,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
38013802
debug_check_no_locks_freed(objp, obj_size(cachep));
38023803
if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
38033804
debug_check_no_obj_freed(objp, obj_size(cachep));
3804-
__cache_free(cachep, objp);
3805+
__cache_free(cachep, objp, __builtin_return_address(0));
38053806
local_irq_restore(flags);
38063807

38073808
trace_kmem_cache_free(_RET_IP_, objp);
@@ -3831,7 +3832,7 @@ void kfree(const void *objp)
38313832
c = virt_to_cache(objp);
38323833
debug_check_no_locks_freed(objp, obj_size(c));
38333834
debug_check_no_obj_freed(objp, obj_size(c));
3834-
__cache_free(c, (void *)objp);
3835+
__cache_free(c, (void *)objp, __builtin_return_address(0));
38353836
local_irq_restore(flags);
38363837
}
38373838
EXPORT_SYMBOL(kfree);

mm/slub.c

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -2320,16 +2320,12 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
23202320
BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
23212321
SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
23222322

2323-
#ifdef CONFIG_CMPXCHG_LOCAL
23242323
/*
2325-
* Must align to double word boundary for the double cmpxchg instructions
2326-
* to work.
2324+
* Must align to double word boundary for the double cmpxchg
2325+
* instructions to work; see __pcpu_double_call_return_bool().
23272326
*/
2328-
s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *));
2329-
#else
2330-
/* Regular alignment is sufficient */
2331-
s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
2332-
#endif
2327+
s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2328+
2 * sizeof(void *));
23332329

23342330
if (!s->cpu_slab)
23352331
return 0;

0 commit comments

Comments (0)