
Commit 445d41d

Merge branch 'slab/for-6.1/kmalloc_size_roundup' into slab/for-next
The first two patches from a series by Kees Cook [1] that introduce
kmalloc_size_roundup(). This will allow merging of per-subsystem patches
using the new function and ultimately stop (ab)using ksize() in a way that
causes ongoing trouble for debugging functionality and static checkers.

[1] https://lore.kernel.org/all/20220923202822.2667581-1-keescook@chromium.org/

--
Resolved a conflict of modifying mm/slab.c __ksize() comment with a commit
that unifies __ksize() implementation into mm/slab_common.c
2 parents af961f8 + 05a9406 commit 445d41d
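As context for the merge, here is a minimal sketch (not from this commit; buf and len are hypothetical) of the ksize() pattern the series retires and its kmalloc_size_roundup() replacement:

/* Old pattern: allocate, then grow into the slack that ksize() reports.
 * The compiler's __alloc_size tracking still assumes the originally
 * requested length, so UBSAN_BOUNDS/FORTIFY_SOURCE may flag later use.
 */
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
        return -ENOMEM;
len = ksize(buf);

/* New pattern: round up to the bucket size before allocating, so the
 * requested size and the usable size agree.
 */
len = kmalloc_size_roundup(len);
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
        return -ENOMEM;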

File tree: 5 files changed (+93, -13 lines)


include/linux/compiler_attributes.h

Lines changed: 2 additions & 1 deletion
@@ -35,7 +35,8 @@
 
 /*
  * Note: do not use this directly. Instead, use __alloc_size() since it is conditionally
- * available and includes other attributes.
+ * available and includes other attributes. For GCC < 9.1, __alloc_size__ gets undefined
+ * in compiler-gcc.h, due to misbehaviors.
  *
  * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alloc_005fsize-function-attribute
  * clang: https://clang.llvm.org/docs/AttributeReference.html#alloc-size

include/linux/compiler_types.h

Lines changed: 5 additions & 3 deletions
@@ -271,14 +271,16 @@ struct ftrace_likely_data {
 
 /*
  * Any place that could be marked with the "alloc_size" attribute is also
- * a place to be marked with the "malloc" attribute. Do this as part of the
- * __alloc_size macro to avoid redundant attributes and to avoid missing a
- * __malloc marking.
+ * a place to be marked with the "malloc" attribute, except those that may
+ * be performing a _reallocation_, as that may alias the existing pointer.
+ * For these, use __realloc_size().
  */
 #ifdef __alloc_size__
 # define __alloc_size(x, ...)	__alloc_size__(x, ## __VA_ARGS__) __malloc
+# define __realloc_size(x, ...)	__alloc_size__(x, ## __VA_ARGS__)
 #else
 # define __alloc_size(x, ...)	__malloc
+# define __realloc_size(x, ...)
 #endif
 
 #ifndef asm_volatile_goto
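To illustrate the distinction (my_resize_buffer and my_alloc_buffer are invented for this sketch, not part of the patch): a function that may return a pointer aliasing its argument must not be marked __malloc, so it gets the size hint alone via __realloc_size():

/* May return a pointer that aliases @p: size hint only, no __malloc. */
void *my_resize_buffer(void *p, size_t new_size, gfp_t flags) __realloc_size(2);

/* Always returns a fresh, non-aliasing allocation: __alloc_size() adds
 * __malloc on top of the size hint.
 */
void *my_alloc_buffer(size_t size, gfp_t flags) __alloc_size(1);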

include/linux/slab.h

Lines changed: 38 additions & 6 deletions
@@ -186,10 +186,25 @@ int kmem_cache_shrink(struct kmem_cache *s);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
+void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
 void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
+size_t __ksize(const void *objp);
+
+/**
+ * ksize - Report actual allocation size of associated object
+ *
+ * @objp: Pointer returned from a prior kmalloc()-family allocation.
+ *
+ * This should not be used for writing beyond the originally requested
+ * allocation size. Either use krealloc() or round up the allocation size
+ * with kmalloc_size_roundup() prior to allocation. If this is used to
+ * access beyond the originally requested allocation size, UBSAN_BOUNDS
+ * and/or FORTIFY_SOURCE may trip, since they only know about the
+ * originally allocated size via the __alloc_size attribute.
+ */
 size_t ksize(const void *objp);
+
 #ifdef CONFIG_PRINTK
 bool kmem_valid_obj(void *object);
 void kmem_dump_obj(void *object);
@@ -614,10 +629,10 @@ static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
  * @new_size: new size of a single member of the array
  * @flags: the type of memory to allocate (see kmalloc)
  */
-static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
-                                                                    size_t new_n,
-                                                                    size_t new_size,
-                                                                    gfp_t flags)
+static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
+                                                                       size_t new_n,
+                                                                       size_t new_size,
+                                                                       gfp_t flags)
 {
 	size_t bytes;
 
@@ -732,11 +747,28 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
 }
 
 extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
-		      __alloc_size(3);
+		      __realloc_size(3);
 extern void kvfree(const void *addr);
 extern void kvfree_sensitive(const void *addr, size_t len);
 
 unsigned int kmem_cache_size(struct kmem_cache *s);
+
+/**
+ * kmalloc_size_roundup - Report allocation bucket size for the given size
+ *
+ * @size: Number of bytes to round up from.
+ *
+ * This returns the number of bytes that would be available in a kmalloc()
+ * allocation of @size bytes. For example, a 126 byte request would be
+ * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
+ * for the general-purpose kmalloc()-based allocations, and is not for the
+ * pre-sized kmem_cache_alloc()-based allocations.)
+ *
+ * Use this to kmalloc() the full bucket size ahead of time instead of using
+ * ksize() to query the size after an allocation.
+ */
+size_t kmalloc_size_roundup(size_t size);
+
 void __init kmem_cache_init_late(void);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
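A hedged usage sketch of the documented pattern (variable names are illustrative, not from the patch): size the buffer up front with kmalloc_size_roundup() instead of probing with ksize() afterwards:

size_t want = 126;
size_t alloc_len = kmalloc_size_roundup(want);	/* 128 with default buckets */
u8 *buf = kmalloc(alloc_len, GFP_KERNEL);

if (!buf)
        return -ENOMEM;
/* All alloc_len bytes are usable, and the size visible to __alloc_size
 * matches the bucket, so UBSAN_BOUNDS and FORTIFY_SOURCE stay quiet.
 */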

mm/slab_common.c

Lines changed: 34 additions & 3 deletions
@@ -734,6 +734,26 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 	return kmalloc_caches[kmalloc_type(flags)][index];
 }
 
+size_t kmalloc_size_roundup(size_t size)
+{
+	struct kmem_cache *c;
+
+	/* Short-circuit the 0 size case. */
+	if (unlikely(size == 0))
+		return 0;
+	/* Short-circuit saturated "too-large" case. */
+	if (unlikely(size == SIZE_MAX))
+		return SIZE_MAX;
+	/* Above the smaller buckets, size is a multiple of page size. */
+	if (size > KMALLOC_MAX_CACHE_SIZE)
+		return PAGE_SIZE << get_order(size);
+
+	/* The flags don't matter since size_index is common to all. */
+	c = kmalloc_slab(size, GFP_KERNEL);
+	return c ? c->object_size : 0;
+}
+EXPORT_SYMBOL(kmalloc_size_roundup);
+
 #ifdef CONFIG_ZONE_DMA
 #define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
 #else
@@ -987,7 +1007,18 @@ void kfree(const void *object)
 }
 EXPORT_SYMBOL(kfree);
 
-/* Uninstrumented ksize. Only called by KASAN. */
+/**
+ * __ksize -- Report full size of underlying allocation
+ * @objp: pointer to the object
+ *
+ * This should only be used internally to query the true size of allocations.
+ * It is not meant to be a way to discover the usable size of an allocation
+ * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
+ * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
+ * and/or FORTIFY_SOURCE.
+ *
+ * Return: size of the actual memory used by @objp in bytes
+ */
 size_t __ksize(const void *object)
 {
 	struct folio *folio;
@@ -1294,8 +1325,8 @@ module_init(slab_proc_init);
 
 #endif	/* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
 
-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
-					   gfp_t flags)
+static __always_inline __realloc_size(2) void *
+__do_krealloc(const void *p, size_t new_size, gfp_t flags)
 {
 	void *ret;
 	size_t ks;
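To make the branches concrete, these are the values the function should return assuming a common SLUB configuration (4 KiB pages, power-of-two buckets, KMALLOC_MAX_CACHE_SIZE of two pages); exact bucket sizes vary by config:

kmalloc_size_roundup(0);		/* 0: zero short-circuit */
kmalloc_size_roundup(SIZE_MAX);		/* SIZE_MAX: saturation short-circuit */
kmalloc_size_roundup(126);		/* 128: object_size of the kmalloc-128 cache */
kmalloc_size_roundup(3 * PAGE_SIZE);	/* 4 * PAGE_SIZE: PAGE_SIZE << get_order(size) */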

mm/slob.c

Lines changed: 14 additions & 0 deletions
@@ -564,6 +564,20 @@ void kfree(const void *block)
 }
 EXPORT_SYMBOL(kfree);
 
+size_t kmalloc_size_roundup(size_t size)
+{
+	/* Short-circuit the 0 size case. */
+	if (unlikely(size == 0))
+		return 0;
+	/* Short-circuit saturated "too-large" case. */
+	if (unlikely(size == SIZE_MAX))
+		return SIZE_MAX;
+
+	return ALIGN(size, ARCH_KMALLOC_MINALIGN);
+}
+
+EXPORT_SYMBOL(kmalloc_size_roundup);
+
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
 size_t __ksize(const void *block)
 {
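SLOB has no size buckets, so rounding is simply to the minimum alignment; assuming ARCH_KMALLOC_MINALIGN is 8 on the target architecture:

kmalloc_size_roundup(13);	/* 16: next multiple of ARCH_KMALLOC_MINALIGN */
kmalloc_size_roundup(126);	/* 128 */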
