Commit 445d41d7 authored by Vlastimil Babka

Merge branch 'slab/for-6.1/kmalloc_size_roundup' into slab/for-next

This merges the first two patches from a series by Kees Cook [1] that introduces
kmalloc_size_roundup(). This will allow per-subsystem patches that use the new
function to be merged, and will ultimately stop the (ab)use of ksize() in ways
that cause ongoing trouble for debugging functionality and static checkers.

[1] https://lore.kernel.org/all/20220923202822.2667581-1-keescook@chromium.org/
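
To make the before/after concrete, here is a minimal sketch of the pattern the
series targets (buf, len and avail are illustrative names, not code from these
patches):

	/* Before: (ab)use ksize() to learn the usable size after allocating. */
	buf = kmalloc(len, GFP_KERNEL);
	avail = ksize(buf);	/* writing past 'len' can trip KASAN/UBSAN/FORTIFY */

	/* After: round up first and allocate the whole bucket explicitly. */
	avail = kmalloc_size_roundup(len);
	buf = kmalloc(avail, GFP_KERNEL);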

--
Resolved a conflict between the modification of the mm/slab.c __ksize() comment
and a commit that unifies the __ksize() implementation into mm/slab_common.c.

Parents: af961f80 05a94065
include/linux/compiler_attributes.h
@@ -35,7 +35,8 @@
 /*
  * Note: do not use this directly. Instead, use __alloc_size() since it is conditionally
- * available and includes other attributes.
+ * available and includes other attributes. For GCC < 9.1, __alloc_size__ gets undefined
+ * in compiler-gcc.h, due to misbehaviors.
  *
  * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alloc_005fsize-function-attribute
  * clang: https://clang.llvm.org/docs/AttributeReference.html#alloc-size
include/linux/compiler_types.h
@@ -271,14 +271,16 @@ struct ftrace_likely_data {
 /*
  * Any place that could be marked with the "alloc_size" attribute is also
- * a place to be marked with the "malloc" attribute. Do this as part of the
- * __alloc_size macro to avoid redundant attributes and to avoid missing a
- * __malloc marking.
+ * a place to be marked with the "malloc" attribute, except those that may
+ * be performing a _reallocation_, as that may alias the existing pointer.
+ * For these, use __realloc_size().
  */
 #ifdef __alloc_size__
 # define __alloc_size(x, ...)	__alloc_size__(x, ## __VA_ARGS__) __malloc
+# define __realloc_size(x, ...)	__alloc_size__(x, ## __VA_ARGS__)
 #else
 # define __alloc_size(x, ...)	__malloc
+# define __realloc_size(x, ...)
 #endif
 #ifndef asm_volatile_goto
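
For context, a sketch of how the two annotations are meant to be split
(my_alloc() and my_resize() are hypothetical functions, not part of this diff):
a fresh allocator keeps __malloc via __alloc_size(), while a reallocator, which
may return (an alias of) its argument, uses __realloc_size() to keep the size
hint without the no-alias promise.

	/* Fresh allocation: the result never aliases another pointer. */
	void *my_alloc(size_t len) __alloc_size(1);

	/* Reallocation: may return 'p' itself, so __malloc would be wrong. */
	void *my_resize(void *p, size_t new_len) __realloc_size(2);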
include/linux/slab.h
@@ -186,10 +186,25 @@ int kmem_cache_shrink(struct kmem_cache *s);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
+void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
 void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
+size_t __ksize(const void *objp);
+
+/**
+ * ksize - Report actual allocation size of associated object
+ *
+ * @objp: Pointer returned from a prior kmalloc()-family allocation.
+ *
+ * This should not be used for writing beyond the originally requested
+ * allocation size. Either use krealloc() or round up the allocation size
+ * with kmalloc_size_roundup() prior to allocation. If this is used to
+ * access beyond the originally requested allocation size, UBSAN_BOUNDS
+ * and/or FORTIFY_SOURCE may trip, since they only know about the
+ * originally allocated size via the __alloc_size attribute.
+ */
 size_t ksize(const void *objp);
 #ifdef CONFIG_PRINTK
 bool kmem_valid_obj(void *object);
 void kmem_dump_obj(void *object);
@@ -614,10 +629,10 @@ static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_
  * @new_size: new size of a single member of the array
  * @flags: the type of memory to allocate (see kmalloc)
  */
-static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
-								    size_t new_n,
-								    size_t new_size,
-								    gfp_t flags)
+static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
+								       size_t new_n,
+								       size_t new_size,
+								       gfp_t flags)
 {
 	size_t bytes;
@@ -732,11 +747,28 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla
 }
 extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
-		      __alloc_size(3);
+		      __realloc_size(3);
 extern void kvfree(const void *addr);
 extern void kvfree_sensitive(const void *addr, size_t len);
 unsigned int kmem_cache_size(struct kmem_cache *s);
+
+/**
+ * kmalloc_size_roundup - Report allocation bucket size for the given size
+ *
+ * @size: Number of bytes to round up from.
+ *
+ * This returns the number of bytes that would be available in a kmalloc()
+ * allocation of @size bytes. For example, a 126 byte request would be
+ * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
+ * for the general-purpose kmalloc()-based allocations, and is not for the
+ * pre-sized kmem_cache_alloc()-based allocations.)
+ *
+ * Use this to kmalloc() the full bucket size ahead of time instead of using
+ * ksize() to query the size after an allocation.
+ */
+size_t kmalloc_size_roundup(size_t size);
+
 void __init kmem_cache_init_late(void);
 #if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
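A minimal usage sketch of the new helper, following the kerneldoc above
(payload_len and data are illustrative names only):

	size_t alloc_len = kmalloc_size_roundup(payload_len);
	u8 *data = kmalloc(alloc_len, GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	/* All alloc_len bytes are now legitimately usable; no ksize() needed. */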
mm/slab_common.c
@@ -734,6 +734,26 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 	return kmalloc_caches[kmalloc_type(flags)][index];
 }
+
+size_t kmalloc_size_roundup(size_t size)
+{
+	struct kmem_cache *c;
+
+	/* Short-circuit the 0 size case. */
+	if (unlikely(size == 0))
+		return 0;
+	/* Short-circuit saturated "too-large" case. */
+	if (unlikely(size == SIZE_MAX))
+		return SIZE_MAX;
+	/* Above the smaller buckets, size is a multiple of page size. */
+	if (size > KMALLOC_MAX_CACHE_SIZE)
+		return PAGE_SIZE << get_order(size);
+
+	/* The flags don't matter since size_index is common to all. */
+	c = kmalloc_slab(size, GFP_KERNEL);
+	return c ? c->object_size : 0;
+}
+EXPORT_SYMBOL(kmalloc_size_roundup);
+
 #ifdef CONFIG_ZONE_DMA
 #define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
 #else
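As a worked example of this SLAB/SLUB path, assuming 4 KiB pages: a 126-byte
request falls into a kmalloc bucket and returns that cache's object_size (128),
while a request above KMALLOC_MAX_CACHE_SIZE, say 9000 bytes, returns
PAGE_SIZE << get_order(9000) = 16384.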
@@ -987,7 +1007,18 @@ void kfree(const void *object)
 }
 EXPORT_SYMBOL(kfree);
-/* Uninstrumented ksize. Only called by KASAN. */
+/**
+ * __ksize -- Report full size of underlying allocation
+ * @objp: pointer to the object
+ *
+ * This should only be used internally to query the true size of allocations.
+ * It is not meant to be a way to discover the usable size of an allocation
+ * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
+ * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
+ * and/or FORTIFY_SOURCE.
+ *
+ * Return: size of the actual memory used by @objp in bytes
+ */
 size_t __ksize(const void *object)
 {
 	struct folio *folio;
@@ -1294,8 +1325,8 @@ module_init(slab_proc_init);
 #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
-					   gfp_t flags)
+static __always_inline __realloc_size(2) void *
+__do_krealloc(const void *p, size_t new_size, gfp_t flags)
 {
 	void *ret;
 	size_t ks;
mm/slob.c
@@ -564,6 +564,20 @@ void kfree(const void *block)
 }
 EXPORT_SYMBOL(kfree);
+
+size_t kmalloc_size_roundup(size_t size)
+{
+	/* Short-circuit the 0 size case. */
+	if (unlikely(size == 0))
+		return 0;
+	/* Short-circuit saturated "too-large" case. */
+	if (unlikely(size == SIZE_MAX))
+		return SIZE_MAX;
+
+	return ALIGN(size, ARCH_KMALLOC_MINALIGN);
+}
+EXPORT_SYMBOL(kmalloc_size_roundup);
+
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
 size_t __ksize(const void *block)
 {
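For comparison with the bucket-based version above, the SLOB variant only
rounds to the minimum kmalloc alignment: on a configuration where
ARCH_KMALLOC_MINALIGN is 8, kmalloc_size_roundup(13) is ALIGN(13, 8) = 16.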