Commit 67f2df3b authored by Kees Cook, committed by Vlastimil Babka

mm/slab: Plumb kmem_buckets into __do_kmalloc_node()

Introduce CONFIG_SLAB_BUCKETS which provides the infrastructure to
support separated kmalloc buckets (in the following kmem_buckets_create()
patches and future codetag-based separation). Since this will provide
a mitigation for a very common case of exploits, it is recommended to
enable this feature for general-purpose distros. By default, the new
Kconfig will be enabled if CONFIG_SLAB_FREELIST_HARDENED is enabled (and
it is added to the hardening.config Kconfig fragment).

To be able to choose which buckets to allocate from, make the buckets
available to the internal kmalloc interfaces by adding them as the
second argument, rather than depending on the buckets being chosen from
the fixed set of global buckets. Where the bucket is not available,
pass NULL, which means "use the default system kmalloc bucket set"
(the prior existing behavior), as implemented in kmalloc_slab().

To avoid adding the extra argument when !CONFIG_SLAB_BUCKETS, only the
top-level macros and static inlines use the buckets argument (where
they are stripped out and compiled out respectively). The actual extern
functions can then be built without the argument, and the internals
fall back to the global kmalloc buckets unconditionally.
Co-developed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Kees Cook <kees@kernel.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 72e0fe22
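To see the compile-out trick from the last paragraph of the commit message in isolation, here is a small standalone userspace sketch (not kernel code). The kmem_buckets stand-in type, the demo_* names, and the SLAB_BUCKETS_DEMO switch are invented for illustration only; SLAB_BUCKETS_DEMO plays the role of CONFIG_SLAB_BUCKETS.

/*
 * Standalone sketch of the bucket-parameter macros added by this patch.
 * "kmem_buckets" here is a dummy stand-in and the demo_* names are
 * invented for illustration; they are not kernel API.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct { const char *name; } kmem_buckets;   /* stand-in type */

#define SLAB_BUCKETS_DEMO 1   /* flip to 0 to mimic CONFIG_SLAB_BUCKETS=n */

#if SLAB_BUCKETS_DEMO
#define DECL_BUCKET_PARAMS(_size, _b)  size_t (_size), kmem_buckets *(_b)
#define PASS_BUCKET_PARAMS(_size, _b)  (_size), (_b)
#define PASS_BUCKET_PARAM(_b)          (_b)
#else
#define DECL_BUCKET_PARAMS(_size, _b)  size_t (_size)
#define PASS_BUCKET_PARAMS(_size, _b)  (_size)
#define PASS_BUCKET_PARAM(_b)          NULL
#endif

/* Innermost helper always takes a bucket pointer; NULL means "use the
 * default global buckets", mirroring what kmalloc_slab() does. */
static void *demo_do_kmalloc(size_t size, kmem_buckets *b, int node)
{
        printf("alloc %zu bytes from %s (node %d)\n",
               size, b ? b->name : "default buckets", node);
        return NULL;
}

/* "Extern" layer: the bucket parameter only exists when the demo config
 * is on, and is forwarded (or replaced by NULL) via PASS_BUCKET_PARAM. */
static void *demo_kmalloc_node(DECL_BUCKET_PARAMS(size, b), int node)
{
        return demo_do_kmalloc(size, PASS_BUCKET_PARAM(b), node);
}

int main(void)
{
        /* Call sites always spell the bucket argument via PASS_BUCKET_PARAMS,
         * so they compile unchanged whether or not it is stripped out. */
        demo_kmalloc_node(PASS_BUCKET_PARAMS(128, NULL), -1);
        return 0;
}

Flipping SLAB_BUCKETS_DEMO to 0 removes the bucket parameter from the function signature while every call site compiles unchanged, which is the same property the patch relies on so that callers need no changes when CONFIG_SLAB_BUCKETS=n.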
@@ -570,6 +570,21 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
 					   int node) __assume_slab_alignment __malloc;
 #define kmem_cache_alloc_node(...)	alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
 
+/*
+ * These macros allow declaring a kmem_buckets * parameter alongside size, which
+ * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
+ * sites don't have to pass NULL.
+ */
+#ifdef CONFIG_SLAB_BUCKETS
+#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size), kmem_buckets *(_b)
+#define PASS_BUCKET_PARAMS(_size, _b)	(_size), (_b)
+#define PASS_BUCKET_PARAM(_b)		(_b)
+#else
+#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size)
+#define PASS_BUCKET_PARAMS(_size, _b)	(_size)
+#define PASS_BUCKET_PARAM(_b)		NULL
+#endif
+
 /*
  * The following functions are not to be used directly and are intended only
  * for internal use from kmalloc() and kmalloc_node()
@@ -579,7 +594,7 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
 void *__kmalloc_noprof(size_t size, gfp_t flags)
 				__assume_kmalloc_alignment __alloc_size(1);
 
-void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node)
+void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
 				__assume_kmalloc_alignment __alloc_size(1);
 
 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
@@ -680,7 +695,7 @@ static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gf
 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
 				flags, node, size);
 	}
-	return __kmalloc_node_noprof(size, flags, node);
+	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
 }
 #define kmalloc_node(...)			alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
 
@@ -731,8 +746,10 @@ static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(voi
  */
 #define kcalloc(n, size, flags)		kmalloc_array(n, size, (flags) | __GFP_ZERO)
 
-void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
-				       unsigned long caller) __alloc_size(1);
+void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
+					 unsigned long caller) __alloc_size(1);
+#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
+	__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
 #define kmalloc_node_track_caller(...)		\
 	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
 
@@ -758,7 +775,7 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
 		return kmalloc_node_noprof(bytes, flags, node);
-	return __kmalloc_node_noprof(bytes, flags, node);
+	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
 }
 #define kmalloc_array_node(...)			alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
...
@@ -20,6 +20,7 @@ CONFIG_RANDOMIZE_MEMORY=y
 # Randomize allocator freelists, harden metadata.
 CONFIG_SLAB_FREELIST_RANDOM=y
 CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_SLAB_BUCKETS=y
 CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
 CONFIG_RANDOM_KMALLOC_CACHES=y
...
@@ -273,6 +273,23 @@ config SLAB_FREELIST_HARDENED
 	  sacrifices to harden the kernel slab allocator against common
 	  freelist exploit methods.
 
+config SLAB_BUCKETS
+	bool "Support allocation from separate kmalloc buckets"
+	depends on !SLUB_TINY
+	default SLAB_FREELIST_HARDENED
+	help
+	  Kernel heap attacks frequently depend on being able to create
+	  specifically-sized allocations with user-controlled contents
+	  that will be allocated into the same kmalloc bucket as a
+	  target object. To avoid sharing these allocation buckets,
+	  provide an explicitly separated set of buckets to be used for
+	  user-controlled allocations. This may very slightly increase
+	  memory fragmentation, though in practice it's only a handful
+	  of extra pages since the bulk of user-controlled allocations
+	  are relatively long-lived.
+
+	  If unsure, say Y.
+
 config SLUB_STATS
 	default n
 	bool "Enable performance statistics"
...
@@ -403,16 +403,18 @@ static inline unsigned int size_index_elem(unsigned int bytes)
  * KMALLOC_MAX_CACHE_SIZE and the caller must check that.
  */
 static inline struct kmem_cache *
-kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
+kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
 {
 	unsigned int index;
 
+	if (!b)
+		b = &kmalloc_caches[kmalloc_type(flags, caller)];
 	if (size <= 192)
 		index = kmalloc_size_index[size_index_elem(size)];
 	else
 		index = fls(size - 1);
 
-	return kmalloc_caches[kmalloc_type(flags, caller)][index];
+	return (*b)[index];
 }
 
 gfp_t kmalloc_fix_flags(gfp_t flags);
...
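As a worked example of the size-to-index selection in kmalloc_slab() above: sizes of 192 bytes or less go through the kmalloc_size_index lookup table, while larger sizes use fls(size - 1), i.e. the smallest power-of-two size class that fits the request; the chosen kmem_buckets array (the default set when b is NULL) is then indexed with the result. The short userspace sketch below reproduces only the large-size arithmetic; fls_demo is a stand-in for the kernel's fls(), and the expected output is shown in the trailing comment.

#include <stdio.h>

/* fls(x) ("find last set"): 1-based index of the most significant set
 * bit, 0 for x == 0. Implemented with a GCC builtin for this sketch. */
static unsigned int fls_demo(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int sizes[] = { 193, 256, 300, 1024, 4000 };

        /* Above 192 bytes, kmalloc_slab() computes index = fls(size - 1)
         * and uses it to index the selected kmem_buckets array. */
        for (int i = 0; i < 5; i++) {
                unsigned int index = fls_demo(sizes[i] - 1);
                printf("size %4u -> index %2u -> kmalloc-%u\n",
                       sizes[i], index, 1u << index);
        }
        return 0;
}
/* size  193 -> index  8 -> kmalloc-256
 * size  256 -> index  8 -> kmalloc-256
 * size  300 -> index  9 -> kmalloc-512
 * size 1024 -> index 10 -> kmalloc-1024
 * size 4000 -> index 12 -> kmalloc-4096 */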
@@ -703,7 +703,7 @@ size_t kmalloc_size_roundup(size_t size)
 		 * The flags don't matter since size_index is common to all.
 		 * Neither does the caller for just getting ->object_size.
 		 */
-		return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
+		return kmalloc_slab(size, NULL, GFP_KERNEL, 0)->object_size;
 	}
 
 	/* Above the smaller buckets, size is a multiple of page size. */
...
@@ -4117,7 +4117,7 @@ void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_large_node_noprof);
 
 static __always_inline
-void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
+void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,
 			unsigned long caller)
 {
 	struct kmem_cache *s;
@@ -4133,32 +4133,32 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
 	if (unlikely(!size))
 		return ZERO_SIZE_PTR;
 
-	s = kmalloc_slab(size, flags, caller);
+	s = kmalloc_slab(size, b, flags, caller);
 
 	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
 	ret = kasan_kmalloc(s, ret, size, flags);
 	trace_kmalloc(caller, ret, size, s->size, flags, node);
 	return ret;
 }
 
-void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node)
+void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
 {
-	return __do_kmalloc_node(size, flags, node, _RET_IP_);
+	return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node_noprof);
 
 void *__kmalloc_noprof(size_t size, gfp_t flags)
 {
-	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
+	return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_noprof);
 
-void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags,
-				       int node, unsigned long caller)
+void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags,
+					 int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, caller);
+	return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);
 }
-EXPORT_SYMBOL(kmalloc_node_track_caller_noprof);
+EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
 
 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
...
@@ -1729,6 +1729,7 @@ sub dump_function($$) {
 	$prototype =~ s/__printf\s*\(\s*\d*\s*,\s*\d*\s*\) +//;
 	$prototype =~ s/__(?:re)?alloc_size\s*\(\s*\d+\s*(?:,\s*\d+\s*)?\) +//;
 	$prototype =~ s/__diagnose_as\s*\(\s*\S+\s*(?:,\s*\d+\s*)*\) +//;
+	$prototype =~ s/DECL_BUCKET_PARAMS\s*\(\s*(\S+)\s*,\s*(\S+)\s*\)/$1, $2/;
 	my $define = $prototype =~ s/^#\s*define\s+//; #ak added
 	$prototype =~ s/__attribute_const__ +//;
 	$prototype =~ s/__attribute__\s*\(\(
...