Commit 61766652 authored by Vlastimil Babka's avatar Vlastimil Babka

Merge branch 'slab/for-6.2/kmalloc_redzone' into slab/for-next

Add a new slub_kunit test for the extended kmalloc redzone check, by
Feng Tang. Also prevent unwanted kfence interaction with all slub kunit
tests.
parents b5e72d27 6cd6d33c
...@@ -9,10 +9,25 @@ ...@@ -9,10 +9,25 @@
static struct kunit_resource resource; static struct kunit_resource resource;
static int slab_errors; static int slab_errors;
/*
 * Wrapper function for kmem_cache_create(), which reduces 2 parameters:
 * 'align' and 'ctor', and sets SLAB_SKIP_KFENCE flag to avoid getting an
 * object from kfence pool, where the operation could be caught by both
 * our test and kfence sanity check.
 *
 * Returns the new cache, or NULL if kmem_cache_create() failed.
 */
static struct kmem_cache *test_kmem_cache_create(const char *name,
				unsigned int size, slab_flags_t flags)
{
	struct kmem_cache *s = kmem_cache_create(name, size, 0,
					(flags | SLAB_NO_USER_FLAGS), NULL);

	/* kmem_cache_create() can fail; don't dereference a NULL cache. */
	if (!s)
		return NULL;

	s->flags |= SLAB_SKIP_KFENCE;
	return s;
}
static void test_clobber_zone(struct kunit *test) static void test_clobber_zone(struct kunit *test)
{ {
struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0, struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64,
SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL); SLAB_RED_ZONE);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current(); kasan_disable_current();
...@@ -29,8 +44,8 @@ static void test_clobber_zone(struct kunit *test) ...@@ -29,8 +44,8 @@ static void test_clobber_zone(struct kunit *test)
#ifndef CONFIG_KASAN #ifndef CONFIG_KASAN
static void test_next_pointer(struct kunit *test) static void test_next_pointer(struct kunit *test)
{ {
struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0, struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
SLAB_POISON|SLAB_NO_USER_FLAGS, NULL); 64, SLAB_POISON);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
unsigned long tmp; unsigned long tmp;
unsigned long *ptr_addr; unsigned long *ptr_addr;
...@@ -74,8 +89,8 @@ static void test_next_pointer(struct kunit *test) ...@@ -74,8 +89,8 @@ static void test_next_pointer(struct kunit *test)
static void test_first_word(struct kunit *test) static void test_first_word(struct kunit *test)
{ {
struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0, struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free",
SLAB_POISON|SLAB_NO_USER_FLAGS, NULL); 64, SLAB_POISON);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p); kmem_cache_free(s, p);
...@@ -89,8 +104,8 @@ static void test_first_word(struct kunit *test) ...@@ -89,8 +104,8 @@ static void test_first_word(struct kunit *test)
static void test_clobber_50th_byte(struct kunit *test) static void test_clobber_50th_byte(struct kunit *test)
{ {
struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0, struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free",
SLAB_POISON|SLAB_NO_USER_FLAGS, NULL); 64, SLAB_POISON);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p); kmem_cache_free(s, p);
...@@ -105,8 +120,8 @@ static void test_clobber_50th_byte(struct kunit *test) ...@@ -105,8 +120,8 @@ static void test_clobber_50th_byte(struct kunit *test)
static void test_clobber_redzone_free(struct kunit *test) static void test_clobber_redzone_free(struct kunit *test)
{ {
struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0, struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64,
SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL); SLAB_RED_ZONE);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current(); kasan_disable_current();
...@@ -120,6 +135,27 @@ static void test_clobber_redzone_free(struct kunit *test) ...@@ -120,6 +135,27 @@ static void test_clobber_redzone_free(struct kunit *test)
kmem_cache_destroy(s); kmem_cache_destroy(s);
} }
/*
 * Check the extended kmalloc redzone: allocate 18 bytes from a 32-byte
 * kmalloc-style cache, then write just past the requested size. The two
 * clobbered bytes are expected to be reported as 2 slab errors by
 * validate_slab_cache().
 */
static void test_kmalloc_redzone_access(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
/* Request only 18 of the 32 bytes; the remainder should be redzone. */
u8 *p = kmalloc_trace(s, GFP_KERNEL, 18);
/*
 * NOTE(review): presumably disabled so the out-of-bounds writes below
 * reach SLUB's redzone check rather than being trapped by KASAN first
 * — same pattern as the sibling redzone tests.
 */
kasan_disable_current();
/* Suppress the -Warray-bounds warning */
OPTIMIZER_HIDE_VAR(p);
/* Clobber two bytes beyond the 18 requested. */
p[18] = 0xab;
p[19] = 0xab;
/* The validation pass should flag exactly the 2 clobbered bytes. */
validate_slab_cache(s);
KUNIT_EXPECT_EQ(test, 2, slab_errors);
kasan_enable_current();
kmem_cache_free(s, p);
kmem_cache_destroy(s);
}
static int test_init(struct kunit *test) static int test_init(struct kunit *test)
{ {
slab_errors = 0; slab_errors = 0;
...@@ -139,6 +175,7 @@ static struct kunit_case test_cases[] = { ...@@ -139,6 +175,7 @@ static struct kunit_case test_cases[] = {
#endif #endif
KUNIT_CASE(test_clobber_redzone_free), KUNIT_CASE(test_clobber_redzone_free),
KUNIT_CASE(test_kmalloc_redzone_access),
{} {}
}; };
......
...@@ -346,7 +346,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size, ...@@ -346,7 +346,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
SLAB_ACCOUNT) SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB) #elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \ #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
SLAB_TEMPORARY | SLAB_ACCOUNT | SLAB_NO_USER_FLAGS) SLAB_TEMPORARY | SLAB_ACCOUNT | \
SLAB_NO_USER_FLAGS | SLAB_KMALLOC)
#else #else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE) #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif #endif
...@@ -366,6 +367,7 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size, ...@@ -366,6 +367,7 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
SLAB_RECLAIM_ACCOUNT | \ SLAB_RECLAIM_ACCOUNT | \
SLAB_TEMPORARY | \ SLAB_TEMPORARY | \
SLAB_ACCOUNT | \ SLAB_ACCOUNT | \
SLAB_KMALLOC | \
SLAB_NO_USER_FLAGS) SLAB_NO_USER_FLAGS)
bool __kmem_cache_empty(struct kmem_cache *); bool __kmem_cache_empty(struct kmem_cache *);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment