Commit 682ed089 authored by Andrey Konovalov, committed by Andrew Morton

kasan: only define kasan_cache_create for Generic mode

Right now, kasan_cache_create() assigns SLAB_KASAN for all KASAN modes and
then sets up metadata-related cache parameters for the Generic mode.

SLAB_KASAN is used in two places:

1. In slab_ksize() to account for per-object metadata when
   calculating the size of the accessible memory within the object.
2. In slab_common.c via kasan_never_merge() to prevent merging of
   caches with per-object metadata.

Both cases are only relevant when per-object metadata is present, which is
only the case with the Generic mode.
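
For illustration, a minimal standalone sketch of these two call sites. This is simplified and abbreviated from the real slab_ksize() and kasan_never_merge(): the struct layout, the other flag checks, and any kasan_requires_meta() gating are omitted, so it is not the exact kernel code.

/*
 * Sketch of the two SLAB_KASAN consumers (assumptions noted above).
 */
#include <stddef.h>

typedef unsigned int slab_flags_t;

#define SLAB_KASAN ((slab_flags_t)0x08000000U)

struct kmem_cache {
	slab_flags_t flags;
	unsigned int object_size;	/* size requested by the cache user */
	unsigned int size;		/* layout size incl. KASAN metadata */
};

/* 1. Only the original object_size is accessible to the caller when
 *    KASAN placed per-object metadata inside the allocation. */
static size_t slab_ksize(const struct kmem_cache *s)
{
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	return s->size;
}

/* 2. Caches carrying any of the returned flags are never merged by
 *    the slab core, keeping metadata-bearing caches separate. */
static slab_flags_t kasan_never_merge(void)
{
	return SLAB_KASAN;
}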

Thus, assign SLAB_KASAN and define kasan_cache_create() only for the
Generic mode.

Also update the SLAB_KASAN-related comment.

Link: https://lkml.kernel.org/r/61faa2aa1906e2d02c97d00ddf99ce8911dda095.1662411799.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Peter Collingbourne <pcc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent be95e13f
@@ -128,15 +128,6 @@ static __always_inline void kasan_unpoison_pages(struct page *page,
 	__kasan_unpoison_pages(page, order, init);
 }
 
-void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
-			  slab_flags_t *flags);
-static __always_inline void kasan_cache_create(struct kmem_cache *cache,
-					       unsigned int *size, slab_flags_t *flags)
-{
-	if (kasan_enabled())
-		__kasan_cache_create(cache, size, flags);
-}
-
 void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
 static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
 {
@@ -260,9 +251,6 @@ static inline void kasan_poison_pages(struct page *page, unsigned int order,
 				     bool init) {}
 static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
 					bool init) {}
-static inline void kasan_cache_create(struct kmem_cache *cache,
-				      unsigned int *size,
-				      slab_flags_t *flags) {}
 static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
 static inline void kasan_poison_slab(struct slab *slab) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
@@ -316,6 +304,8 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
 
 size_t kasan_metadata_size(struct kmem_cache *cache);
 slab_flags_t kasan_never_merge(void);
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+			slab_flags_t *flags);
 
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -334,6 +324,10 @@ static inline slab_flags_t kasan_never_merge(void)
 {
 	return 0;
 }
+/* And no cache-related metadata initialization is required. */
+static inline void kasan_cache_create(struct kmem_cache *cache,
+				      unsigned int *size,
+				      slab_flags_t *flags) {}
 
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
...
@@ -106,7 +106,7 @@
 # define SLAB_ACCOUNT 0
 #endif
 
-#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_GENERIC
 #define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
 #else
 #define SLAB_KASAN 0
...
@@ -110,22 +110,6 @@ void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
 			     KASAN_PAGE_FREE, init);
 }
 
-void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
-			  slab_flags_t *flags)
-{
-	/*
-	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
-	 * KASAN. Currently this flag is used in two places:
-	 * 1. In slab_ksize() when calculating the size of the accessible
-	 *    memory within the object.
-	 * 2. In slab_common.c to prevent merging of sanitized caches.
-	 */
-	*flags |= SLAB_KASAN;
-
-	if (kasan_requires_meta())
-		kasan_init_cache_meta(cache, size);
-}
-
 void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
 {
 	cache->kasan_info.is_kmalloc = true;
...
@@ -352,11 +352,26 @@ static inline unsigned int optimal_redzone(unsigned int object_size)
 		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
 }
 
-void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size)
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+			slab_flags_t *flags)
 {
 	unsigned int ok_size;
 	unsigned int optimal_size;
 
+	if (!kasan_requires_meta())
+		return;
+
+	/*
+	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN
+	 * and that thus have per-object metadata.
+	 * Currently this flag is used in two places:
+	 * 1. In slab_ksize() to account for per-object metadata when
+	 *    calculating the size of the accessible memory within the object.
+	 * 2. In slab_common.c via kasan_never_merge() to prevent merging of
+	 *    caches with per-object metadata.
+	 */
+	*flags |= SLAB_KASAN;
+
 	ok_size = *size;
 
 	/* Add alloc meta into redzone. */
...
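
As a usage note, a hypothetical caller-side sketch follows. example_cache_setup() and its call point are invented for illustration; only the kasan_cache_create() prototype matches the patch. It shows why size and flags are passed by pointer, and that with this patch a non-Generic KASAN build compiles the call down to the empty inline stub.

/*
 * Hypothetical caller of kasan_cache_create() (see assumptions above).
 */
typedef unsigned int slab_flags_t;
struct kmem_cache;

/* As declared for CONFIG_KASAN_GENERIC after this patch. */
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

static void example_cache_setup(struct kmem_cache *s,
				unsigned int object_size,
				slab_flags_t flags)
{
	unsigned int size = object_size;

	/*
	 * Generic KASAN may grow 'size' to make room for redzones and
	 * per-object metadata and ORs SLAB_KASAN into 'flags'; with the
	 * other KASAN modes this call is now an empty inline stub.
	 */
	kasan_cache_create(s, &size, &flags);

	/* ... lay out the cache using the final size and flags ... */
}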