Commit 07e8481d authored by Marco Elver, committed by Linus Torvalds

kfence: always use static branches to guard kfence_alloc()

Regardless of the KFENCE mode (CONFIG_KFENCE_STATIC_KEYS=y, which uses static
keys to gate allocations, or =n, which uses a simple dynamic branch), always
use a static branch to avoid the dynamic branch in kfence_alloc() if KFENCE
was disabled at boot.

For CONFIG_KFENCE_STATIC_KEYS=n, this now avoids the dynamic branch if
KFENCE was disabled at boot.

To simplify, this also unifies the location where kfence_allocation_gate is
read-checked: the check is now inline in kfence_alloc(). (A generic sketch of
the static-branch pattern this relies on follows the commit metadata below.)

Link: https://lkml.kernel.org/r/20211019102524.2807208-1-elver@google.com
Signed-off-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 49332956
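The change relies on the kernel's static-branch (jump label) machinery: a key
that is off by default costs only a patched no-op on the fast path until it is
flipped at runtime. The following is an illustrative sketch of that pattern
only; my_feature_key, my_feature_enable, hot_path and do_feature_work are
made-up names and not part of the KFENCE code.

#include <linux/static_key.h>

/* Off by default: the branch in hot_path() is patched out until enabled. */
static DEFINE_STATIC_KEY_FALSE(my_feature_key);

void do_feature_work(void);	/* hypothetical slow-path helper */

void my_feature_enable(void)
{
	/* Patches every branch site so the slow path is taken from now on. */
	static_branch_enable(&my_feature_key);
}

void hot_path(void)
{
	/* Compiles to a patched no-op/jump rather than a load-and-test. */
	if (static_branch_unlikely(&my_feature_key))
		do_feature_work();
}

KFENCE applies the same idea: kfence_allocation_key starts disabled, and if
KFENCE is off at boot it is simply never enabled, so kfence_alloc() reduces to
a no-op branch.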
include/linux/kfence.h

@@ -14,6 +14,9 @@
 
 #ifdef CONFIG_KFENCE
 
+#include <linux/atomic.h>
+#include <linux/static_key.h>
+
 /*
  * We allocate an even number of pages, as it simplifies calculations to map
  * address to metadata indices; effectively, the very first page serves as an
@@ -22,13 +25,8 @@
 #define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
 extern char *__kfence_pool;
 
-#ifdef CONFIG_KFENCE_STATIC_KEYS
-#include <linux/static_key.h>
 DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
-#else
-#include <linux/atomic.h>
 extern atomic_t kfence_allocation_gate;
-#endif
 
 /**
  * is_kfence_address() - check if an address belongs to KFENCE pool
@@ -116,13 +114,16 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
  */
 static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 {
-#ifdef CONFIG_KFENCE_STATIC_KEYS
-	if (static_branch_unlikely(&kfence_allocation_key))
+#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
+	if (!static_branch_unlikely(&kfence_allocation_key))
+		return NULL;
 #else
-	if (unlikely(!atomic_read(&kfence_allocation_gate)))
+	if (!static_branch_likely(&kfence_allocation_key))
+		return NULL;
 #endif
-		return __kfence_alloc(s, size, flags);
-	return NULL;
+	if (likely(atomic_read(&kfence_allocation_gate)))
+		return NULL;
+	return __kfence_alloc(s, size, flags);
 }
 
 /**
mm/kfence/core.c

@@ -104,10 +104,11 @@ struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
 static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
 static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
 
-#ifdef CONFIG_KFENCE_STATIC_KEYS
-/* The static key to set up a KFENCE allocation. */
+/*
+ * The static key to set up a KFENCE allocation; or if static keys are not used
+ * to gate allocations, to avoid a load and compare if KFENCE is disabled.
+ */
 DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
-#endif
 
 /* Gates the allocation, ensuring only one succeeds in a given period. */
 atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
@@ -774,6 +775,8 @@ void __init kfence_init(void)
 		return;
 	}
 
+	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
+		static_branch_enable(&kfence_allocation_key);
 	WRITE_ONCE(kfence_enabled, true);
 	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
 	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
@@ -866,12 +869,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 		return NULL;
 	}
 
-	/*
-	 * allocation_gate only needs to become non-zero, so it doesn't make
-	 * sense to continue writing to it and pay the associated contention
-	 * cost, in case we have a large number of concurrent allocations.
-	 */
-	if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
+	if (atomic_inc_return(&kfence_allocation_gate) > 1)
 		return NULL;
 #ifdef CONFIG_KFENCE_STATIC_KEYS
 	/*
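For clarity, here is a userspace C11 analogue of the sampling gate that
__kfence_alloc() relies on. It is illustrative only and not kernel code;
allocation_gate, try_sample and open_gate are made-up names. The timer opens
the gate by resetting it to zero, and of all racing allocations only the one
that moves it from 0 to 1 is sampled; the kernel's
atomic_inc_return(&kfence_allocation_gate) > 1 check corresponds to the
fetch_add below.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int allocation_gate = 1;	/* non-zero: gate closed */

/* Allocation fast path: only the caller that observes 0 wins the sample. */
static bool try_sample(void)
{
	/* atomic_fetch_add returns the previous value. */
	return atomic_fetch_add(&allocation_gate, 1) == 0;
}

/* Periodic timer work: open the gate for exactly one winner. */
static void open_gate(void)
{
	atomic_store(&allocation_gate, 0);
}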