Commit 2b830526 authored by Alexander Potapenko's avatar Alexander Potapenko Committed by Linus Torvalds

kfence, kasan: make KFENCE compatible with KASAN

Make KFENCE compatible with KASAN. Currently this helps test KFENCE
itself, where KASAN can catch potential corruptions to KFENCE state, or
other corruptions that may be a result of freepointer corruptions in the
main allocators.

[akpm@linux-foundation.org: merge fixup]
[andreyknvl@google.com: untag addresses for KFENCE]
  Link: https://lkml.kernel.org/r/9dc196006921b191d25d10f6e611316db7da2efc.1611946152.git.andreyknvl@google.com

Link: https://lkml.kernel.org/r/20201103175841.3495947-7-elver@google.com
Signed-off-by: default avatarMarco Elver <elver@google.com>
Signed-off-by: default avatarAlexander Potapenko <glider@google.com>
Signed-off-by: default avatarAndrey Konovalov <andreyknvl@google.com>
Reviewed-by: default avatarDmitry Vyukov <dvyukov@google.com>
Reviewed-by: default avatarJann Horn <jannh@google.com>
Co-developed-by: default avatarMarco Elver <elver@google.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hillf Danton <hdanton@sina.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joern Engel <joern@purestorage.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: SeongJae Park <sjpark@amazon.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent b89fb5ef
...@@ -5,7 +5,7 @@ config HAVE_ARCH_KFENCE ...@@ -5,7 +5,7 @@ config HAVE_ARCH_KFENCE
menuconfig KFENCE menuconfig KFENCE
bool "KFENCE: low-overhead sampling-based memory safety error detector" bool "KFENCE: low-overhead sampling-based memory safety error detector"
depends on HAVE_ARCH_KFENCE && !KASAN && (SLAB || SLUB) depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
select STACKTRACE select STACKTRACE
help help
KFENCE is a low-overhead sampling-based detector of heap out-of-bounds KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
......
...@@ -335,6 +335,9 @@ static bool ____kasan_slab_free(struct kmem_cache *cache, void *object, ...@@ -335,6 +335,9 @@ static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
tagged_object = object; tagged_object = object;
object = kasan_reset_tag(object); object = kasan_reset_tag(object);
if (is_kfence_address(object))
return false;
if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) != if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
object)) { object)) {
kasan_report_invalid_free(tagged_object, ip); kasan_report_invalid_free(tagged_object, ip);
...@@ -413,6 +416,9 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object, ...@@ -413,6 +416,9 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
if (unlikely(object == NULL)) if (unlikely(object == NULL))
return NULL; return NULL;
if (is_kfence_address(kasan_reset_tag(object)))
return (void *)object;
redzone_start = round_up((unsigned long)(object + size), redzone_start = round_up((unsigned long)(object + size),
KASAN_GRANULE_SIZE); KASAN_GRANULE_SIZE);
redzone_end = round_up((unsigned long)object + cache->object_size, redzone_end = round_up((unsigned long)object + cache->object_size,
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/kasan.h> #include <linux/kasan.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h> #include <linux/kmemleak.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/memblock.h> #include <linux/memblock.h>
...@@ -331,7 +332,7 @@ void kasan_record_aux_stack(void *addr) ...@@ -331,7 +332,7 @@ void kasan_record_aux_stack(void *addr)
struct kasan_alloc_meta *alloc_meta; struct kasan_alloc_meta *alloc_meta;
void *object; void *object;
if (!(page && PageSlab(page))) if (is_kfence_address(addr) || !(page && PageSlab(page)))
return; return;
cache = page->slab_cache; cache = page->slab_cache;
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#define __MM_KASAN_KASAN_H #define __MM_KASAN_KASAN_H
#include <linux/kasan.h> #include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/stackdepot.h> #include <linux/stackdepot.h>
#ifdef CONFIG_KASAN_HW_TAGS #ifdef CONFIG_KASAN_HW_TAGS
...@@ -331,14 +332,28 @@ static inline u8 kasan_random_tag(void) { return 0; } ...@@ -331,14 +332,28 @@ static inline u8 kasan_random_tag(void) { return 0; }
static inline void kasan_poison(const void *address, size_t size, u8 value) static inline void kasan_poison(const void *address, size_t size, u8 value)
{ {
hw_set_mem_tag_range(kasan_reset_tag(address), address = kasan_reset_tag(address);
/* Skip KFENCE memory if called explicitly outside of sl*b. */
if (is_kfence_address(address))
return;
hw_set_mem_tag_range((void *)address,
round_up(size, KASAN_GRANULE_SIZE), value); round_up(size, KASAN_GRANULE_SIZE), value);
} }
static inline void kasan_unpoison(const void *address, size_t size) static inline void kasan_unpoison(const void *address, size_t size)
{ {
hw_set_mem_tag_range(kasan_reset_tag(address), u8 tag = get_tag(address);
round_up(size, KASAN_GRANULE_SIZE), get_tag(address));
address = kasan_reset_tag(address);
/* Skip KFENCE memory if called explicitly outside of sl*b. */
if (is_kfence_address(address))
return;
hw_set_mem_tag_range((void *)address,
round_up(size, KASAN_GRANULE_SIZE), tag);
} }
static inline bool kasan_byte_accessible(const void *addr) static inline bool kasan_byte_accessible(const void *addr)
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/kasan.h> #include <linux/kasan.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h> #include <linux/kmemleak.h>
#include <linux/memory.h> #include <linux/memory.h>
#include <linux/mm.h> #include <linux/mm.h>
...@@ -84,6 +85,10 @@ void kasan_poison(const void *address, size_t size, u8 value) ...@@ -84,6 +85,10 @@ void kasan_poison(const void *address, size_t size, u8 value)
address = kasan_reset_tag(address); address = kasan_reset_tag(address);
size = round_up(size, KASAN_GRANULE_SIZE); size = round_up(size, KASAN_GRANULE_SIZE);
/* Skip KFENCE memory if called explicitly outside of sl*b. */
if (is_kfence_address(address))
return;
shadow_start = kasan_mem_to_shadow(address); shadow_start = kasan_mem_to_shadow(address);
shadow_end = kasan_mem_to_shadow(address + size); shadow_end = kasan_mem_to_shadow(address + size);
...@@ -102,6 +107,14 @@ void kasan_unpoison(const void *address, size_t size) ...@@ -102,6 +107,14 @@ void kasan_unpoison(const void *address, size_t size)
*/ */
address = kasan_reset_tag(address); address = kasan_reset_tag(address);
/*
* Skip KFENCE memory if called explicitly outside of sl*b. Also note
* that calls to ksize(), where size is not a multiple of machine-word
* size, would otherwise poison the invalid portion of the word.
*/
if (is_kfence_address(address))
return;
kasan_poison(address, size, tag); kasan_poison(address, size, tag);
if (size & KASAN_GRANULE_MASK) { if (size & KASAN_GRANULE_MASK) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment