Commit 1375b980 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge KASAN updates from Andrew Morton.

This adds a new hardware tag-based mode to KASAN.  The new mode is
similar to the existing software tag-based KASAN, but relies on arm64
Memory Tagging Extension (MTE) to perform memory and pointer tagging
(instead of shadow memory and compiler instrumentation).

By Andrey Konovalov and Vincenzo Frascino.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (60 commits)
  kasan: update documentation
  kasan, mm: allow cache merging with no metadata
  kasan: sanitize objects when metadata doesn't fit
  kasan: clarify comment in __kasan_kfree_large
  kasan: simplify assign_tag and set_tag calls
  kasan: don't round_up too much
  kasan, mm: rename kasan_poison_kfree
  kasan, mm: check kasan_enabled in annotations
  kasan: add and integrate kasan boot parameters
  kasan: inline (un)poison_range and check_invalid_free
  kasan: open-code kasan_unpoison_slab
  kasan: inline random_tag for HW_TAGS
  kasan: inline kasan_reset_tag for tag-based modes
  kasan: remove __kasan_unpoison_stack
  kasan: allow VMAP_STACK for HW_TAGS mode
  kasan, arm64: unpoison stack only with CONFIG_KASAN_STACK
  kasan: introduce set_alloc_info
  kasan: rename get_alloc/free_info
  kasan: simplify quarantine_put call site
  kselftest/arm64: check GCR_EL1 after context switch
  ...
parents c45647f9 625d8673
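
For orientation before the diff: a minimal userspace sketch of the tag layout this series builds on. The constants mirror the new arch/arm64/include/asm/mte-def.h (MTE_GRANULE_SIZE, MTE_TAG_SHIFT, MTE_TAG_SIZE) and the 0xF<x> KASAN tag format noted in asm/mte-kasan.h; the function names and the sample address below are illustrative only and are not part of the patches.

#include <stdint.h>
#include <stdio.h>

#define MTE_GRANULE_SIZE 16UL	/* from asm/mte-def.h */
#define MTE_TAG_SHIFT    56
#define MTE_TAG_SIZE     4

/* Insert a 4-bit MTE tag into bits 59:56 of an address. */
static uint64_t tag_set(uint64_t addr, uint8_t tag)
{
	uint64_t mask = (uint64_t)((1u << MTE_TAG_SIZE) - 1) << MTE_TAG_SHIFT;

	return (addr & ~mask) | ((uint64_t)(tag & 0xF) << MTE_TAG_SHIFT);
}

/* Recover the KASAN-style 0xF<x> tag, as mte_get_ptr_tag() does. */
static uint8_t kasan_tag_of(uint64_t addr)
{
	return 0xF0 | (uint8_t)(addr >> MTE_TAG_SHIFT);
}

int main(void)
{
	/* A granule-aligned kernel-style address, chosen for the example. */
	uint64_t addr = 0xffff000012345670ULL;
	uint64_t tagged = tag_set(addr, 0x7);

	printf("tagged address: 0x%016llx\n", (unsigned long long)tagged);
	printf("KASAN tag:      0x%02x\n", kasan_tag_of(tagged));
	printf("granule size:   %lu bytes\n", MTE_GRANULE_SIZE);
	return 0;
}

Compiled and run on its own, this prints a tagged address of 0xf7ff000012345670 and a KASAN tag of 0xf7; an MTE access through that pointer only passes the hardware check if the 16-byte granules it touches carry the matching allocation tag.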
...@@ -976,16 +976,16 @@ config VMAP_STACK ...@@ -976,16 +976,16 @@ config VMAP_STACK
default y default y
bool "Use a virtually-mapped stack" bool "Use a virtually-mapped stack"
depends on HAVE_ARCH_VMAP_STACK depends on HAVE_ARCH_VMAP_STACK
depends on !KASAN || KASAN_VMALLOC depends on !KASAN || KASAN_HW_TAGS || KASAN_VMALLOC
help help
Enable this if you want the use virtually-mapped kernel stacks Enable this if you want the use virtually-mapped kernel stacks
with guard pages. This causes kernel stack overflows to be with guard pages. This causes kernel stack overflows to be
caught immediately rather than causing difficult-to-diagnose caught immediately rather than causing difficult-to-diagnose
corruption. corruption.
To use this with KASAN, the architecture must support backing To use this with software KASAN modes, the architecture must support
virtual mappings with real shadow memory, and KASAN_VMALLOC must backing virtual mappings with real shadow memory, and KASAN_VMALLOC
be enabled. must be enabled.
config ARCH_OPTIONAL_KERNEL_RWX config ARCH_OPTIONAL_KERNEL_RWX
def_bool n def_bool n
......
...@@ -137,6 +137,7 @@ config ARM64 ...@@ -137,6 +137,7 @@ config ARM64
select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48) select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
select HAVE_ARCH_KGDB select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
...@@ -334,7 +335,7 @@ config BROKEN_GAS_INST ...@@ -334,7 +335,7 @@ config BROKEN_GAS_INST
config KASAN_SHADOW_OFFSET config KASAN_SHADOW_OFFSET
hex hex
depends on KASAN depends on KASAN_GENERIC || KASAN_SW_TAGS
default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
...@@ -1571,6 +1572,9 @@ endmenu ...@@ -1571,6 +1572,9 @@ endmenu
menu "ARMv8.5 architectural features" menu "ARMv8.5 architectural features"
config AS_HAS_ARMV8_5
def_bool $(cc-option,-Wa$(comma)-march=armv8.5-a)
config ARM64_BTI config ARM64_BTI
bool "Branch Target Identification support" bool "Branch Target Identification support"
default y default y
...@@ -1645,6 +1649,9 @@ config ARM64_MTE ...@@ -1645,6 +1649,9 @@ config ARM64_MTE
bool "Memory Tagging Extension support" bool "Memory Tagging Extension support"
default y default y
depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
depends on AS_HAS_ARMV8_5
# Required for tag checking in the uaccess routines
depends on ARM64_PAN
select ARCH_USES_HIGH_VMA_FLAGS select ARCH_USES_HIGH_VMA_FLAGS
help help
Memory Tagging (part of the ARMv8.5 Extensions) provides Memory Tagging (part of the ARMv8.5 Extensions) provides
......
...@@ -96,6 +96,11 @@ ifeq ($(CONFIG_AS_HAS_ARMV8_4), y) ...@@ -96,6 +96,11 @@ ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
asm-arch := armv8.4-a asm-arch := armv8.4-a
endif endif
ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
# make sure to pass the newest target architecture to -march.
asm-arch := armv8.5-a
endif
ifdef asm-arch ifdef asm-arch
KBUILD_CFLAGS += -Wa,-march=$(asm-arch) \ KBUILD_CFLAGS += -Wa,-march=$(asm-arch) \
-DARM64_ASM_ARCH='"$(asm-arch)"' -DARM64_ASM_ARCH='"$(asm-arch)"'
...@@ -132,7 +137,7 @@ head-y := arch/arm64/kernel/head.o ...@@ -132,7 +137,7 @@ head-y := arch/arm64/kernel/head.o
ifeq ($(CONFIG_KASAN_SW_TAGS), y) ifeq ($(CONFIG_KASAN_SW_TAGS), y)
KASAN_SHADOW_SCALE_SHIFT := 4 KASAN_SHADOW_SCALE_SHIFT := 4
else else ifeq ($(CONFIG_KASAN_GENERIC), y)
KASAN_SHADOW_SCALE_SHIFT := 3 KASAN_SHADOW_SCALE_SHIFT := 3
endif endif
......
...@@ -473,7 +473,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU ...@@ -473,7 +473,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
#define NOKPROBE(x) #define NOKPROBE(x)
#endif #endif
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define EXPORT_SYMBOL_NOKASAN(name) #define EXPORT_SYMBOL_NOKASAN(name)
#else #else
#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name) #define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#define __ASM_CACHE_H #define __ASM_CACHE_H
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/mte-kasan.h>
#define CTR_L1IP_SHIFT 14 #define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3 #define CTR_L1IP_MASK 3
...@@ -51,6 +52,8 @@ ...@@ -51,6 +52,8 @@
#ifdef CONFIG_KASAN_SW_TAGS #ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
#elif defined(CONFIG_KASAN_HW_TAGS)
#define ARCH_SLAB_MINALIGN MTE_GRANULE_SIZE
#endif #endif
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
......
...@@ -106,6 +106,7 @@ ...@@ -106,6 +106,7 @@
#define ESR_ELx_FSC_TYPE (0x3C) #define ESR_ELx_FSC_TYPE (0x3C)
#define ESR_ELx_FSC_LEVEL (0x03) #define ESR_ELx_FSC_LEVEL (0x03)
#define ESR_ELx_FSC_EXTABT (0x10) #define ESR_ELx_FSC_EXTABT (0x10)
#define ESR_ELx_FSC_MTE (0x11)
#define ESR_ELx_FSC_SERROR (0x11) #define ESR_ELx_FSC_SERROR (0x11)
#define ESR_ELx_FSC_ACCESS (0x08) #define ESR_ELx_FSC_ACCESS (0x08)
#define ESR_ELx_FSC_FAULT (0x04) #define ESR_ELx_FSC_FAULT (0x04)
......
...@@ -12,7 +12,9 @@ ...@@ -12,7 +12,9 @@
#define arch_kasan_reset_tag(addr) __tag_reset(addr) #define arch_kasan_reset_tag(addr) __tag_reset(addr)
#define arch_kasan_get_tag(addr) __tag_get(addr) #define arch_kasan_get_tag(addr) __tag_get(addr)
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_init(void);
/* /*
* KASAN_SHADOW_START: beginning of the kernel virtual addresses. * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
...@@ -33,7 +35,6 @@ ...@@ -33,7 +35,6 @@
#define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT))) #define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
#define KASAN_SHADOW_START _KASAN_SHADOW_START(vabits_actual) #define KASAN_SHADOW_START _KASAN_SHADOW_START(vabits_actual)
void kasan_init(void);
void kasan_copy_shadow(pgd_t *pgdir); void kasan_copy_shadow(pgd_t *pgdir);
asmlinkage void kasan_early_init(void); asmlinkage void kasan_early_init(void);
......
...@@ -72,7 +72,7 @@ ...@@ -72,7 +72,7 @@
* address space for the shadow region respectively. They can bloat the stack * address space for the shadow region respectively. They can bloat the stack
* significantly, so double the (minimum) stack size when they are in use. * significantly, so double the (minimum) stack size when they are in use.
*/ */
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \ #define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
+ KASAN_SHADOW_OFFSET) + KASAN_SHADOW_OFFSET)
...@@ -214,7 +214,7 @@ static inline unsigned long kaslr_offset(void) ...@@ -214,7 +214,7 @@ static inline unsigned long kaslr_offset(void)
(__force __typeof__(addr))__addr; \ (__force __typeof__(addr))__addr; \
}) })
#ifdef CONFIG_KASAN_SW_TAGS #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
#define __tag_shifted(tag) ((u64)(tag) << 56) #define __tag_shifted(tag) ((u64)(tag) << 56)
#define __tag_reset(addr) __untagged_addr(addr) #define __tag_reset(addr) __untagged_addr(addr)
#define __tag_get(addr) (__u8)((u64)(addr) >> 56) #define __tag_get(addr) (__u8)((u64)(addr) >> 56)
...@@ -222,7 +222,7 @@ static inline unsigned long kaslr_offset(void) ...@@ -222,7 +222,7 @@ static inline unsigned long kaslr_offset(void)
#define __tag_shifted(tag) 0UL #define __tag_shifted(tag) 0UL
#define __tag_reset(addr) (addr) #define __tag_reset(addr) (addr)
#define __tag_get(addr) 0 #define __tag_get(addr) 0
#endif /* CONFIG_KASAN_SW_TAGS */ #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
static inline const void *__tag_set(const void *addr, u8 tag) static inline const void *__tag_set(const void *addr, u8 tag)
{ {
...@@ -230,6 +230,15 @@ static inline const void *__tag_set(const void *addr, u8 tag) ...@@ -230,6 +230,15 @@ static inline const void *__tag_set(const void *addr, u8 tag)
return (const void *)(__addr | __tag_shifted(tag)); return (const void *)(__addr | __tag_shifted(tag));
} }
#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging() mte_enable_kernel()
#define arch_init_tags(max_tag) mte_init_tags(max_tag)
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)
#define arch_set_mem_tag_range(addr, size, tag) \
mte_set_mem_tag_range((addr), (size), (tag))
#endif /* CONFIG_KASAN_HW_TAGS */
/* /*
* Physical vs virtual RAM address space conversion. These are * Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h * private definitions which should NOT be used outside memory.h
......
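
The block above is the whole arch-level interface that hardware tag-based KASAN consumes; the generic side lives in the collapsed mm/kasan part of this diff. As a hedged sketch of how the hooks are intended to compose when an object is allocated (the wrapper name and the round_up() of the size are assumptions for illustration; only the arch_* calls come from the hunk above):

/* Illustrative sketch, not a function added by this series. */
static inline void *example_tag_new_object(void *addr, size_t size)
{
	u8 tag = arch_get_random_tag();	/* IRG-generated random tag */

	/*
	 * Tag every 16-byte granule of the object and return a pointer
	 * carrying the same tag, so accesses through it pass the
	 * hardware tag check.
	 */
	return arch_set_mem_tag_range(addr, round_up(size, MTE_GRANULE_SIZE), tag);
}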
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 ARM Ltd.
*/
#ifndef __ASM_MTE_DEF_H
#define __ASM_MTE_DEF_H
#define MTE_GRANULE_SIZE UL(16)
#define MTE_GRANULE_MASK (~(MTE_GRANULE_SIZE - 1))
#define MTE_TAG_SHIFT 56
#define MTE_TAG_SIZE 4
#define MTE_TAG_MASK GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
#endif /* __ASM_MTE_DEF_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 ARM Ltd.
*/
#ifndef __ASM_MTE_KASAN_H
#define __ASM_MTE_KASAN_H
#include <asm/mte-def.h>
#ifndef __ASSEMBLY__
#include <linux/types.h>
/*
* The functions below are meant to be used only for the
* KASAN_HW_TAGS interface defined in asm/memory.h.
*/
#ifdef CONFIG_ARM64_MTE
static inline u8 mte_get_ptr_tag(void *ptr)
{
/* Note: The format of KASAN tags is 0xF<x> */
u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
return tag;
}
u8 mte_get_mem_tag(void *addr);
u8 mte_get_random_tag(void);
void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
void mte_enable_kernel(void);
void mte_init_tags(u64 max_tag);
#else /* CONFIG_ARM64_MTE */
static inline u8 mte_get_ptr_tag(void *ptr)
{
return 0xFF;
}
static inline u8 mte_get_mem_tag(void *addr)
{
return 0xFF;
}
static inline u8 mte_get_random_tag(void)
{
return 0xFF;
}
static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
return addr;
}
static inline void mte_enable_kernel(void)
{
}
static inline void mte_init_tags(u64 max_tag)
{
}
#endif /* CONFIG_ARM64_MTE */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_MTE_KASAN_H */
...@@ -5,17 +5,21 @@ ...@@ -5,17 +5,21 @@
#ifndef __ASM_MTE_H #ifndef __ASM_MTE_H
#define __ASM_MTE_H #define __ASM_MTE_H
#define MTE_GRANULE_SIZE UL(16) #include <asm/compiler.h>
#define MTE_GRANULE_MASK (~(MTE_GRANULE_SIZE - 1)) #include <asm/mte-def.h>
#define MTE_TAG_SHIFT 56
#define MTE_TAG_SIZE 4 #define __MTE_PREAMBLE ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/bitfield.h>
#include <linux/page-flags.h> #include <linux/page-flags.h>
#include <linux/types.h>
#include <asm/pgtable-types.h> #include <asm/pgtable-types.h>
extern u64 gcr_kernel_excl;
void mte_clear_page_tags(void *addr); void mte_clear_page_tags(void *addr);
unsigned long mte_copy_tags_from_user(void *to, const void __user *from, unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
unsigned long n); unsigned long n);
...@@ -45,7 +49,9 @@ long get_mte_ctrl(struct task_struct *task); ...@@ -45,7 +49,9 @@ long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request, int mte_ptrace_copy_tags(struct task_struct *child, long request,
unsigned long addr, unsigned long data); unsigned long addr, unsigned long data);
#else void mte_assign_mem_tag_range(void *addr, size_t size);
#else /* CONFIG_ARM64_MTE */
/* unused if !CONFIG_ARM64_MTE, silence the compiler */ /* unused if !CONFIG_ARM64_MTE, silence the compiler */
#define PG_mte_tagged 0 #define PG_mte_tagged 0
...@@ -80,7 +86,11 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child, ...@@ -80,7 +86,11 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
return -EIO; return -EIO;
} }
#endif static inline void mte_assign_mem_tag_range(void *addr, size_t size)
{
}
#endif /* CONFIG_ARM64_MTE */
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* __ASM_MTE_H */ #endif /* __ASM_MTE_H */
...@@ -152,7 +152,7 @@ struct thread_struct { ...@@ -152,7 +152,7 @@ struct thread_struct {
#endif #endif
#ifdef CONFIG_ARM64_MTE #ifdef CONFIG_ARM64_MTE
u64 sctlr_tcf0; u64 sctlr_tcf0;
u64 gcr_user_incl; u64 gcr_user_excl;
#endif #endif
}; };
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
#ifndef __ASM_STRING_H #ifndef __ASM_STRING_H
#define __ASM_STRING_H #define __ASM_STRING_H
#ifndef CONFIG_KASAN #if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
#define __HAVE_ARCH_STRRCHR #define __HAVE_ARCH_STRRCHR
extern char *strrchr(const char *, int c); extern char *strrchr(const char *, int c);
...@@ -48,7 +48,8 @@ extern void *__memset(void *, int, __kernel_size_t); ...@@ -48,7 +48,8 @@ extern void *__memset(void *, int, __kernel_size_t);
void memcpy_flushcache(void *dst, const void *src, size_t cnt); void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif #endif
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
!defined(__SANITIZE_ADDRESS__)
/* /*
* For files that are not instrumented (e.g. mm/slub.c) we * For files that are not instrumented (e.g. mm/slub.c) we
......
...@@ -159,8 +159,28 @@ static inline void __uaccess_enable_hw_pan(void) ...@@ -159,8 +159,28 @@ static inline void __uaccess_enable_hw_pan(void)
CONFIG_ARM64_PAN)); CONFIG_ARM64_PAN));
} }
/*
* The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
* affects EL0 and TCF affects EL1 irrespective of which TTBR is
* used.
* The kernel accesses TTBR0 usually with LDTR/STTR instructions
* when UAO is available, so these would act as EL0 accesses using
* TCF0.
* However futex.h code uses exclusives which would be executed as
* EL1, this can potentially cause a tag check fault even if the
* user disables TCF0.
*
* To address the problem we set the PSTATE.TCO bit in uaccess_enable()
* and reset it in uaccess_disable().
*
* The Tag check override (TCO) bit disables temporarily the tag checking
* preventing the issue.
*/
static inline void uaccess_disable_privileged(void) static inline void uaccess_disable_privileged(void)
{ {
asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
if (uaccess_ttbr0_disable()) if (uaccess_ttbr0_disable())
return; return;
...@@ -169,6 +189,9 @@ static inline void uaccess_disable_privileged(void) ...@@ -169,6 +189,9 @@ static inline void uaccess_disable_privileged(void)
static inline void uaccess_enable_privileged(void) static inline void uaccess_enable_privileged(void)
{ {
asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
if (uaccess_ttbr0_enable()) if (uaccess_ttbr0_enable())
return; return;
......
...@@ -46,6 +46,9 @@ int main(void) ...@@ -46,6 +46,9 @@ int main(void)
#ifdef CONFIG_ARM64_PTR_AUTH #ifdef CONFIG_ARM64_PTR_AUTH
DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user)); DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel)); DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
#endif
#ifdef CONFIG_ARM64_MTE
DEFINE(THREAD_GCR_EL1_USER, offsetof(struct task_struct, thread.gcr_user_excl));
#endif #endif
BLANK(); BLANK();
DEFINE(S_X0, offsetof(struct pt_regs, regs[0])); DEFINE(S_X0, offsetof(struct pt_regs, regs[0]));
......
...@@ -70,6 +70,7 @@ ...@@ -70,6 +70,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/kasan.h>
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/cpu_ops.h> #include <asm/cpu_ops.h>
...@@ -1710,6 +1711,8 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) ...@@ -1710,6 +1711,8 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
cleared_zero_page = true; cleared_zero_page = true;
mte_clear_page_tags(lm_alias(empty_zero_page)); mte_clear_page_tags(lm_alias(empty_zero_page));
} }
kasan_init_hw_tags_cpu();
} }
#endif /* CONFIG_ARM64_MTE */ #endif /* CONFIG_ARM64_MTE */
......
...@@ -173,6 +173,43 @@ alternative_else_nop_endif ...@@ -173,6 +173,43 @@ alternative_else_nop_endif
#endif #endif
.endm .endm
.macro mte_set_gcr, tmp, tmp2
#ifdef CONFIG_ARM64_MTE
/*
* Calculate and set the exclude mask preserving
* the RRND (bit[16]) setting.
*/
mrs_s \tmp2, SYS_GCR_EL1
bfi \tmp2, \tmp, #0, #16
msr_s SYS_GCR_EL1, \tmp2
isb
#endif
.endm
.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
ldr_l \tmp, gcr_kernel_excl
mte_set_gcr \tmp, \tmp2
1:
#endif
.endm
.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_ARM64_MTE
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
ldr \tmp, [\tsk, #THREAD_GCR_EL1_USER]
mte_set_gcr \tmp, \tmp2
1:
#endif
.endm
.macro kernel_entry, el, regsize = 64 .macro kernel_entry, el, regsize = 64
.if \regsize == 32 .if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0 mov w0, w0 // zero upper 32 bits of x0
...@@ -212,6 +249,8 @@ alternative_else_nop_endif ...@@ -212,6 +249,8 @@ alternative_else_nop_endif
ptrauth_keys_install_kernel tsk, x20, x22, x23 ptrauth_keys_install_kernel tsk, x20, x22, x23
mte_set_kernel_gcr x22, x23
scs_load tsk, x20 scs_load tsk, x20
.else .else
add x21, sp, #S_FRAME_SIZE add x21, sp, #S_FRAME_SIZE
...@@ -315,6 +354,8 @@ alternative_else_nop_endif ...@@ -315,6 +354,8 @@ alternative_else_nop_endif
/* No kernel C function calls after this as user keys are set. */ /* No kernel C function calls after this as user keys are set. */
ptrauth_keys_install_user tsk, x0, x1, x2 ptrauth_keys_install_user tsk, x0, x1, x2
mte_set_user_gcr tsk, x0, x1
apply_ssbd 0, x0, x1 apply_ssbd 0, x0, x1
.endif .endif
......
...@@ -433,7 +433,7 @@ SYM_FUNC_START_LOCAL(__primary_switched) ...@@ -433,7 +433,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
bl __pi_memset bl __pi_memset
dsb ishst // Make zero page visible to PTW dsb ishst // Make zero page visible to PTW
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
bl kasan_early_init bl kasan_early_init
#endif #endif
#ifdef CONFIG_RANDOMIZE_BASE #ifdef CONFIG_RANDOMIZE_BASE
......
...@@ -371,6 +371,11 @@ static void swsusp_mte_restore_tags(void) ...@@ -371,6 +371,11 @@ static void swsusp_mte_restore_tags(void)
unsigned long pfn = xa_state.xa_index; unsigned long pfn = xa_state.xa_index;
struct page *page = pfn_to_online_page(pfn); struct page *page = pfn_to_online_page(pfn);
/*
* It is not required to invoke page_kasan_tag_reset(page)
* at this point since the tags stored in page->flags are
* already restored.
*/
mte_restore_page_tags(page_address(page), tags); mte_restore_page_tags(page_address(page), tags);
mte_free_tag_storage(tags); mte_free_tag_storage(tags);
......
...@@ -37,7 +37,7 @@ __efistub_strncmp = __pi_strncmp; ...@@ -37,7 +37,7 @@ __efistub_strncmp = __pi_strncmp;
__efistub_strrchr = __pi_strrchr; __efistub_strrchr = __pi_strrchr;
__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc; __efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
__efistub___memcpy = __pi_memcpy; __efistub___memcpy = __pi_memcpy;
__efistub___memmove = __pi_memmove; __efistub___memmove = __pi_memmove;
__efistub___memset = __pi_memset; __efistub___memset = __pi_memset;
......
...@@ -161,7 +161,8 @@ u64 __init kaslr_early_init(u64 dt_phys) ...@@ -161,7 +161,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
/* use the top 16 bits to randomize the linear region */ /* use the top 16 bits to randomize the linear region */
memstart_offset_seed = seed >> 48; memstart_offset_seed = seed >> 48;
if (IS_ENABLED(CONFIG_KASAN)) if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
IS_ENABLED(CONFIG_KASAN_SW_TAGS))
/* /*
* KASAN does not expect the module region to intersect the * KASAN does not expect the module region to intersect the
* vmalloc region, since shadow memory is allocated for each * vmalloc region, since shadow memory is allocated for each
......
...@@ -30,7 +30,8 @@ void *module_alloc(unsigned long size) ...@@ -30,7 +30,8 @@ void *module_alloc(unsigned long size)
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
gfp_mask |= __GFP_NOWARN; gfp_mask |= __GFP_NOWARN;
if (IS_ENABLED(CONFIG_KASAN)) if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
IS_ENABLED(CONFIG_KASAN_SW_TAGS))
/* don't exceed the static module region - see below */ /* don't exceed the static module region - see below */
module_alloc_end = MODULES_END; module_alloc_end = MODULES_END;
...@@ -39,7 +40,8 @@ void *module_alloc(unsigned long size) ...@@ -39,7 +40,8 @@ void *module_alloc(unsigned long size)
NUMA_NO_NODE, __builtin_return_address(0)); NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
!IS_ENABLED(CONFIG_KASAN)) !IS_ENABLED(CONFIG_KASAN_GENERIC) &&
!IS_ENABLED(CONFIG_KASAN_SW_TAGS))
/* /*
* KASAN can only deal with module allocations being served * KASAN can only deal with module allocations being served
* from the reserved module region, since the remainder of * from the reserved module region, since the remainder of
......
...@@ -13,13 +13,18 @@ ...@@ -13,13 +13,18 @@
#include <linux/swap.h> #include <linux/swap.h>
#include <linux/swapops.h> #include <linux/swapops.h>
#include <linux/thread_info.h> #include <linux/thread_info.h>
#include <linux/types.h>
#include <linux/uio.h> #include <linux/uio.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/mte.h> #include <asm/mte.h>
#include <asm/mte-kasan.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
u64 gcr_kernel_excl __ro_after_init;
static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap) static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{ {
pte_t old_pte = READ_ONCE(*ptep); pte_t old_pte = READ_ONCE(*ptep);
...@@ -31,6 +36,15 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap) ...@@ -31,6 +36,15 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
return; return;
} }
page_kasan_tag_reset(page);
/*
* We need smp_wmb() in between setting the flags and clearing the
* tags because if another thread reads page->flags and builds a
* tagged address out of it, there is an actual dependency to the
* memory access, but on the current thread we do not guarantee that
* the new page->flags are visible before the tags were updated.
*/
smp_wmb();
mte_clear_page_tags(page_address(page)); mte_clear_page_tags(page_address(page));
} }
...@@ -72,6 +86,78 @@ int memcmp_pages(struct page *page1, struct page *page2) ...@@ -72,6 +86,78 @@ int memcmp_pages(struct page *page1, struct page *page2)
return ret; return ret;
} }
u8 mte_get_mem_tag(void *addr)
{
if (!system_supports_mte())
return 0xFF;
asm(__MTE_PREAMBLE "ldg %0, [%0]"
: "+r" (addr));
return mte_get_ptr_tag(addr);
}
u8 mte_get_random_tag(void)
{
void *addr;
if (!system_supports_mte())
return 0xFF;
asm(__MTE_PREAMBLE "irg %0, %0"
: "+r" (addr));
return mte_get_ptr_tag(addr);
}
void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
void *ptr = addr;
if ((!system_supports_mte()) || (size == 0))
return addr;
/* Make sure that size is MTE granule aligned. */
WARN_ON(size & (MTE_GRANULE_SIZE - 1));
/* Make sure that the address is MTE granule aligned. */
WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
tag = 0xF0 | tag;
ptr = (void *)__tag_set(ptr, tag);
mte_assign_mem_tag_range(ptr, size);
return ptr;
}
void mte_init_tags(u64 max_tag)
{
static bool gcr_kernel_excl_initialized;
if (!gcr_kernel_excl_initialized) {
/*
* The format of the tags in KASAN is 0xFF and in MTE is 0xF.
* This conversion extracts an MTE tag from a KASAN tag.
*/
u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
max_tag), 0);
gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
gcr_kernel_excl_initialized = true;
}
/* Enable the kernel exclude mask for random tags generation. */
write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
}
void mte_enable_kernel(void)
{
/* Enable MTE Sync Mode for EL1. */
sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
isb();
}
static void update_sctlr_el1_tcf0(u64 tcf0) static void update_sctlr_el1_tcf0(u64 tcf0)
{ {
/* ISB required for the kernel uaccess routines */ /* ISB required for the kernel uaccess routines */
...@@ -92,23 +178,26 @@ static void set_sctlr_el1_tcf0(u64 tcf0) ...@@ -92,23 +178,26 @@ static void set_sctlr_el1_tcf0(u64 tcf0)
preempt_enable(); preempt_enable();
} }
static void update_gcr_el1_excl(u64 incl) static void update_gcr_el1_excl(u64 excl)
{ {
u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
/* /*
* Note that 'incl' is an include mask (controlled by the user via * Note that the mask controlled by the user via prctl() is an
* prctl()) while GCR_EL1 accepts an exclude mask. * include while GCR_EL1 accepts an exclude mask.
* No need for ISB since this only affects EL0 currently, implicit * No need for ISB since this only affects EL0 currently, implicit
* with ERET. * with ERET.
*/ */
sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl); sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
} }
static void set_gcr_el1_excl(u64 incl) static void set_gcr_el1_excl(u64 excl)
{ {
current->thread.gcr_user_incl = incl; current->thread.gcr_user_excl = excl;
update_gcr_el1_excl(incl);
/*
* SYS_GCR_EL1 will be set to current->thread.gcr_user_excl value
* by mte_set_user_gcr() in kernel_exit,
*/
} }
void flush_mte_state(void) void flush_mte_state(void)
...@@ -123,7 +212,7 @@ void flush_mte_state(void) ...@@ -123,7 +212,7 @@ void flush_mte_state(void)
/* disable tag checking */ /* disable tag checking */
set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE); set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
/* reset tag generation mask */ /* reset tag generation mask */
set_gcr_el1_excl(0); set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
} }
void mte_thread_switch(struct task_struct *next) void mte_thread_switch(struct task_struct *next)
...@@ -134,7 +223,6 @@ void mte_thread_switch(struct task_struct *next) ...@@ -134,7 +223,6 @@ void mte_thread_switch(struct task_struct *next)
/* avoid expensive SCTLR_EL1 accesses if no change */ /* avoid expensive SCTLR_EL1 accesses if no change */
if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0) if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
update_sctlr_el1_tcf0(next->thread.sctlr_tcf0); update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
update_gcr_el1_excl(next->thread.gcr_user_incl);
} }
void mte_suspend_exit(void) void mte_suspend_exit(void)
...@@ -142,13 +230,14 @@ void mte_suspend_exit(void) ...@@ -142,13 +230,14 @@ void mte_suspend_exit(void)
if (!system_supports_mte()) if (!system_supports_mte())
return; return;
update_gcr_el1_excl(current->thread.gcr_user_incl); update_gcr_el1_excl(gcr_kernel_excl);
} }
long set_mte_ctrl(struct task_struct *task, unsigned long arg) long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{ {
u64 tcf0; u64 tcf0;
u64 gcr_incl = (arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT; u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
SYS_GCR_EL1_EXCL_MASK;
if (!system_supports_mte()) if (!system_supports_mte())
return 0; return 0;
...@@ -169,10 +258,10 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg) ...@@ -169,10 +258,10 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
if (task != current) { if (task != current) {
task->thread.sctlr_tcf0 = tcf0; task->thread.sctlr_tcf0 = tcf0;
task->thread.gcr_user_incl = gcr_incl; task->thread.gcr_user_excl = gcr_excl;
} else { } else {
set_sctlr_el1_tcf0(tcf0); set_sctlr_el1_tcf0(tcf0);
set_gcr_el1_excl(gcr_incl); set_gcr_el1_excl(gcr_excl);
} }
return 0; return 0;
...@@ -181,11 +270,12 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg) ...@@ -181,11 +270,12 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
long get_mte_ctrl(struct task_struct *task) long get_mte_ctrl(struct task_struct *task)
{ {
unsigned long ret; unsigned long ret;
u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;
if (!system_supports_mte()) if (!system_supports_mte())
return 0; return 0;
ret = task->thread.gcr_user_incl << PR_MTE_TAG_SHIFT; ret = incl << PR_MTE_TAG_SHIFT;
switch (task->thread.sctlr_tcf0) { switch (task->thread.sctlr_tcf0) {
case SCTLR_EL1_TCF0_NONE: case SCTLR_EL1_TCF0_NONE:
......
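
The include/exclude switch in the hunk above is easy to get backwards, so a small worked example may help: GCR_EL1 takes a 16-bit exclude mask, while both mte_init_tags() and the prctl() ABI start from an include notion. Below is a standalone model of the arithmetic, assuming SYS_GCR_EL1_EXCL_MASK covers bits 15:0; the names are local to the example.

#include <stdint.h>
#include <stdio.h>

#define GCR_EL1_EXCL_MASK 0xffffULL	/* assumed value of SYS_GCR_EL1_EXCL_MASK */

/* GENMASK(h, 0) for h < 63, as used by mte_init_tags(). */
static uint64_t genmask_lo(unsigned int h)
{
	return (1ULL << (h + 1)) - 1;
}

int main(void)
{
	/* KASAN passes its highest tag, e.g. 0xFE, i.e. MTE tag 0xE. */
	unsigned int max_mte_tag = 0xFE & 0xF;
	uint64_t incl = genmask_lo(max_mte_tag);
	uint64_t kernel_excl = ~incl & GCR_EL1_EXCL_MASK;

	/* prctl() include mask -> per-task exclude mask, as in set_mte_ctrl(). */
	uint64_t user_incl = 0x0003;	/* user asks for tags 0 and 1 only */
	uint64_t user_excl = ~user_incl & GCR_EL1_EXCL_MASK;

	printf("kernel exclude mask: 0x%04llx\n", (unsigned long long)kernel_excl);
	printf("user exclude mask:   0x%04llx\n", (unsigned long long)user_excl);
	return 0;
}

With a maximum KASAN tag of 0xFE this prints 0x8000 (only tag 15 excluded for kernel IRG) and 0xfffc respectively.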
...@@ -358,7 +358,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) ...@@ -358,7 +358,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
smp_build_mpidr_hash(); smp_build_mpidr_hash();
/* Init percpu seeds for random tags after cpus are set up. */ /* Init percpu seeds for random tags after cpus are set up. */
kasan_init_tags(); kasan_init_sw_tags();
#ifdef CONFIG_ARM64_SW_TTBR0_PAN #ifdef CONFIG_ARM64_SW_TTBR0_PAN
/* /*
......
...@@ -133,7 +133,7 @@ SYM_FUNC_START(_cpu_resume) ...@@ -133,7 +133,7 @@ SYM_FUNC_START(_cpu_resume)
*/ */
bl cpu_do_resume bl cpu_do_resume
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
mov x0, sp mov x0, sp
bl kasan_unpoison_task_stack_below bl kasan_unpoison_task_stack_below
#endif #endif
......
...@@ -462,6 +462,8 @@ void __init smp_prepare_boot_cpu(void) ...@@ -462,6 +462,8 @@ void __init smp_prepare_boot_cpu(void)
/* Conditionally switch to GIC PMR for interrupt masking */ /* Conditionally switch to GIC PMR for interrupt masking */
if (system_uses_irq_prio_masking()) if (system_uses_irq_prio_masking())
init_gic_priority_masking(); init_gic_priority_masking();
kasan_init_hw_tags();
} }
static u64 __init of_get_cpu_mpidr(struct device_node *dn) static u64 __init of_get_cpu_mpidr(struct device_node *dn)
......
...@@ -149,3 +149,19 @@ SYM_FUNC_START(mte_restore_page_tags) ...@@ -149,3 +149,19 @@ SYM_FUNC_START(mte_restore_page_tags)
ret ret
SYM_FUNC_END(mte_restore_page_tags) SYM_FUNC_END(mte_restore_page_tags)
/*
* Assign allocation tags for a region of memory based on the pointer tag
* x0 - source pointer
* x1 - size
*
* Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
* size must be non-zero and MTE_GRANULE_SIZE aligned.
*/
SYM_FUNC_START(mte_assign_mem_tag_range)
1: stg x0, [x0]
add x0, x0, #MTE_GRANULE_SIZE
subs x1, x1, #MTE_GRANULE_SIZE
b.gt 1b
ret
SYM_FUNC_END(mte_assign_mem_tag_range)
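
For readers less fluent in the assembly above, here is a userspace rendering of the same granule walk, with the STG store replaced by a printout. This is purely illustrative; the real loop stores the allocation tag carried in the pointer itself, and the caller guarantees the alignment preconditions stated in the comment.

#include <stdint.h>
#include <stdio.h>

#define MTE_GRANULE_SIZE 16UL

/* Models mte_assign_mem_tag_range(): addr and size are granule aligned. */
static void assign_mem_tag_range(uint64_t addr, uint64_t size)
{
	do {
		/* The real code executes "stg x0, [x0]" at this point. */
		printf("tagging granule at 0x%016llx\n", (unsigned long long)addr);
		addr += MTE_GRANULE_SIZE;
		size -= MTE_GRANULE_SIZE;
	} while (size > 0);
}

int main(void)
{
	assign_mem_tag_range(0x1000, 48);	/* tags three granules */
	return 0;
}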
...@@ -23,6 +23,15 @@ void copy_highpage(struct page *to, struct page *from) ...@@ -23,6 +23,15 @@ void copy_highpage(struct page *to, struct page *from)
if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) { if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
set_bit(PG_mte_tagged, &to->flags); set_bit(PG_mte_tagged, &to->flags);
page_kasan_tag_reset(to);
/*
* We need smp_wmb() in between setting the flags and clearing the
* tags because if another thread reads page->flags and builds a
* tagged address out of it, there is an actual dependency to the
* memory access, but on the current thread we do not guarantee that
* the new page->flags are visible before the tags were updated.
*/
smp_wmb();
mte_copy_page_tags(kto, kfrom); mte_copy_page_tags(kto, kfrom);
} }
} }
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/page-flags.h> #include <linux/page-flags.h>
...@@ -33,6 +34,7 @@ ...@@ -33,6 +34,7 @@
#include <asm/debug-monitors.h> #include <asm/debug-monitors.h>
#include <asm/esr.h> #include <asm/esr.h>
#include <asm/kprobes.h> #include <asm/kprobes.h>
#include <asm/mte.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
#include <asm/system_misc.h> #include <asm/system_misc.h>
...@@ -296,6 +298,57 @@ static void die_kernel_fault(const char *msg, unsigned long addr, ...@@ -296,6 +298,57 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
do_exit(SIGKILL); do_exit(SIGKILL);
} }
#ifdef CONFIG_KASAN_HW_TAGS
static void report_tag_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
bool is_write = ((esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT) != 0;
/*
* SAS bits aren't set for all faults reported in EL1, so we can't
* find out access size.
*/
kasan_report(addr, 0, is_write, regs->pc);
}
#else
/* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */
static inline void report_tag_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs) { }
#endif
static void do_tag_recovery(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
static bool reported;
if (!READ_ONCE(reported)) {
report_tag_fault(addr, esr, regs);
WRITE_ONCE(reported, true);
}
/*
* Disable MTE Tag Checking on the local CPU for the current EL.
* It will be done lazily on the other CPUs when they will hit a
* tag fault.
*/
sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_NONE);
isb();
}
static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
{
unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc = esr & ESR_ELx_FSC;
if (ec != ESR_ELx_EC_DABT_CUR)
return false;
if (fsc == ESR_ELx_FSC_MTE)
return true;
return false;
}
static void __do_kernel_fault(unsigned long addr, unsigned int esr, static void __do_kernel_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs) struct pt_regs *regs)
{ {
...@@ -312,6 +365,12 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, ...@@ -312,6 +365,12 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
"Ignoring spurious kernel translation fault at virtual address %016lx\n", addr)) "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
return; return;
if (is_el1_mte_sync_tag_check_fault(esr)) {
do_tag_recovery(addr, esr, regs);
return;
}
if (is_el1_permission_fault(addr, esr, regs)) { if (is_el1_permission_fault(addr, esr, regs)) {
if (esr & ESR_ELx_WNR) if (esr & ESR_ELx_WNR)
msg = "write to read-only memory"; msg = "write to read-only memory";
......
...@@ -21,6 +21,8 @@ ...@@ -21,6 +21,8 @@
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE); static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
/* /*
...@@ -208,7 +210,7 @@ static void __init clear_pgds(unsigned long start, ...@@ -208,7 +210,7 @@ static void __init clear_pgds(unsigned long start,
set_pgd(pgd_offset_k(start), __pgd(0)); set_pgd(pgd_offset_k(start), __pgd(0));
} }
void __init kasan_init(void) static void __init kasan_init_shadow(void)
{ {
u64 kimg_shadow_start, kimg_shadow_end; u64 kimg_shadow_start, kimg_shadow_end;
u64 mod_shadow_start, mod_shadow_end; u64 mod_shadow_start, mod_shadow_end;
...@@ -269,8 +271,21 @@ void __init kasan_init(void) ...@@ -269,8 +271,21 @@ void __init kasan_init(void)
memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
}
/* At this point kasan is fully initialized. Enable error messages */ static void __init kasan_init_depth(void)
{
init_task.kasan_depth = 0; init_task.kasan_depth = 0;
}
void __init kasan_init(void)
{
kasan_init_shadow();
kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
pr_info("KernelAddressSanitizer initialized\n"); pr_info("KernelAddressSanitizer initialized\n");
#endif
} }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
...@@ -53,6 +53,15 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page) ...@@ -53,6 +53,15 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
if (!tags) if (!tags)
return false; return false;
page_kasan_tag_reset(page);
/*
* We need smp_wmb() in between setting the flags and clearing the
* tags because if another thread reads page->flags and builds a
* tagged address out of it, there is an actual dependency to the
* memory access, but on the current thread we do not guarantee that
* the new page->flags are visible before the tags were updated.
*/
smp_wmb();
mte_restore_page_tags(page_address(page), tags); mte_restore_page_tags(page_address(page), tags);
return true; return true;
......
...@@ -40,9 +40,15 @@ ...@@ -40,9 +40,15 @@
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA #define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
#ifdef CONFIG_KASAN_SW_TAGS #ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_FLAGS TCR_TBI1 | TCR_TBID1 #define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1
#else #else
#define TCR_KASAN_FLAGS 0 #define TCR_KASAN_SW_FLAGS 0
#endif
#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1
#else
#define TCR_KASAN_HW_FLAGS 0
#endif #endif
/* /*
...@@ -427,6 +433,10 @@ SYM_FUNC_START(__cpu_setup) ...@@ -427,6 +433,10 @@ SYM_FUNC_START(__cpu_setup)
*/ */
mov_q x5, MAIR_EL1_SET mov_q x5, MAIR_EL1_SET
#ifdef CONFIG_ARM64_MTE #ifdef CONFIG_ARM64_MTE
mte_tcr .req x20
mov mte_tcr, #0
/* /*
* Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
* (ID_AA64PFR1_EL1[11:8] > 1). * (ID_AA64PFR1_EL1[11:8] > 1).
...@@ -447,6 +457,9 @@ SYM_FUNC_START(__cpu_setup) ...@@ -447,6 +457,9 @@ SYM_FUNC_START(__cpu_setup)
/* clear any pending tag check faults in TFSR*_EL1 */ /* clear any pending tag check faults in TFSR*_EL1 */
msr_s SYS_TFSR_EL1, xzr msr_s SYS_TFSR_EL1, xzr
msr_s SYS_TFSRE0_EL1, xzr msr_s SYS_TFSRE0_EL1, xzr
/* set the TCR_EL1 bits */
mov_q mte_tcr, TCR_KASAN_HW_FLAGS
1: 1:
#endif #endif
msr mair_el1, x5 msr mair_el1, x5
...@@ -456,7 +469,11 @@ SYM_FUNC_START(__cpu_setup) ...@@ -456,7 +469,11 @@ SYM_FUNC_START(__cpu_setup)
*/ */
mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
#ifdef CONFIG_ARM64_MTE
orr x10, x10, mte_tcr
.unreq mte_tcr
#endif
tcr_clear_errata_bits x10, x9, x5 tcr_clear_errata_bits x10, x9, x5
#ifdef CONFIG_ARM64_VA_BITS_52 #ifdef CONFIG_ARM64_VA_BITS_52
......
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
enum address_markers_idx { enum address_markers_idx {
PAGE_OFFSET_NR = 0, PAGE_OFFSET_NR = 0,
PAGE_END_NR, PAGE_END_NR,
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
KASAN_START_NR, KASAN_START_NR,
#endif #endif
}; };
...@@ -37,7 +37,7 @@ enum address_markers_idx { ...@@ -37,7 +37,7 @@ enum address_markers_idx {
static struct addr_marker address_markers[] = { static struct addr_marker address_markers[] = {
{ PAGE_OFFSET, "Linear Mapping start" }, { PAGE_OFFSET, "Linear Mapping start" },
{ 0 /* PAGE_END */, "Linear Mapping end" }, { 0 /* PAGE_END */, "Linear Mapping end" },
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
{ 0 /* KASAN_SHADOW_START */, "Kasan shadow start" }, { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
{ KASAN_SHADOW_END, "Kasan shadow end" }, { KASAN_SHADOW_END, "Kasan shadow end" },
#endif #endif
...@@ -383,7 +383,7 @@ void ptdump_check_wx(void) ...@@ -383,7 +383,7 @@ void ptdump_check_wx(void)
static int ptdump_init(void) static int ptdump_init(void)
{ {
address_markers[PAGE_END_NR].start_address = PAGE_END; address_markers[PAGE_END_NR].start_address = PAGE_END;
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START; address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
#endif #endif
ptdump_initialize(); ptdump_initialize();
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/errno.h> #include <linux/errno.h>
#undef CONFIG_KASAN #undef CONFIG_KASAN
#undef CONFIG_KASAN_GENERIC
#include "../lib/string.c" #include "../lib/string.c"
int strncmp(const char *cs, const char *ct, size_t count) int strncmp(const char *cs, const char *ct, size_t count)
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#undef CONFIG_PARAVIRT_XXL #undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS #undef CONFIG_PARAVIRT_SPINLOCKS
#undef CONFIG_KASAN #undef CONFIG_KASAN
#undef CONFIG_KASAN_GENERIC
/* cpu_feature_enabled() cannot be used this early */ /* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5 #define USE_EARLY_PGTABLE_L5
......
...@@ -112,7 +112,7 @@ SYM_FUNC_START(do_suspend_lowlevel) ...@@ -112,7 +112,7 @@ SYM_FUNC_START(do_suspend_lowlevel)
movq pt_regs_r14(%rax), %r14 movq pt_regs_r14(%rax), %r14
movq pt_regs_r15(%rax), %r15 movq pt_regs_r15(%rax), %r15
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
/* /*
* The suspend path may have poisoned some areas deeper in the stack, * The suspend path may have poisoned some areas deeper in the stack,
* which we now need to unpoison. * which we now need to unpoison.
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
* even in compilation units that selectively disable KASAN, but must use KASAN * even in compilation units that selectively disable KASAN, but must use KASAN
* to validate access to an address. Never use these in header files! * to validate access to an address. Never use these in header files!
*/ */
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
bool __kasan_check_read(const volatile void *p, unsigned int size); bool __kasan_check_read(const volatile void *p, unsigned int size);
bool __kasan_check_write(const volatile void *p, unsigned int size); bool __kasan_check_write(const volatile void *p, unsigned int size);
#else #else
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/sizes.h> #include <linux/sizes.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <linux/kasan.h>
struct mempolicy; struct mempolicy;
struct anon_vma; struct anon_vma;
...@@ -1421,23 +1422,31 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) ...@@ -1421,23 +1422,31 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
} }
#endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_KASAN_SW_TAGS #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
static inline u8 page_kasan_tag(const struct page *page) static inline u8 page_kasan_tag(const struct page *page)
{ {
return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; if (kasan_enabled())
return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
return 0xff;
} }
static inline void page_kasan_tag_set(struct page *page, u8 tag) static inline void page_kasan_tag_set(struct page *page, u8 tag)
{ {
page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); if (kasan_enabled()) {
page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
}
} }
static inline void page_kasan_tag_reset(struct page *page) static inline void page_kasan_tag_reset(struct page *page)
{ {
page_kasan_tag_set(page, 0xff); if (kasan_enabled())
page_kasan_tag_set(page, 0xff);
} }
#else
#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
static inline u8 page_kasan_tag(const struct page *page) static inline u8 page_kasan_tag(const struct page *page)
{ {
return 0xff; return 0xff;
...@@ -1445,7 +1454,8 @@ static inline u8 page_kasan_tag(const struct page *page) ...@@ -1445,7 +1454,8 @@ static inline u8 page_kasan_tag(const struct page *page)
static inline void page_kasan_tag_set(struct page *page, u8 tag) { } static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { } static inline void page_kasan_tag_reset(struct page *page) { }
#endif
#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
static inline struct zone *page_zone(const struct page *page) static inline struct zone *page_zone(const struct page *page)
{ {
......
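
The kasan_enabled() gating above is the key behavioural change for HW_TAGS: the tag field kept in page->flags is only meaningful while the runtime is actually on, and reads fall back to the match-all value 0xff otherwise. A small standalone model of the accessors follows; the 56-bit shift and the kasan_on flag are stand-ins chosen for the example, not the real KASAN_TAG_PGSHIFT or static key.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KASAN_TAG_WIDTH   8
#define KASAN_TAG_MASK    ((1ULL << KASAN_TAG_WIDTH) - 1)
#define KASAN_TAG_PGSHIFT 56	/* placement within flags is illustrative */

static bool kasan_on = true;	/* stand-in for the kasan_enabled() static key */

static uint8_t page_tag_get(uint64_t flags)
{
	if (kasan_on)
		return (flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
	return 0xff;	/* "untagged" match-all value when KASAN is off */
}

static uint64_t page_tag_set(uint64_t flags, uint8_t tag)
{
	if (!kasan_on)
		return flags;
	flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
	return flags | ((uint64_t)(tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT);
}

int main(void)
{
	uint64_t flags = 0;

	flags = page_tag_set(flags, 0xf7);
	printf("page tag: 0x%02x\n", page_tag_get(flags));	/* prints 0xf7 */
	return 0;
}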
...@@ -96,7 +96,8 @@ void module_arch_cleanup(struct module *mod); ...@@ -96,7 +96,8 @@ void module_arch_cleanup(struct module *mod);
/* Any cleanup before freeing mod->module_init */ /* Any cleanup before freeing mod->module_init */
void module_arch_freeing_init(struct module *mod); void module_arch_freeing_init(struct module *mod);
#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC) #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
!defined(CONFIG_KASAN_VMALLOC)
#include <linux/kasan.h> #include <linux/kasan.h>
#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else #else
......
...@@ -77,7 +77,7 @@ ...@@ -77,7 +77,7 @@
#define LAST_CPUPID_SHIFT 0 #define LAST_CPUPID_SHIFT 0
#endif #endif
#ifdef CONFIG_KASAN_SW_TAGS #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
#define KASAN_TAG_WIDTH 8 #define KASAN_TAG_WIDTH 8
#else #else
#define KASAN_TAG_WIDTH 0 #define KASAN_TAG_WIDTH 0
......
...@@ -1234,7 +1234,7 @@ struct task_struct { ...@@ -1234,7 +1234,7 @@ struct task_struct {
u64 timer_slack_ns; u64 timer_slack_ns;
u64 default_timer_slack_ns; u64 default_timer_slack_ns;
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
unsigned int kasan_depth; unsigned int kasan_depth;
#endif #endif
......
...@@ -267,7 +267,7 @@ void __write_overflow(void) __compiletime_error("detected write beyond size of o ...@@ -267,7 +267,7 @@ void __write_overflow(void) __compiletime_error("detected write beyond size of o
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
......
...@@ -176,7 +176,7 @@ struct task_struct init_task ...@@ -176,7 +176,7 @@ struct task_struct init_task
.numa_group = NULL, .numa_group = NULL,
.numa_faults = NULL, .numa_faults = NULL,
#endif #endif
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
.kasan_depth = 1, .kasan_depth = 1,
#endif #endif
#ifdef CONFIG_KCSAN #ifdef CONFIG_KCSAN
......
...@@ -225,8 +225,8 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) ...@@ -225,8 +225,8 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
if (!s) if (!s)
continue; continue;
/* Clear the KASAN shadow of the stack. */ /* Mark stack accessible for KASAN. */
kasan_unpoison_shadow(s->addr, THREAD_SIZE); kasan_unpoison_range(s->addr, THREAD_SIZE);
/* Clear stale pointers from reused stack. */ /* Clear stale pointers from reused stack. */
memset(s->addr, 0, THREAD_SIZE); memset(s->addr, 0, THREAD_SIZE);
......
...@@ -6,7 +6,10 @@ config HAVE_ARCH_KASAN ...@@ -6,7 +6,10 @@ config HAVE_ARCH_KASAN
config HAVE_ARCH_KASAN_SW_TAGS config HAVE_ARCH_KASAN_SW_TAGS
bool bool
config HAVE_ARCH_KASAN_VMALLOC config HAVE_ARCH_KASAN_HW_TAGS
bool
config HAVE_ARCH_KASAN_VMALLOC
bool bool
config CC_HAS_KASAN_GENERIC config CC_HAS_KASAN_GENERIC
...@@ -15,15 +18,20 @@ config CC_HAS_KASAN_GENERIC ...@@ -15,15 +18,20 @@ config CC_HAS_KASAN_GENERIC
config CC_HAS_KASAN_SW_TAGS config CC_HAS_KASAN_SW_TAGS
def_bool $(cc-option, -fsanitize=kernel-hwaddress) def_bool $(cc-option, -fsanitize=kernel-hwaddress)
# This option is only required for software KASAN modes.
# Old GCC versions don't have proper support for no_sanitize_address.
# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89124 for details.
config CC_HAS_WORKING_NOSANITIZE_ADDRESS config CC_HAS_WORKING_NOSANITIZE_ADDRESS
def_bool !CC_IS_GCC || GCC_VERSION >= 80300 def_bool !CC_IS_GCC || GCC_VERSION >= 80300
menuconfig KASAN menuconfig KASAN
bool "KASAN: runtime memory debugger" bool "KASAN: runtime memory debugger"
depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \ depends on (((HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS) (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
HAVE_ARCH_KASAN_HW_TAGS
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS select STACKDEPOT
help help
Enables KASAN (KernelAddressSANitizer) - runtime memory debugger, Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs. designed to find out-of-bounds accesses and use-after-free bugs.
...@@ -35,21 +43,24 @@ choice ...@@ -35,21 +43,24 @@ choice
prompt "KASAN mode" prompt "KASAN mode"
default KASAN_GENERIC default KASAN_GENERIC
help help
KASAN has two modes: generic KASAN (similar to userspace ASan, KASAN has three modes:
x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC) and 1. generic KASAN (similar to userspace ASan,
software tag-based KASAN (a version based on software memory x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC),
tagging, arm64 only, similar to userspace HWASan, enabled with 2. software tag-based KASAN (arm64 only, based on software
CONFIG_KASAN_SW_TAGS). memory tagging (similar to userspace HWASan), enabled with
CONFIG_KASAN_SW_TAGS), and
3. hardware tag-based KASAN (arm64 only, based on hardware
memory tagging, enabled with CONFIG_KASAN_HW_TAGS).
All KASAN modes are strictly debugging features.
Both generic and tag-based KASAN are strictly debugging features. For better error reports enable CONFIG_STACKTRACE.
config KASAN_GENERIC config KASAN_GENERIC
bool "Generic mode" bool "Generic mode"
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
select SLUB_DEBUG if SLUB select SLUB_DEBUG if SLUB
select CONSTRUCTORS select CONSTRUCTORS
select STACKDEPOT
help help
Enables generic KASAN mode. Enables generic KASAN mode.
...@@ -62,23 +73,22 @@ config KASAN_GENERIC ...@@ -62,23 +73,22 @@ config KASAN_GENERIC
and introduces an overhead of ~x1.5 for the rest of the allocations. and introduces an overhead of ~x1.5 for the rest of the allocations.
The performance slowdown is ~x3. The performance slowdown is ~x3.
For better error detection enable CONFIG_STACKTRACE.
Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot). (the resulting kernel does not boot).
config KASAN_SW_TAGS config KASAN_SW_TAGS
bool "Software tag-based mode" bool "Software tag-based mode"
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
select SLUB_DEBUG if SLUB select SLUB_DEBUG if SLUB
select CONSTRUCTORS select CONSTRUCTORS
select STACKDEPOT
help help
Enables software tag-based KASAN mode. Enables software tag-based KASAN mode.
This mode requires Top Byte Ignore support by the CPU and therefore This mode require software memory tagging support in the form of
is only supported for arm64. This mode requires Clang. HWASan-like compiler instrumentation.
Currently this mode is only implemented for arm64 CPUs and relies on
Top Byte Ignore. This mode requires Clang.
This mode consumes about 1/16th of available memory at kernel start This mode consumes about 1/16th of available memory at kernel start
and introduces an overhead of ~20% for the rest of the allocations. and introduces an overhead of ~20% for the rest of the allocations.
...@@ -86,15 +96,27 @@ config KASAN_SW_TAGS ...@@ -86,15 +96,27 @@ config KASAN_SW_TAGS
casting and comparison, as it embeds tags into the top byte of each casting and comparison, as it embeds tags into the top byte of each
pointer. pointer.
For better error detection enable CONFIG_STACKTRACE.
Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot). (the resulting kernel does not boot).
config KASAN_HW_TAGS
bool "Hardware tag-based mode"
depends on HAVE_ARCH_KASAN_HW_TAGS
depends on SLUB
help
Enables hardware tag-based KASAN mode.
This mode requires hardware memory tagging support, and can be used
by any architecture that provides it.
Currently this mode is only implemented for arm64 CPUs starting from
ARMv8.5 and relies on Memory Tagging Extension and Top Byte Ignore.
endchoice endchoice
choice choice
prompt "Instrumentation type" prompt "Instrumentation type"
depends on KASAN_GENERIC || KASAN_SW_TAGS
default KASAN_OUTLINE default KASAN_OUTLINE
config KASAN_OUTLINE config KASAN_OUTLINE
...@@ -118,6 +140,7 @@ endchoice ...@@ -118,6 +140,7 @@ endchoice
config KASAN_STACK_ENABLE config KASAN_STACK_ENABLE
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
depends on KASAN_GENERIC || KASAN_SW_TAGS
help help
The LLVM stack address sanitizer has a known problem that The LLVM stack address sanitizer has a known problem that
causes excessive stack usage in a lot of functions, see causes excessive stack usage in a lot of functions, see
...@@ -146,7 +169,7 @@ config KASAN_SW_TAGS_IDENTIFY ...@@ -146,7 +169,7 @@ config KASAN_SW_TAGS_IDENTIFY
config KASAN_VMALLOC config KASAN_VMALLOC
bool "Back mappings in vmalloc space with real shadow memory" bool "Back mappings in vmalloc space with real shadow memory"
depends on HAVE_ARCH_KASAN_VMALLOC depends on KASAN_GENERIC && HAVE_ARCH_KASAN_VMALLOC
help help
By default, the shadow region for vmalloc space is the read-only By default, the shadow region for vmalloc space is the read-only
zero page. This means that KASAN cannot detect errors involving zero page. This means that KASAN cannot detect errors involving
......
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#include "../mm/kasan/kasan.h" #include "../mm/kasan/kasan.h"
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE) #define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
/* /*
* We assign some test results to these globals to make sure the tests * We assign some test results to these globals to make sure the tests
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#include "../mm/kasan/kasan.h" #include "../mm/kasan/kasan.h"
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE) #define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
static noinline void __init copy_user_test(void) static noinline void __init copy_user_test(void)
{ {
......
...@@ -6,12 +6,15 @@ KCOV_INSTRUMENT := n ...@@ -6,12 +6,15 @@ KCOV_INSTRUMENT := n
# Disable ftrace to avoid recursion. # Disable ftrace to avoid recursion.
CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_generic.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_generic.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_generic_report.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_quarantine.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_quarantine.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_tags.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_report_generic.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_tags_report.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_report_hw_tags.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_report_sw_tags.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_shadow.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_hw_tags.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sw_tags.o = $(CC_FLAGS_FTRACE)
# Function splitter causes unnecessary splits in __asan_load1/__asan_store1 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
...@@ -22,13 +25,17 @@ CC_FLAGS_KASAN_RUNTIME += -DDISABLE_BRANCH_PROFILING ...@@ -22,13 +25,17 @@ CC_FLAGS_KASAN_RUNTIME += -DDISABLE_BRANCH_PROFILING
CFLAGS_common.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_common.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_generic.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_generic.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_generic_report.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_init.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_init.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_quarantine.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_quarantine.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_report.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_report.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_tags.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_report_generic.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_tags_report.o := $(CC_FLAGS_KASAN_RUNTIME) CFLAGS_report_hw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_report_sw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_shadow.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_hw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_sw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
obj-$(CONFIG_KASAN) := common.o init.o report.o obj-$(CONFIG_KASAN) := common.o report.o
obj-$(CONFIG_KASAN_GENERIC) += generic.o generic_report.o quarantine.o obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quarantine.o
obj-$(CONFIG_KASAN_SW_TAGS) += tags.o tags_report.o obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o
obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o
This diff is collapsed.
...@@ -7,15 +7,8 @@ ...@@ -7,15 +7,8 @@
* *
* Some code borrowed from https://github.com/xairy/kasan-prototype by * Some code borrowed from https://github.com/xairy/kasan-prototype by
* Andrey Konovalov <andreyknvl@gmail.com> * Andrey Konovalov <andreyknvl@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/export.h> #include <linux/export.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -51,7 +44,7 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr) ...@@ -51,7 +44,7 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr)
s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr); s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
if (unlikely(shadow_value)) { if (unlikely(shadow_value)) {
s8 last_accessible_byte = addr & KASAN_SHADOW_MASK; s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
return unlikely(last_accessible_byte >= shadow_value); return unlikely(last_accessible_byte >= shadow_value);
} }
...@@ -67,7 +60,7 @@ static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr, ...@@ -67,7 +60,7 @@ static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
* Access crosses an 8-byte (shadow size) boundary. Such an access maps * Access crosses an 8-byte (shadow size) boundary. Such an access maps
* into 2 shadow bytes, so we need to check them both. * into 2 shadow bytes, so we need to check them both.
*/ */
if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1)) if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
return *shadow_addr || memory_is_poisoned_1(addr + size - 1); return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
return memory_is_poisoned_1(addr + size - 1); return memory_is_poisoned_1(addr + size - 1);
...@@ -78,7 +71,7 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr) ...@@ -78,7 +71,7 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr)
u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
/* Unaligned 16-byte access maps into 3 shadow bytes. */ /* Unaligned 16-byte access maps into 3 shadow bytes. */
if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
return *shadow_addr || memory_is_poisoned_1(addr + 15); return *shadow_addr || memory_is_poisoned_1(addr + 15);
return *shadow_addr; return *shadow_addr;
...@@ -139,7 +132,7 @@ static __always_inline bool memory_is_poisoned_n(unsigned long addr, ...@@ -139,7 +132,7 @@ static __always_inline bool memory_is_poisoned_n(unsigned long addr,
s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte); s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
if (unlikely(ret != (unsigned long)last_shadow || if (unlikely(ret != (unsigned long)last_shadow ||
((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow))) ((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
return true; return true;
} }
return false; return false;
...@@ -192,6 +185,13 @@ bool check_memory_region(unsigned long addr, size_t size, bool write, ...@@ -192,6 +185,13 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
return check_memory_region_inline(addr, size, write, ret_ip); return check_memory_region_inline(addr, size, write, ret_ip);
} }
bool check_invalid_free(void *addr)
{
s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
return shadow_byte < 0 || shadow_byte >= KASAN_GRANULE_SIZE;
}
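To unpack the new check: in generic mode a shadow byte of 0 means the whole granule is addressable and a value in [1, KASAN_GRANULE_SIZE) means only that many leading bytes are, while the poison markers (freed memory, redzones) are large values that read as negative through the s8 cast; so a negative byte, or one at or above KASAN_GRANULE_SIZE, indicates that the pointer being freed does not refer to the start of a live object.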
void kasan_cache_shrink(struct kmem_cache *cache) void kasan_cache_shrink(struct kmem_cache *cache)
{ {
quarantine_remove_cache(cache); quarantine_remove_cache(cache);
...@@ -205,13 +205,13 @@ void kasan_cache_shutdown(struct kmem_cache *cache) ...@@ -205,13 +205,13 @@ void kasan_cache_shutdown(struct kmem_cache *cache)
static void register_global(struct kasan_global *global) static void register_global(struct kasan_global *global)
{ {
size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE); size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);
kasan_unpoison_shadow(global->beg, global->size); unpoison_range(global->beg, global->size);
kasan_poison_shadow(global->beg + aligned_size, poison_range(global->beg + aligned_size,
global->size_with_redzone - aligned_size, global->size_with_redzone - aligned_size,
KASAN_GLOBAL_REDZONE); KASAN_GLOBAL_REDZONE);
} }
void __asan_register_globals(struct kasan_global *globals, size_t size) void __asan_register_globals(struct kasan_global *globals, size_t size)
...@@ -279,10 +279,10 @@ EXPORT_SYMBOL(__asan_handle_no_return); ...@@ -279,10 +279,10 @@ EXPORT_SYMBOL(__asan_handle_no_return);
/* Emitted by compiler to poison alloca()ed objects. */ /* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size) void __asan_alloca_poison(unsigned long addr, size_t size)
{ {
size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) - size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
rounded_up_size; rounded_up_size;
size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE); size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);
const void *left_redzone = (const void *)(addr - const void *left_redzone = (const void *)(addr -
KASAN_ALLOCA_REDZONE_SIZE); KASAN_ALLOCA_REDZONE_SIZE);
...@@ -290,13 +290,12 @@ void __asan_alloca_poison(unsigned long addr, size_t size) ...@@ -290,13 +290,12 @@ void __asan_alloca_poison(unsigned long addr, size_t size)
WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE)); WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));
kasan_unpoison_shadow((const void *)(addr + rounded_down_size), unpoison_range((const void *)(addr + rounded_down_size),
size - rounded_down_size); size - rounded_down_size);
kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE, poison_range(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
KASAN_ALLOCA_LEFT); KASAN_ALLOCA_LEFT);
kasan_poison_shadow(right_redzone, poison_range(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
padding_size + KASAN_ALLOCA_REDZONE_SIZE, KASAN_ALLOCA_RIGHT);
KASAN_ALLOCA_RIGHT);
} }
EXPORT_SYMBOL(__asan_alloca_poison); EXPORT_SYMBOL(__asan_alloca_poison);
...@@ -306,7 +305,7 @@ void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom) ...@@ -306,7 +305,7 @@ void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
if (unlikely(!stack_top || stack_top > stack_bottom)) if (unlikely(!stack_top || stack_top > stack_bottom))
return; return;
kasan_unpoison_shadow(stack_top, stack_bottom - stack_top); unpoison_range(stack_top, stack_bottom - stack_top);
} }
EXPORT_SYMBOL(__asan_allocas_unpoison); EXPORT_SYMBOL(__asan_allocas_unpoison);
...@@ -329,7 +328,7 @@ void kasan_record_aux_stack(void *addr) ...@@ -329,7 +328,7 @@ void kasan_record_aux_stack(void *addr)
{ {
struct page *page = kasan_addr_to_page(addr); struct page *page = kasan_addr_to_page(addr);
struct kmem_cache *cache; struct kmem_cache *cache;
struct kasan_alloc_meta *alloc_info; struct kasan_alloc_meta *alloc_meta;
void *object; void *object;
if (!(page && PageSlab(page))) if (!(page && PageSlab(page)))
...@@ -337,10 +336,10 @@ void kasan_record_aux_stack(void *addr) ...@@ -337,10 +336,10 @@ void kasan_record_aux_stack(void *addr)
cache = page->slab_cache; cache = page->slab_cache;
object = nearest_obj(cache, page, addr); object = nearest_obj(cache, page, addr);
alloc_info = get_alloc_info(cache, object); alloc_meta = kasan_get_alloc_meta(cache, object);
alloc_info->aux_stack[1] = alloc_info->aux_stack[0]; alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
alloc_info->aux_stack[0] = kasan_save_stack(GFP_NOWAIT); alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
} }
void kasan_set_free_info(struct kmem_cache *cache, void kasan_set_free_info(struct kmem_cache *cache,
...@@ -348,12 +347,12 @@ void kasan_set_free_info(struct kmem_cache *cache, ...@@ -348,12 +347,12 @@ void kasan_set_free_info(struct kmem_cache *cache,
{ {
struct kasan_free_meta *free_meta; struct kasan_free_meta *free_meta;
free_meta = get_free_info(cache, object); free_meta = kasan_get_free_meta(cache, object);
kasan_set_track(&free_meta->free_track, GFP_NOWAIT); if (!free_meta)
return;
/* kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
* the object was freed and has free track set /* The object was freed and has free track set. */
*/
*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREETRACK; *(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREETRACK;
} }
...@@ -362,5 +361,6 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, ...@@ -362,5 +361,6 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
{ {
if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_KMALLOC_FREETRACK) if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_KMALLOC_FREETRACK)
return NULL; return NULL;
return &get_free_info(cache, object)->free_track; /* Free meta must be present with KASAN_KMALLOC_FREETRACK. */
return &kasan_get_free_meta(cache, object)->free_track;
} }
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains core hardware tag-based KASAN code.
*
* Copyright (c) 2020 Google, Inc.
* Author: Andrey Konovalov <andreyknvl@google.com>
*/
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/static_key.h>
#include <linux/string.h>
#include <linux/types.h>
#include "kasan.h"
enum kasan_arg_mode {
KASAN_ARG_MODE_DEFAULT,
KASAN_ARG_MODE_OFF,
KASAN_ARG_MODE_PROD,
KASAN_ARG_MODE_FULL,
};
enum kasan_arg_stacktrace {
KASAN_ARG_STACKTRACE_DEFAULT,
KASAN_ARG_STACKTRACE_OFF,
KASAN_ARG_STACKTRACE_ON,
};
enum kasan_arg_fault {
KASAN_ARG_FAULT_DEFAULT,
KASAN_ARG_FAULT_REPORT,
KASAN_ARG_FAULT_PANIC,
};
static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
static enum kasan_arg_stacktrace kasan_arg_stacktrace __ro_after_init;
static enum kasan_arg_fault kasan_arg_fault __ro_after_init;
/* Whether KASAN is enabled at all. */
DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
EXPORT_SYMBOL(kasan_flag_enabled);
/* Whether to collect alloc/free stack traces. */
DEFINE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
/* Whether to panic or disable tag checking on fault. */
bool kasan_flag_panic __ro_after_init;
/* kasan.mode=off/prod/full */
static int __init early_kasan_mode(char *arg)
{
if (!arg)
return -EINVAL;
if (!strcmp(arg, "off"))
kasan_arg_mode = KASAN_ARG_MODE_OFF;
else if (!strcmp(arg, "prod"))
kasan_arg_mode = KASAN_ARG_MODE_PROD;
else if (!strcmp(arg, "full"))
kasan_arg_mode = KASAN_ARG_MODE_FULL;
else
return -EINVAL;
return 0;
}
early_param("kasan.mode", early_kasan_mode);
/* kasan.stack=off/on */
static int __init early_kasan_flag_stacktrace(char *arg)
{
if (!arg)
return -EINVAL;
if (!strcmp(arg, "off"))
kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_OFF;
else if (!strcmp(arg, "on"))
kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_ON;
else
return -EINVAL;
return 0;
}
early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
/* kasan.fault=report/panic */
static int __init early_kasan_fault(char *arg)
{
if (!arg)
return -EINVAL;
if (!strcmp(arg, "report"))
kasan_arg_fault = KASAN_ARG_FAULT_REPORT;
else if (!strcmp(arg, "panic"))
kasan_arg_fault = KASAN_ARG_FAULT_PANIC;
else
return -EINVAL;
return 0;
}
early_param("kasan.fault", early_kasan_fault);
/* kasan_init_hw_tags_cpu() is called for each CPU. */
void kasan_init_hw_tags_cpu(void)
{
/*
* There's no need to check that the hardware is MTE-capable here,
* as this function is only called for MTE-capable hardware.
*/
/* If KASAN is disabled, do nothing. */
if (kasan_arg_mode == KASAN_ARG_MODE_OFF)
return;
hw_init_tags(KASAN_TAG_MAX);
hw_enable_tagging();
}
/* kasan_init_hw_tags() is called once on boot CPU. */
void __init kasan_init_hw_tags(void)
{
/* If hardware doesn't support MTE, do nothing. */
if (!system_supports_mte())
return;
/* Choose the KASAN mode if the kasan.mode boot parameter is not provided. */
if (kasan_arg_mode == KASAN_ARG_MODE_DEFAULT) {
if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
kasan_arg_mode = KASAN_ARG_MODE_FULL;
else
kasan_arg_mode = KASAN_ARG_MODE_PROD;
}
/* Preset parameter values based on the mode. */
switch (kasan_arg_mode) {
case KASAN_ARG_MODE_DEFAULT:
/* Shouldn't happen as per the check above. */
WARN_ON(1);
return;
case KASAN_ARG_MODE_OFF:
/* If KASAN is disabled, do nothing. */
return;
case KASAN_ARG_MODE_PROD:
static_branch_enable(&kasan_flag_enabled);
break;
case KASAN_ARG_MODE_FULL:
static_branch_enable(&kasan_flag_enabled);
static_branch_enable(&kasan_flag_stacktrace);
break;
}
/* Now, optionally override the presets. */
switch (kasan_arg_stacktrace) {
case KASAN_ARG_STACKTRACE_DEFAULT:
break;
case KASAN_ARG_STACKTRACE_OFF:
static_branch_disable(&kasan_flag_stacktrace);
break;
case KASAN_ARG_STACKTRACE_ON:
static_branch_enable(&kasan_flag_stacktrace);
break;
}
switch (kasan_arg_fault) {
case KASAN_ARG_FAULT_DEFAULT:
break;
case KASAN_ARG_FAULT_REPORT:
kasan_flag_panic = false;
break;
case KASAN_ARG_FAULT_PANIC:
kasan_flag_panic = true;
break;
}
pr_info("KernelAddressSanitizer initialized\n");
}
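To make the ordering above concrete (a worked example, not new behaviour): on a build without CONFIG_DEBUG_KERNEL, booting with only kasan.stacktrace=on first falls back to the prod preset (checks enabled, stack collection off) and the later switch statement then re-enables stack collection; conversely, kasan.mode=full kasan.stacktrace=off ends up with checks on and stack traces off, because the explicit flag overrides the preset.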
void kasan_set_free_info(struct kmem_cache *cache,
void *object, u8 tag)
{
struct kasan_alloc_meta *alloc_meta;
alloc_meta = kasan_get_alloc_meta(cache, object);
if (alloc_meta)
kasan_set_track(&alloc_meta->free_track[0], GFP_NOWAIT);
}
struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
void *object, u8 tag)
{
struct kasan_alloc_meta *alloc_meta;
alloc_meta = kasan_get_alloc_meta(cache, object);
if (!alloc_meta)
return NULL;
return &alloc_meta->free_track[0];
}
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* /*
* This file contains some kasan initialization code. * This file contains KASAN shadow initialization code.
* *
* Copyright (c) 2015 Samsung Electronics Co., Ltd. * Copyright (c) 2015 Samsung Electronics Co., Ltd.
* Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/ */
#include <linux/memblock.h> #include <linux/memblock.h>
...@@ -446,9 +441,8 @@ void kasan_remove_zero_shadow(void *start, unsigned long size) ...@@ -446,9 +441,8 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
addr = (unsigned long)kasan_mem_to_shadow(start); addr = (unsigned long)kasan_mem_to_shadow(start);
end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT); end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);
if (WARN_ON((unsigned long)start % if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) || WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
return; return;
for (; addr < end; addr = next) { for (; addr < end; addr = next) {
...@@ -481,9 +475,8 @@ int kasan_add_zero_shadow(void *start, unsigned long size) ...@@ -481,9 +475,8 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
shadow_start = kasan_mem_to_shadow(start); shadow_start = kasan_mem_to_shadow(start);
shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT); shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);
if (WARN_ON((unsigned long)start % if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) || WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
return -EINVAL; return -EINVAL;
ret = kasan_populate_early_shadow(shadow_start, shadow_end); ret = kasan_populate_early_shadow(shadow_start, shadow_end);
......
...@@ -5,8 +5,32 @@ ...@@ -5,8 +5,32 @@
#include <linux/kasan.h> #include <linux/kasan.h>
#include <linux/stackdepot.h> #include <linux/stackdepot.h>
#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT) #ifdef CONFIG_KASAN_HW_TAGS
#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1) #include <linux/static_key.h>
DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
static inline bool kasan_stack_collection_enabled(void)
{
return static_branch_unlikely(&kasan_flag_stacktrace);
}
#else
static inline bool kasan_stack_collection_enabled(void)
{
return true;
}
#endif
extern bool kasan_flag_panic __ro_after_init;
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
#else
#include <asm/mte-kasan.h>
#define KASAN_GRANULE_SIZE MTE_GRANULE_SIZE
#endif
#define KASAN_GRANULE_MASK (KASAN_GRANULE_SIZE - 1)
#define KASAN_MEMORY_PER_SHADOW_PAGE (KASAN_GRANULE_SIZE << PAGE_SHIFT)
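For readers unfamiliar with the shadow scheme these macros describe, here is a minimal sketch of the software-mode address-to-shadow translation; it mirrors the existing kasan_mem_to_shadow() helper in include/linux/kasan.h and is shown purely for illustration.
/*
 * Illustrative only: each KASAN_GRANULE_SIZE-byte chunk of memory is
 * described by one shadow byte, so the shadow address is the memory
 * address scaled down by KASAN_SHADOW_SCALE_SHIFT plus a fixed offset.
 */
static inline void *example_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}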
#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */ #define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */ #define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
...@@ -56,6 +80,13 @@ ...@@ -56,6 +80,13 @@
#define KASAN_ABI_VERSION 1 #define KASAN_ABI_VERSION 1
#endif #endif
/* Metadata layout customization. */
#define META_BYTES_PER_BLOCK 1
#define META_BLOCKS_PER_ROW 16
#define META_BYTES_PER_ROW (META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
#define META_ROWS_AROUND_ADDR 2
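A quick worked check of these row constants (the numbers follow directly from the definitions): one row is META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK = 16 metadata bytes, so with generic KASAN's 8-byte granules it describes 128 bytes of memory and with the 16-byte granules used by the tag-based modes it describes 256 bytes; META_ROWS_AROUND_ADDR = 2 controls how many such rows the report prints around the faulting address.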
struct kasan_access_info { struct kasan_access_info {
const void *access_addr; const void *access_addr;
const void *first_bad_addr; const void *first_bad_addr;
...@@ -124,20 +155,33 @@ struct kasan_alloc_meta { ...@@ -124,20 +155,33 @@ struct kasan_alloc_meta {
struct qlist_node { struct qlist_node {
struct qlist_node *next; struct qlist_node *next;
}; };
/*
* Generic mode either stores free meta in the object itself or in the redzone
* after the object. In the former case free meta offset is 0, in the latter
* case it has some sane value smaller than INT_MAX. Use INT_MAX as free meta
* offset when free meta isn't present.
*/
#define KASAN_NO_FREE_META INT_MAX
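A hedged sketch of what the renamed free-meta accessor is expected to do with this sentinel (assumptions: the struct kasan_cache field layout is unchanged and the helper name matches the one introduced by this series; the function below is illustrative, not the patch's code):
/*
 * Sketch only: caches whose free metadata did not fit carry the
 * KASAN_NO_FREE_META sentinel and therefore yield no free meta at all.
 */
static struct kasan_free_meta *example_get_free_meta(struct kmem_cache *cache,
						     const void *object)
{
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return (struct kasan_free_meta *)((unsigned long)object +
			cache->kasan_info.free_meta_offset);
}
This is why the callers touched later in the diff, such as kasan_set_free_info() and quarantine_put(), now bail out when the helper returns NULL.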
struct kasan_free_meta { struct kasan_free_meta {
#ifdef CONFIG_KASAN_GENERIC
/* This field is used while the object is in the quarantine. /* This field is used while the object is in the quarantine.
* Otherwise it might be used for the allocator freelist. * Otherwise it might be used for the allocator freelist.
*/ */
struct qlist_node quarantine_link; struct qlist_node quarantine_link;
#ifdef CONFIG_KASAN_GENERIC
struct kasan_track free_track; struct kasan_track free_track;
#endif #endif
}; };
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
const void *object); const void *object);
struct kasan_free_meta *get_free_info(struct kmem_cache *cache, #ifdef CONFIG_KASAN_GENERIC
const void *object); struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
const void *object);
#endif
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
static inline const void *kasan_shadow_to_mem(const void *shadow_addr) static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{ {
...@@ -145,13 +189,11 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr) ...@@ -145,13 +189,11 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
<< KASAN_SHADOW_SCALE_SHIFT); << KASAN_SHADOW_SCALE_SHIFT);
} }
static inline bool addr_has_shadow(const void *addr) static inline bool addr_has_metadata(const void *addr)
{ {
return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START)); return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
} }
void kasan_poison_shadow(const void *address, size_t size, u8 value);
/** /**
* check_memory_region - Check memory region, and report if invalid access. * check_memory_region - Check memory region, and report if invalid access.
* @addr: the accessed address * @addr: the accessed address
...@@ -163,8 +205,30 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value); ...@@ -163,8 +205,30 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value);
bool check_memory_region(unsigned long addr, size_t size, bool write, bool check_memory_region(unsigned long addr, size_t size, bool write,
unsigned long ret_ip); unsigned long ret_ip);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline bool addr_has_metadata(const void *addr)
{
return true;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
void print_tags(u8 addr_tag, const void *addr);
#else
static inline void print_tags(u8 addr_tag, const void *addr) { }
#endif
void *find_first_bad_addr(void *addr, size_t size); void *find_first_bad_addr(void *addr, size_t size);
const char *get_bug_type(struct kasan_access_info *info); const char *get_bug_type(struct kasan_access_info *info);
void metadata_fetch_row(char *buffer, void *row);
#if defined(CONFIG_KASAN_GENERIC) && CONFIG_KASAN_STACK
void print_address_stack_frame(const void *addr);
#else
static inline void print_address_stack_frame(const void *addr) { }
#endif
bool kasan_report(unsigned long addr, size_t size, bool kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip); bool is_write, unsigned long ip);
...@@ -180,49 +244,92 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, ...@@ -180,49 +244,92 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
#if defined(CONFIG_KASAN_GENERIC) && \ #if defined(CONFIG_KASAN_GENERIC) && \
(defined(CONFIG_SLAB) || defined(CONFIG_SLUB)) (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache); bool quarantine_put(struct kmem_cache *cache, void *object);
void quarantine_reduce(void); void quarantine_reduce(void);
void quarantine_remove_cache(struct kmem_cache *cache); void quarantine_remove_cache(struct kmem_cache *cache);
#else #else
static inline void quarantine_put(struct kasan_free_meta *info, static inline bool quarantine_put(struct kmem_cache *cache, void *object) { return false; }
struct kmem_cache *cache) { }
static inline void quarantine_reduce(void) { } static inline void quarantine_reduce(void) { }
static inline void quarantine_remove_cache(struct kmem_cache *cache) { } static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
#endif #endif
#ifdef CONFIG_KASAN_SW_TAGS #ifndef arch_kasan_set_tag
static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
{
return addr;
}
#endif
#ifndef arch_kasan_get_tag
#define arch_kasan_get_tag(addr) 0
#endif
void print_tags(u8 addr_tag, const void *addr); #define set_tag(addr, tag) ((void *)arch_kasan_set_tag((addr), (tag)))
#define get_tag(addr) arch_kasan_get_tag(addr)
u8 random_tag(void); #ifdef CONFIG_KASAN_HW_TAGS
#ifndef arch_enable_tagging
#define arch_enable_tagging()
#endif
#ifndef arch_init_tags
#define arch_init_tags(max_tag)
#endif
#ifndef arch_get_random_tag
#define arch_get_random_tag() (0xFF)
#endif
#ifndef arch_get_mem_tag
#define arch_get_mem_tag(addr) (0xFF)
#endif
#ifndef arch_set_mem_tag_range
#define arch_set_mem_tag_range(addr, size, tag) ((void *)(addr))
#endif
#define hw_enable_tagging() arch_enable_tagging()
#define hw_init_tags(max_tag) arch_init_tags(max_tag)
#define hw_get_random_tag() arch_get_random_tag()
#define hw_get_mem_tag(addr) arch_get_mem_tag(addr)
#define hw_set_mem_tag_range(addr, size, tag) arch_set_mem_tag_range((addr), (size), (tag))
#endif /* CONFIG_KASAN_HW_TAGS */
#ifdef CONFIG_KASAN_SW_TAGS
u8 random_tag(void);
#elif defined(CONFIG_KASAN_HW_TAGS)
static inline u8 random_tag(void) { return hw_get_random_tag(); }
#else #else
static inline u8 random_tag(void) { return 0; }
#endif
static inline void print_tags(u8 addr_tag, const void *addr) { } #ifdef CONFIG_KASAN_HW_TAGS
static inline u8 random_tag(void) static inline void poison_range(const void *address, size_t size, u8 value)
{ {
return 0; hw_set_mem_tag_range(kasan_reset_tag(address),
round_up(size, KASAN_GRANULE_SIZE), value);
} }
#endif static inline void unpoison_range(const void *address, size_t size)
{
hw_set_mem_tag_range(kasan_reset_tag(address),
round_up(size, KASAN_GRANULE_SIZE), get_tag(address));
}
#ifndef arch_kasan_set_tag static inline bool check_invalid_free(void *addr)
static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
{ {
return addr; u8 ptr_tag = get_tag(addr);
u8 mem_tag = hw_get_mem_tag(addr);
return (mem_tag == KASAN_TAG_INVALID) ||
(ptr_tag != KASAN_TAG_KERNEL && ptr_tag != mem_tag);
} }
#endif
#ifndef arch_kasan_reset_tag
#define arch_kasan_reset_tag(addr) ((void *)(addr))
#endif
#ifndef arch_kasan_get_tag
#define arch_kasan_get_tag(addr) 0
#endif
#define set_tag(addr, tag) ((void *)arch_kasan_set_tag((addr), (tag))) #else /* CONFIG_KASAN_HW_TAGS */
#define reset_tag(addr) ((void *)arch_kasan_reset_tag(addr))
#define get_tag(addr) arch_kasan_get_tag(addr) void poison_range(const void *address, size_t size, u8 value);
void unpoison_range(const void *address, size_t size);
bool check_invalid_free(void *addr);
#endif /* CONFIG_KASAN_HW_TAGS */
/* /*
* Exported functions for interfaces called from assembly or from generated * Exported functions for interfaces called from assembly or from generated
......
...@@ -6,16 +6,6 @@ ...@@ -6,16 +6,6 @@
* Copyright (C) 2016 Google, Inc. * Copyright (C) 2016 Google, Inc.
* *
* Based on code by Dmitry Chernenkov. * Based on code by Dmitry Chernenkov.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
*/ */
#include <linux/gfp.h> #include <linux/gfp.h>
...@@ -147,7 +137,12 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) ...@@ -147,7 +137,12 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
if (IS_ENABLED(CONFIG_SLAB)) if (IS_ENABLED(CONFIG_SLAB))
local_irq_save(flags); local_irq_save(flags);
/*
* As the object now gets freed from the quarantine, assume that its
* free track is no longer valid.
*/
*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREE; *(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREE;
___cache_free(cache, object, _THIS_IP_); ___cache_free(cache, object, _THIS_IP_);
if (IS_ENABLED(CONFIG_SLAB)) if (IS_ENABLED(CONFIG_SLAB))
...@@ -173,11 +168,19 @@ static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache) ...@@ -173,11 +168,19 @@ static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
qlist_init(q); qlist_init(q);
} }
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) bool quarantine_put(struct kmem_cache *cache, void *object)
{ {
unsigned long flags; unsigned long flags;
struct qlist_head *q; struct qlist_head *q;
struct qlist_head temp = QLIST_INIT; struct qlist_head temp = QLIST_INIT;
struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
/*
* If there's no metadata for this object, don't put it into
* quarantine.
*/
if (!meta)
return false;
/* /*
* Note: irq must be disabled until after we move the batch to the * Note: irq must be disabled until after we move the batch to the
...@@ -192,9 +195,9 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) ...@@ -192,9 +195,9 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
q = this_cpu_ptr(&cpu_quarantine); q = this_cpu_ptr(&cpu_quarantine);
if (q->offline) { if (q->offline) {
local_irq_restore(flags); local_irq_restore(flags);
return; return false;
} }
qlist_put(q, &info->quarantine_link, cache->size); qlist_put(q, &meta->quarantine_link, cache->size);
if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) { if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
qlist_move_all(q, &temp); qlist_move_all(q, &temp);
...@@ -215,6 +218,8 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) ...@@ -215,6 +218,8 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
} }
local_irq_restore(flags); local_irq_restore(flags);
return true;
} }
void quarantine_reduce(void) void quarantine_reduce(void)
......
This diff is collapsed.
...@@ -7,11 +7,6 @@ ...@@ -7,11 +7,6 @@
* *
* Some code borrowed from https://github.com/xairy/kasan-prototype by * Some code borrowed from https://github.com/xairy/kasan-prototype by
* Andrey Konovalov <andreyknvl@gmail.com> * Andrey Konovalov <andreyknvl@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/ */
#include <linux/bitops.h> #include <linux/bitops.h>
...@@ -21,6 +16,7 @@ ...@@ -21,6 +16,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/printk.h> #include <linux/printk.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/stackdepot.h> #include <linux/stackdepot.h>
#include <linux/stacktrace.h> #include <linux/stacktrace.h>
...@@ -39,7 +35,7 @@ void *find_first_bad_addr(void *addr, size_t size) ...@@ -39,7 +35,7 @@ void *find_first_bad_addr(void *addr, size_t size)
void *p = addr; void *p = addr;
while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p))) while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p)))
p += KASAN_SHADOW_SCALE_SIZE; p += KASAN_GRANULE_SIZE;
return p; return p;
} }
...@@ -51,14 +47,14 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info) ...@@ -51,14 +47,14 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info)
shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr); shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
/* /*
* If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look * If shadow byte value is in [0, KASAN_GRANULE_SIZE) we can look
* at the next shadow byte to determine the type of the bad access. * at the next shadow byte to determine the type of the bad access.
*/ */
if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1) if (*shadow_addr > 0 && *shadow_addr <= KASAN_GRANULE_SIZE - 1)
shadow_addr++; shadow_addr++;
switch (*shadow_addr) { switch (*shadow_addr) {
case 0 ... KASAN_SHADOW_SCALE_SIZE - 1: case 0 ... KASAN_GRANULE_SIZE - 1:
/* /*
* In theory it's still possible to see these shadow values * In theory it's still possible to see these shadow values
* due to a data race in the kernel code. * due to a data race in the kernel code.
...@@ -122,11 +118,177 @@ const char *get_bug_type(struct kasan_access_info *info) ...@@ -122,11 +118,177 @@ const char *get_bug_type(struct kasan_access_info *info)
if (info->access_addr + info->access_size < info->access_addr) if (info->access_addr + info->access_size < info->access_addr)
return "out-of-bounds"; return "out-of-bounds";
if (addr_has_shadow(info->access_addr)) if (addr_has_metadata(info->access_addr))
return get_shadow_bug_type(info); return get_shadow_bug_type(info);
return get_wild_bug_type(info); return get_wild_bug_type(info);
} }
void metadata_fetch_row(char *buffer, void *row)
{
memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
}
#if CONFIG_KASAN_STACK
static bool __must_check tokenize_frame_descr(const char **frame_descr,
char *token, size_t max_tok_len,
unsigned long *value)
{
const char *sep = strchr(*frame_descr, ' ');
if (sep == NULL)
sep = *frame_descr + strlen(*frame_descr);
if (token != NULL) {
const size_t tok_len = sep - *frame_descr;
if (tok_len + 1 > max_tok_len) {
pr_err("KASAN internal error: frame description too long: %s\n",
*frame_descr);
return false;
}
/* Copy token (+ 1 byte for '\0'). */
strlcpy(token, *frame_descr, tok_len + 1);
}
/* Advance frame_descr past separator. */
*frame_descr = sep + 1;
if (value != NULL && kstrtoul(token, 10, value)) {
pr_err("KASAN internal error: not a valid number: %s\n", token);
return false;
}
return true;
}
static void print_decoded_frame_descr(const char *frame_descr)
{
/*
* We need to parse the following string:
* "n alloc_1 alloc_2 ... alloc_n"
* where alloc_i looks like
* "offset size len name"
* or "offset size len name:line".
*/
char token[64];
unsigned long num_objects;
if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
&num_objects))
return;
pr_err("\n");
pr_err("this frame has %lu %s:\n", num_objects,
num_objects == 1 ? "object" : "objects");
while (num_objects--) {
unsigned long offset;
unsigned long size;
/* access offset */
if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
&offset))
return;
/* access size */
if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
&size))
return;
/* name length (unused) */
if (!tokenize_frame_descr(&frame_descr, NULL, 0, NULL))
return;
/* object name */
if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
NULL))
return;
/* Strip line number; without filename it's not very helpful. */
strreplace(token, ':', '\0');
/* Finally, print object information. */
pr_err(" [%lu, %lu) '%s'", offset, offset + size, token);
}
}
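As a concrete example of the descriptor format parsed above (the string itself is made up), "1 32 8 3 buf" decodes as a frame with one object: offset 32, size 8, a name length of 3 that is read and discarded, and the name "buf", which the loop prints as " [32, 40) 'buf'".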
static bool __must_check get_address_stack_frame_info(const void *addr,
unsigned long *offset,
const char **frame_descr,
const void **frame_pc)
{
unsigned long aligned_addr;
unsigned long mem_ptr;
const u8 *shadow_bottom;
const u8 *shadow_ptr;
const unsigned long *frame;
BUILD_BUG_ON(IS_ENABLED(CONFIG_STACK_GROWSUP));
/*
* NOTE: We currently only support printing frame information for
* accesses to the task's own stack.
*/
if (!object_is_on_stack(addr))
return false;
aligned_addr = round_down((unsigned long)addr, sizeof(long));
mem_ptr = round_down(aligned_addr, KASAN_GRANULE_SIZE);
shadow_ptr = kasan_mem_to_shadow((void *)aligned_addr);
shadow_bottom = kasan_mem_to_shadow(end_of_stack(current));
while (shadow_ptr >= shadow_bottom && *shadow_ptr != KASAN_STACK_LEFT) {
shadow_ptr--;
mem_ptr -= KASAN_GRANULE_SIZE;
}
while (shadow_ptr >= shadow_bottom && *shadow_ptr == KASAN_STACK_LEFT) {
shadow_ptr--;
mem_ptr -= KASAN_GRANULE_SIZE;
}
if (shadow_ptr < shadow_bottom)
return false;
frame = (const unsigned long *)(mem_ptr + KASAN_GRANULE_SIZE);
if (frame[0] != KASAN_CURRENT_STACK_FRAME_MAGIC) {
pr_err("KASAN internal error: frame info validation failed; invalid marker: %lu\n",
frame[0]);
return false;
}
*offset = (unsigned long)addr - (unsigned long)frame;
*frame_descr = (const char *)frame[1];
*frame_pc = (void *)frame[2];
return true;
}
void print_address_stack_frame(const void *addr)
{
unsigned long offset;
const char *frame_descr;
const void *frame_pc;
if (!get_address_stack_frame_info(addr, &offset, &frame_descr,
&frame_pc))
return;
/*
* get_address_stack_frame_info only returns true if the given addr is
* on the current task's stack.
*/
pr_err("\n");
pr_err("addr %px is located in stack of task %s/%d at offset %lu in frame:\n",
addr, current->comm, task_pid_nr(current), offset);
pr_err(" %pS\n", frame_pc);
if (!frame_descr)
return;
print_decoded_frame_descr(frame_descr);
}
#endif /* CONFIG_KASAN_STACK */
#define DEFINE_ASAN_REPORT_LOAD(size) \ #define DEFINE_ASAN_REPORT_LOAD(size) \
void __asan_report_load##size##_noabort(unsigned long addr) \ void __asan_report_load##size##_noabort(unsigned long addr) \
{ \ { \
......
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains hardware tag-based KASAN specific error reporting code.
*
* Copyright (c) 2020 Google, Inc.
* Author: Andrey Konovalov <andreyknvl@google.com>
*/
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include "kasan.h"
const char *get_bug_type(struct kasan_access_info *info)
{
return "invalid-access";
}
void *find_first_bad_addr(void *addr, size_t size)
{
return kasan_reset_tag(addr);
}
void metadata_fetch_row(char *buffer, void *row)
{
int i;
for (i = 0; i < META_BYTES_PER_ROW; i++)
buffer[i] = hw_get_mem_tag(row + i * KASAN_GRANULE_SIZE);
}
void print_tags(u8 addr_tag, const void *addr)
{
u8 memory_tag = hw_get_mem_tag((void *)addr);
pr_err("Pointer tag: [%02x], memory tag: [%02x]\n",
addr_tag, memory_tag);
}
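For illustration, a fault in this mode produces a report line such as "Pointer tag: [3a], memory tag: [fe]" (tag values hypothetical); 0xfe here is the KASAN_TAG_INVALID pattern used for memory that is not supposed to be accessed.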
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* /*
* This file contains tag-based KASAN specific error reporting code. * This file contains software tag-based KASAN specific error reporting code.
* *
* Copyright (c) 2014 Samsung Electronics Co., Ltd. * Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
* *
* Some code borrowed from https://github.com/xairy/kasan-prototype by * Some code borrowed from https://github.com/xairy/kasan-prototype by
* Andrey Konovalov <andreyknvl@gmail.com> * Andrey Konovalov <andreyknvl@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/ */
#include <linux/bitops.h> #include <linux/bitops.h>
...@@ -46,16 +41,19 @@ const char *get_bug_type(struct kasan_access_info *info) ...@@ -46,16 +41,19 @@ const char *get_bug_type(struct kasan_access_info *info)
int i; int i;
tag = get_tag(info->access_addr); tag = get_tag(info->access_addr);
addr = reset_tag(info->access_addr); addr = kasan_reset_tag(info->access_addr);
page = kasan_addr_to_page(addr); page = kasan_addr_to_page(addr);
if (page && PageSlab(page)) { if (page && PageSlab(page)) {
cache = page->slab_cache; cache = page->slab_cache;
object = nearest_obj(cache, page, (void *)addr); object = nearest_obj(cache, page, (void *)addr);
alloc_meta = get_alloc_info(cache, object); alloc_meta = kasan_get_alloc_meta(cache, object);
for (i = 0; i < KASAN_NR_FREE_STACKS; i++) if (alloc_meta) {
if (alloc_meta->free_pointer_tag[i] == tag) for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
return "use-after-free"; if (alloc_meta->free_pointer_tag[i] == tag)
return "use-after-free";
}
}
return "out-of-bounds"; return "out-of-bounds";
} }
...@@ -77,14 +75,19 @@ const char *get_bug_type(struct kasan_access_info *info) ...@@ -77,14 +75,19 @@ const char *get_bug_type(struct kasan_access_info *info)
void *find_first_bad_addr(void *addr, size_t size) void *find_first_bad_addr(void *addr, size_t size)
{ {
u8 tag = get_tag(addr); u8 tag = get_tag(addr);
void *p = reset_tag(addr); void *p = kasan_reset_tag(addr);
void *end = p + size; void *end = p + size;
while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p)) while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p))
p += KASAN_SHADOW_SCALE_SIZE; p += KASAN_GRANULE_SIZE;
return p; return p;
} }
void metadata_fetch_row(char *buffer, void *row)
{
memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
}
void print_tags(u8 addr_tag, const void *addr) void print_tags(u8 addr_tag, const void *addr)
{ {
u8 *shadow = (u8 *)kasan_mem_to_shadow(addr); u8 *shadow = (u8 *)kasan_mem_to_shadow(addr);
......
This diff is collapsed.
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* /*
* This file contains core tag-based KASAN code. * This file contains core software tag-based KASAN code.
* *
* Copyright (c) 2018 Google, Inc. * Copyright (c) 2018 Google, Inc.
* Author: Andrey Konovalov <andreyknvl@google.com> * Author: Andrey Konovalov <andreyknvl@google.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define pr_fmt(fmt) "kasan: " fmt
#include <linux/export.h> #include <linux/export.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
...@@ -40,12 +35,14 @@ ...@@ -40,12 +35,14 @@
static DEFINE_PER_CPU(u32, prng_state); static DEFINE_PER_CPU(u32, prng_state);
void kasan_init_tags(void) void __init kasan_init_sw_tags(void)
{ {
int cpu; int cpu;
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
per_cpu(prng_state, cpu) = (u32)get_cycles(); per_cpu(prng_state, cpu) = (u32)get_cycles();
pr_info("KernelAddressSanitizer initialized\n");
} }
/* /*
...@@ -70,11 +67,6 @@ u8 random_tag(void) ...@@ -70,11 +67,6 @@ u8 random_tag(void)
return (u8)(state % (KASAN_TAG_MAX + 1)); return (u8)(state % (KASAN_TAG_MAX + 1));
} }
void *kasan_reset_tag(const void *addr)
{
return reset_tag(addr);
}
bool check_memory_region(unsigned long addr, size_t size, bool write, bool check_memory_region(unsigned long addr, size_t size, bool write,
unsigned long ret_ip) unsigned long ret_ip)
{ {
...@@ -110,7 +102,7 @@ bool check_memory_region(unsigned long addr, size_t size, bool write, ...@@ -110,7 +102,7 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
if (tag == KASAN_TAG_KERNEL) if (tag == KASAN_TAG_KERNEL)
return true; return true;
untagged_addr = reset_tag((const void *)addr); untagged_addr = kasan_reset_tag((const void *)addr);
if (unlikely(untagged_addr < if (unlikely(untagged_addr <
kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) { kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
return !kasan_report(addr, size, write, ret_ip); return !kasan_report(addr, size, write, ret_ip);
...@@ -126,6 +118,15 @@ bool check_memory_region(unsigned long addr, size_t size, bool write, ...@@ -126,6 +118,15 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
return true; return true;
} }
bool check_invalid_free(void *addr)
{
u8 tag = get_tag(addr);
u8 shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(kasan_reset_tag(addr)));
return (shadow_byte == KASAN_TAG_INVALID) ||
(tag != KASAN_TAG_KERNEL && tag != shadow_byte);
}
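Roughly, the tag-based counterpart above behaves like this worked example (tag values hypothetical): a pointer tagged 0x3a whose granule's shadow byte also holds 0x3a frees normally; a shadow of KASAN_TAG_INVALID (0xFE) flags the free regardless of the pointer tag, and any other mismatch is flagged too unless the pointer carries the native KASAN_TAG_KERNEL (0xFF) tag, which is always accepted.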
#define DEFINE_HWASAN_LOAD_STORE(size) \ #define DEFINE_HWASAN_LOAD_STORE(size) \
void __hwasan_load##size##_noabort(unsigned long addr) \ void __hwasan_load##size##_noabort(unsigned long addr) \
{ \ { \
...@@ -158,7 +159,7 @@ EXPORT_SYMBOL(__hwasan_storeN_noabort); ...@@ -158,7 +159,7 @@ EXPORT_SYMBOL(__hwasan_storeN_noabort);
void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size) void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
{ {
kasan_poison_shadow((void *)addr, size, tag); poison_range((void *)addr, size, tag);
} }
EXPORT_SYMBOL(__hwasan_tag_memory); EXPORT_SYMBOL(__hwasan_tag_memory);
...@@ -168,7 +169,9 @@ void kasan_set_free_info(struct kmem_cache *cache, ...@@ -168,7 +169,9 @@ void kasan_set_free_info(struct kmem_cache *cache,
struct kasan_alloc_meta *alloc_meta; struct kasan_alloc_meta *alloc_meta;
u8 idx = 0; u8 idx = 0;
alloc_meta = get_alloc_info(cache, object); alloc_meta = kasan_get_alloc_meta(cache, object);
if (!alloc_meta)
return;
#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
idx = alloc_meta->free_track_idx; idx = alloc_meta->free_track_idx;
...@@ -185,7 +188,9 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, ...@@ -185,7 +188,9 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
struct kasan_alloc_meta *alloc_meta; struct kasan_alloc_meta *alloc_meta;
int i = 0; int i = 0;
alloc_meta = get_alloc_info(cache, object); alloc_meta = kasan_get_alloc_meta(cache, object);
if (!alloc_meta)
return NULL;
#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
for (i = 0; i < KASAN_NR_FREE_STACKS; i++) { for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
......
...@@ -104,7 +104,7 @@ static inline void poison_element(mempool_t *pool, void *element) ...@@ -104,7 +104,7 @@ static inline void poison_element(mempool_t *pool, void *element)
static __always_inline void kasan_poison_element(mempool_t *pool, void *element) static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{ {
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
kasan_poison_kfree(element, _RET_IP_); kasan_slab_free_mempool(element, _RET_IP_);
else if (pool->alloc == mempool_alloc_pages) else if (pool->alloc == mempool_alloc_pages)
kasan_free_pages(element, (unsigned long)pool->pool_data); kasan_free_pages(element, (unsigned long)pool->pool_data);
} }
...@@ -112,7 +112,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element) ...@@ -112,7 +112,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
static void kasan_unpoison_element(mempool_t *pool, void *element) static void kasan_unpoison_element(mempool_t *pool, void *element)
{ {
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
kasan_unpoison_slab(element); kasan_unpoison_range(element, __ksize(element));
else if (pool->alloc == mempool_alloc_pages) else if (pool->alloc == mempool_alloc_pages)
kasan_alloc_pages(element, (unsigned long)pool->pool_data); kasan_alloc_pages(element, (unsigned long)pool->pool_data);
} }
......
...@@ -1204,8 +1204,10 @@ static void kernel_init_free_pages(struct page *page, int numpages) ...@@ -1204,8 +1204,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)
/* s390's use of memset() could override KASAN redzones. */ /* s390's use of memset() could override KASAN redzones. */
kasan_disable_current(); kasan_disable_current();
for (i = 0; i < numpages; i++) for (i = 0; i < numpages; i++) {
page_kasan_tag_reset(page + i);
clear_highpage(page + i); clear_highpage(page + i);
}
kasan_enable_current(); kasan_enable_current();
} }
...@@ -7671,6 +7673,11 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char ...@@ -7671,6 +7673,11 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
* alias for the memset(). * alias for the memset().
*/ */
direct_map_addr = page_address(page); direct_map_addr = page_address(page);
/*
* Perform a kasan-unchecked memset() since this memory
* has not been initialized.
*/
direct_map_addr = kasan_reset_tag(direct_map_addr);
if ((unsigned int)poison <= 0xFF) if ((unsigned int)poison <= 0xFF)
memset(direct_map_addr, poison, PAGE_SIZE); memset(direct_map_addr, poison, PAGE_SIZE);
......
...@@ -25,7 +25,7 @@ static void poison_page(struct page *page) ...@@ -25,7 +25,7 @@ static void poison_page(struct page *page)
/* KASAN still thinks the page is in-use, so skip it. */ /* KASAN still thinks the page is in-use, so skip it. */
kasan_disable_current(); kasan_disable_current();
memset(addr, PAGE_POISON, PAGE_SIZE); memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
kasan_enable_current(); kasan_enable_current();
kunmap_atomic(addr); kunmap_atomic(addr);
} }
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
#include <linux/ptdump.h> #include <linux/ptdump.h>
#include <linux/kasan.h> #include <linux/kasan.h>
#ifdef CONFIG_KASAN #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/* /*
* This is an optimization for KASAN=y case. Since all kasan page tables * This is an optimization for KASAN=y case. Since all kasan page tables
* eventually point to the kasan_early_shadow_page we could call note_page() * eventually point to the kasan_early_shadow_page we could call note_page()
...@@ -31,7 +31,8 @@ static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr, ...@@ -31,7 +31,8 @@ static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
struct ptdump_state *st = walk->private; struct ptdump_state *st = walk->private;
pgd_t val = READ_ONCE(*pgd); pgd_t val = READ_ONCE(*pgd);
#if CONFIG_PGTABLE_LEVELS > 4 && defined(CONFIG_KASAN) #if CONFIG_PGTABLE_LEVELS > 4 && \
(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d))) if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
return note_kasan_page_table(walk, addr); return note_kasan_page_table(walk, addr);
#endif #endif
...@@ -51,7 +52,8 @@ static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr, ...@@ -51,7 +52,8 @@ static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
struct ptdump_state *st = walk->private; struct ptdump_state *st = walk->private;
p4d_t val = READ_ONCE(*p4d); p4d_t val = READ_ONCE(*p4d);
#if CONFIG_PGTABLE_LEVELS > 3 && defined(CONFIG_KASAN) #if CONFIG_PGTABLE_LEVELS > 3 && \
(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud))) if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
return note_kasan_page_table(walk, addr); return note_kasan_page_table(walk, addr);
#endif #endif
...@@ -71,7 +73,8 @@ static int ptdump_pud_entry(pud_t *pud, unsigned long addr, ...@@ -71,7 +73,8 @@ static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
struct ptdump_state *st = walk->private; struct ptdump_state *st = walk->private;
pud_t val = READ_ONCE(*pud); pud_t val = READ_ONCE(*pud);
#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_KASAN) #if CONFIG_PGTABLE_LEVELS > 2 && \
(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd))) if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
return note_kasan_page_table(walk, addr); return note_kasan_page_table(walk, addr);
#endif #endif
...@@ -91,7 +94,7 @@ static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr, ...@@ -91,7 +94,7 @@ static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
struct ptdump_state *st = walk->private; struct ptdump_state *st = walk->private;
pmd_t val = READ_ONCE(*pmd); pmd_t val = READ_ONCE(*pmd);
#if defined(CONFIG_KASAN) #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte))) if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
return note_kasan_page_table(walk, addr); return note_kasan_page_table(walk, addr);
#endif #endif
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/page.h> #include <asm/page.h>
...@@ -53,7 +54,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work, ...@@ -53,7 +54,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
*/ */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \ SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
SLAB_FAILSLAB | SLAB_KASAN) SLAB_FAILSLAB | kasan_never_merge())
#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \ #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
SLAB_CACHE_DMA32 | SLAB_ACCOUNT) SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
...@@ -1176,7 +1177,7 @@ size_t ksize(const void *objp) ...@@ -1176,7 +1177,7 @@ size_t ksize(const void *objp)
* We assume that ksize callers could use the whole allocated area, * We assume that ksize callers could use the whole allocated area,
* so we need to unpoison this area. * so we need to unpoison this area.
*/ */
kasan_unpoison_shadow(objp, size); kasan_unpoison_range(objp, size);
return size; return size;
} }
EXPORT_SYMBOL(ksize); EXPORT_SYMBOL(ksize);
......
...@@ -249,7 +249,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr, ...@@ -249,7 +249,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
{ {
#ifdef CONFIG_SLAB_FREELIST_HARDENED #ifdef CONFIG_SLAB_FREELIST_HARDENED
/* /*
* When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged. * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
* Normally, this doesn't cause any issues, as both set_freepointer() * Normally, this doesn't cause any issues, as both set_freepointer()
* and get_freepointer() are called with a pointer with the same tag. * and get_freepointer() are called with a pointer with the same tag.
* However, there are some issues with CONFIG_SLUB_DEBUG code. For * However, there are some issues with CONFIG_SLUB_DEBUG code. For
...@@ -275,6 +275,7 @@ static inline void *freelist_dereference(const struct kmem_cache *s, ...@@ -275,6 +275,7 @@ static inline void *freelist_dereference(const struct kmem_cache *s,
static inline void *get_freepointer(struct kmem_cache *s, void *object) static inline void *get_freepointer(struct kmem_cache *s, void *object)
{ {
object = kasan_reset_tag(object);
return freelist_dereference(s, object + s->offset); return freelist_dereference(s, object + s->offset);
} }
...@@ -304,6 +305,7 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) ...@@ -304,6 +305,7 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
BUG_ON(object == fp); /* naive detection of double free or corruption */ BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif #endif
freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr); *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
} }
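
The recurring kasan_reset_tag() calls in this file strip the pointer tag so that SLUB's freelist and debug code always operate on the canonical (match-all) address, regardless of which tag the object was handed out with. A minimal sketch of what resetting the tag amounts to on arm64, assuming the tag sits in the top byte and 0xff is the match-all value; the kernel's actual helper is arch-specific:

```c
/* Sketch, not the kernel implementation: force the top-byte tag to the
 * match-all value so later accesses are never checked against the
 * object's original tag. */
#define TAG_SHIFT	56
#define TAG_KERNEL	0xffUL	/* match-all tag */

static inline void *reset_tag_sketch(const void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	addr &= ~(0xffUL << TAG_SHIFT);
	addr |= TAG_KERNEL << TAG_SHIFT;
	return (void *)addr;
}
```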
...@@ -538,8 +540,8 @@ static void print_section(char *level, char *text, u8 *addr, ...@@ -538,8 +540,8 @@ static void print_section(char *level, char *text, u8 *addr,
unsigned int length) unsigned int length)
{ {
metadata_access_enable(); metadata_access_enable();
print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr, print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
length, 1); 16, 1, addr, length, 1);
metadata_access_disable(); metadata_access_disable();
} }
...@@ -570,7 +572,7 @@ static struct track *get_track(struct kmem_cache *s, void *object, ...@@ -570,7 +572,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
p = object + get_info_end(s); p = object + get_info_end(s);
return p + alloc; return kasan_reset_tag(p + alloc);
} }
static void set_track(struct kmem_cache *s, void *object, static void set_track(struct kmem_cache *s, void *object,
...@@ -583,7 +585,8 @@ static void set_track(struct kmem_cache *s, void *object, ...@@ -583,7 +585,8 @@ static void set_track(struct kmem_cache *s, void *object,
unsigned int nr_entries; unsigned int nr_entries;
metadata_access_enable(); metadata_access_enable();
nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3); nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
TRACK_ADDRS_COUNT, 3);
metadata_access_disable(); metadata_access_disable();
if (nr_entries < TRACK_ADDRS_COUNT) if (nr_entries < TRACK_ADDRS_COUNT)
...@@ -747,7 +750,7 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page, ...@@ -747,7 +750,7 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
static void init_object(struct kmem_cache *s, void *object, u8 val) static void init_object(struct kmem_cache *s, void *object, u8 val)
{ {
u8 *p = object; u8 *p = kasan_reset_tag(object);
if (s->flags & SLAB_RED_ZONE) if (s->flags & SLAB_RED_ZONE)
memset(p - s->red_left_pad, val, s->red_left_pad); memset(p - s->red_left_pad, val, s->red_left_pad);
...@@ -777,7 +780,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page, ...@@ -777,7 +780,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
u8 *addr = page_address(page); u8 *addr = page_address(page);
metadata_access_enable(); metadata_access_enable();
fault = memchr_inv(start, value, bytes); fault = memchr_inv(kasan_reset_tag(start), value, bytes);
metadata_access_disable(); metadata_access_disable();
if (!fault) if (!fault)
return 1; return 1;
...@@ -873,7 +876,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page) ...@@ -873,7 +876,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
pad = end - remainder; pad = end - remainder;
metadata_access_enable(); metadata_access_enable();
fault = memchr_inv(pad, POISON_INUSE, remainder); fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
metadata_access_disable(); metadata_access_disable();
if (!fault) if (!fault)
return 1; return 1;
...@@ -1118,7 +1121,7 @@ void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) ...@@ -1118,7 +1121,7 @@ void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
return; return;
metadata_access_enable(); metadata_access_enable();
memset(addr, POISON_INUSE, page_size(page)); memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page));
metadata_access_disable(); metadata_access_disable();
} }
...@@ -1566,10 +1569,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s, ...@@ -1566,10 +1569,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
* Clear the object and the metadata, but don't touch * Clear the object and the metadata, but don't touch
* the redzone. * the redzone.
*/ */
memset(object, 0, s->object_size); memset(kasan_reset_tag(object), 0, s->object_size);
rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
: 0; : 0;
memset((char *)object + s->inuse, 0, memset((char *)kasan_reset_tag(object) + s->inuse, 0,
s->size - s->inuse - rsize); s->size - s->inuse - rsize);
} }
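
The init-on-free hunk above wipes the object and its in-object metadata while deliberately leaving the red-zone bytes alone, so later corruption checks still see the expected poison pattern. A toy user-space model of the arithmetic, with invented sizes rather than real cache geometry:

```c
#include <stdio.h>
#include <string.h>

/* Toy model of the wipe above: clear [0, object_size) and
 * [inuse, size - rsize), leaving the red-zone bytes untouched.
 * All sizes here are made up for illustration. */
int main(void)
{
	unsigned char slot[128];
	size_t object_size = 64, inuse = 96, size = 128, rsize = 16;

	memset(slot, 0xbb, sizeof(slot));              /* fake red-zone pattern  */
	memset(slot, 0, object_size);                  /* wipe the object        */
	memset(slot + inuse, 0, size - inuse - rsize); /* wipe trailing metadata */

	printf("last byte still poisoned: %#x\n", slot[size - 1]); /* 0xbb */
	return 0;
}
```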
...@@ -2881,10 +2884,10 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, ...@@ -2881,10 +2884,10 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
stat(s, ALLOC_FASTPATH); stat(s, ALLOC_FASTPATH);
} }
maybe_wipe_obj_freeptr(s, object); maybe_wipe_obj_freeptr(s, kasan_reset_tag(object));
if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object) if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
memset(object, 0, s->object_size); memset(kasan_reset_tag(object), 0, s->object_size);
slab_post_alloc_hook(s, objcg, gfpflags, 1, &object); slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
......
...@@ -148,10 +148,12 @@ endif ...@@ -148,10 +148,12 @@ endif
# we don't want to check (depends on variables KASAN_SANITIZE_obj.o, KASAN_SANITIZE) # we don't want to check (depends on variables KASAN_SANITIZE_obj.o, KASAN_SANITIZE)
# #
ifeq ($(CONFIG_KASAN),y) ifeq ($(CONFIG_KASAN),y)
ifneq ($(CONFIG_KASAN_HW_TAGS),y)
_c_flags += $(if $(patsubst n%,, \ _c_flags += $(if $(patsubst n%,, \
$(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \ $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
$(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE)) $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE))
endif endif
endif
ifeq ($(CONFIG_UBSAN),y) ifeq ($(CONFIG_UBSAN),y)
_c_flags += $(if $(patsubst n%,, \ _c_flags += $(if $(patsubst n%,, \
......
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2020 ARM Limited # Copyright (C) 2020 ARM Limited
CFLAGS += -std=gnu99 -I. CFLAGS += -std=gnu99 -I. -lpthread
SRCS := $(filter-out mte_common_util.c,$(wildcard *.c)) SRCS := $(filter-out mte_common_util.c,$(wildcard *.c))
PROGS := $(patsubst %.c,%,$(SRCS)) PROGS := $(patsubst %.c,%,$(SRCS))
......
This diff is collapsed.