Commit 1b7eaf57 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - It turns out that the optimised string routines merged in 5.14 are
   not safe with in-kernel MTE (KASAN_HW_TAGS) because of reading beyond
   the end of a string (strcmp, strncmp). Such reading may go across a
   16 byte tag granule and cause a tag check fault. When KASAN_HW_TAGS
   is enabled, use the generic strcmp/strncmp C implementation.

 - An errata workaround for ThunderX relied on the CPU capabilities
   being enabled in a specific order. This disappeared with the
   automatic generation of the cpucaps.h file (sorted alphabetically).
   Fix it by checking the current CPU only rather than the system-wide
   capability.

 - Add system_supports_mte() checks on the kernel entry/exit path and
   thread switching to avoid unnecessary barriers and function calls on
   systems where MTE is not supported.

 - kselftests: skip arm64 tests if the required features are missing.

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Restore forced disabling of KPTI on ThunderX
  kselftest/arm64: signal: Skip tests if required features are missing
  arm64: Mitigate MTE issues with str{n}cmp()
  arm64: add MTE supported check to thread switching and syscall entry/exit
parents 4c4f0c2b 22b70e6f
...@@ -525,6 +525,11 @@ alternative_endif ...@@ -525,6 +525,11 @@ alternative_endif
#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name) #define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
#endif #endif
#ifdef CONFIG_KASAN_HW_TAGS
#define EXPORT_SYMBOL_NOHWKASAN(name)
#else
#define EXPORT_SYMBOL_NOHWKASAN(name) EXPORT_SYMBOL_NOKASAN(name)
#endif
/* /*
* Emit a 64-bit absolute little endian symbol reference in a way that * Emit a 64-bit absolute little endian symbol reference in a way that
* ensures that it will be resolved at build time, even when building a * ensures that it will be resolved at build time, even when building a
......
...@@ -99,11 +99,17 @@ void mte_check_tfsr_el1(void); ...@@ -99,11 +99,17 @@ void mte_check_tfsr_el1(void);
static inline void mte_check_tfsr_entry(void) static inline void mte_check_tfsr_entry(void)
{ {
if (!system_supports_mte())
return;
mte_check_tfsr_el1(); mte_check_tfsr_el1();
} }
static inline void mte_check_tfsr_exit(void) static inline void mte_check_tfsr_exit(void)
{ {
if (!system_supports_mte())
return;
/* /*
* The asynchronous faults are sync'ed automatically with * The asynchronous faults are sync'ed automatically with
* TFSR_EL1 on kernel entry but for exit an explicit dsb() * TFSR_EL1 on kernel entry but for exit an explicit dsb()
......
...@@ -12,11 +12,13 @@ extern char *strrchr(const char *, int c); ...@@ -12,11 +12,13 @@ extern char *strrchr(const char *, int c);
#define __HAVE_ARCH_STRCHR #define __HAVE_ARCH_STRCHR
extern char *strchr(const char *, int c); extern char *strchr(const char *, int c);
#ifndef CONFIG_KASAN_HW_TAGS
#define __HAVE_ARCH_STRCMP #define __HAVE_ARCH_STRCMP
extern int strcmp(const char *, const char *); extern int strcmp(const char *, const char *);
#define __HAVE_ARCH_STRNCMP #define __HAVE_ARCH_STRNCMP
extern int strncmp(const char *, const char *, __kernel_size_t); extern int strncmp(const char *, const char *, __kernel_size_t);
#endif
#define __HAVE_ARCH_STRLEN #define __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *); extern __kernel_size_t strlen(const char *);
......
...@@ -1526,9 +1526,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, ...@@ -1526,9 +1526,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
/* /*
* For reasons that aren't entirely clear, enabling KPTI on Cavium * For reasons that aren't entirely clear, enabling KPTI on Cavium
* ThunderX leads to apparent I-cache corruption of kernel text, which * ThunderX leads to apparent I-cache corruption of kernel text, which
* ends as well as you might imagine. Don't even try. * ends as well as you might imagine. Don't even try. We cannot rely
* on the cpus_have_*cap() helpers here to detect the CPU erratum
* because cpucap detection order may change. However, since we know
* affected CPUs are always in a homogeneous configuration, it is
* safe to rely on this_cpu_has_cap() here.
*/ */
if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) { if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
str = "ARM64_WORKAROUND_CAVIUM_27456"; str = "ARM64_WORKAROUND_CAVIUM_27456";
__kpti_forced = -1; __kpti_forced = -1;
} }
......
...@@ -142,12 +142,7 @@ void mte_enable_kernel_async(void) ...@@ -142,12 +142,7 @@ void mte_enable_kernel_async(void)
#ifdef CONFIG_KASAN_HW_TAGS #ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void) void mte_check_tfsr_el1(void)
{ {
u64 tfsr_el1; u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
if (!system_supports_mte())
return;
tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) { if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
/* /*
...@@ -199,6 +194,9 @@ void mte_thread_init_user(void) ...@@ -199,6 +194,9 @@ void mte_thread_init_user(void)
void mte_thread_switch(struct task_struct *next) void mte_thread_switch(struct task_struct *next)
{ {
if (!system_supports_mte())
return;
mte_update_sctlr_user(next); mte_update_sctlr_user(next);
/* /*
......
...@@ -173,4 +173,4 @@ L(done): ...@@ -173,4 +173,4 @@ L(done):
ret ret
SYM_FUNC_END_PI(strcmp) SYM_FUNC_END_PI(strcmp)
EXPORT_SYMBOL_NOKASAN(strcmp) EXPORT_SYMBOL_NOHWKASAN(strcmp)
...@@ -258,4 +258,4 @@ L(ret0): ...@@ -258,4 +258,4 @@ L(ret0):
ret ret
SYM_FUNC_END_PI(strncmp) SYM_FUNC_END_PI(strncmp)
EXPORT_SYMBOL_NOKASAN(strncmp) EXPORT_SYMBOL_NOHWKASAN(strncmp)
...@@ -266,16 +266,19 @@ int test_init(struct tdescr *td) ...@@ -266,16 +266,19 @@ int test_init(struct tdescr *td)
td->feats_supported |= FEAT_SSBS; td->feats_supported |= FEAT_SSBS;
if (getauxval(AT_HWCAP) & HWCAP_SVE) if (getauxval(AT_HWCAP) & HWCAP_SVE)
td->feats_supported |= FEAT_SVE; td->feats_supported |= FEAT_SVE;
if (feats_ok(td)) if (feats_ok(td)) {
fprintf(stderr, fprintf(stderr,
"Required Features: [%s] supported\n", "Required Features: [%s] supported\n",
feats_to_string(td->feats_required & feats_to_string(td->feats_required &
td->feats_supported)); td->feats_supported));
else } else {
fprintf(stderr, fprintf(stderr,
"Required Features: [%s] NOT supported\n", "Required Features: [%s] NOT supported\n",
feats_to_string(td->feats_required & feats_to_string(td->feats_required &
~td->feats_supported)); ~td->feats_supported));
td->result = KSFT_SKIP;
return 0;
}
} }
/* Perform test specific additional initialization */ /* Perform test specific additional initialization */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment