Commit b9baf5c8 authored by Russell King (Oracle)

ARM: Spectre-BHB workaround

Workaround the Spectre BHB issues for Cortex-A15, Cortex-A57,
Cortex-A72, Cortex-A73 and Cortex-A75. To be safe, we also include
Brahma B15, which is affected by Spectre V2 in the same ways as
Cortex-A15.
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
parent 8d9d651f
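
Once this patch is in place, the mitigation is visible from userspace: the cpu_show_spectre_v2() hunk below reports the loop workaround as "History overwrite" through the standard vulnerabilities sysfs file. A minimal check, as a hedged sketch (the exact output string depends on cpu_show_spectre_v2() and the mitigation state):

/* Hedged sketch, not part of the commit: read the spectre_v2 report. */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

	if (!f) {
		perror("spectre_v2");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. a line naming "History overwrite" */
	fclose(f);
	return 0;
}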
@@ -107,6 +107,16 @@
.endm
#endif
#if __LINUX_ARM_ARCH__ < 7
.macro dsb, args
mcr p15, 0, r0, c7, c10, 4
.endm
.macro isb, args
mcr p15, 0, r0, c7, c5, 4
.endm
#endif
.macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
.if \save
...
@@ -14,6 +14,7 @@ enum {
__SPECTRE_V2_METHOD_ICIALLU,
__SPECTRE_V2_METHOD_SMC,
__SPECTRE_V2_METHOD_HVC,
__SPECTRE_V2_METHOD_LOOP8,
};
enum {
@@ -21,8 +22,11 @@ enum {
SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
};
void spectre_v2_update_state(unsigned int state, unsigned int methods);
int spectre_bhb_update_vectors(unsigned int method);
#endif
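
The methods are BIT() flags rather than plain enum values so that reports from different CPUs can be OR-ed together; when more than one bit ends up set, the cpu_show_spectre_v2() hunk further down falls back to "Multiple mitigations". A standalone sketch of that bookkeeping (illustrative names; the kernel's own accumulation lives in spectre_v2_update_state()):

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Mirrors the __SPECTRE_V2_METHOD_* bit positions above (illustrative). */
enum { M_BPIALL, M_ICIALLU, M_SMC, M_HVC, M_LOOP8 };

int main(void)
{
	unsigned int methods = 0;

	methods |= BIT(M_LOOP8);	/* e.g. a Cortex-A72 reported in */
	methods |= BIT(M_BPIALL);	/* e.g. a Cortex-A73 reported in */

	/* More than one bit set: the sysfs report can no longer name a
	 * single method and prints "Multiple mitigations" instead. */
	if (methods & (methods - 1))
		printf("Multiple mitigations\n");
	else
		printf("single method: %#x\n", methods);
	return 0;
}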
@@ -116,11 +116,23 @@
  */
 #define ARM_VECTORS \
 __vectors_lma = .; \
-.vectors 0xffff0000 : AT(__vectors_lma) { \
-*(.vectors) \
+OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \
+.vectors { \
+*(.vectors) \
+} \
+.vectors.bhb.loop8 { \
+*(.vectors.bhb.loop8) \
+} \
+.vectors.bhb.bpiall { \
+*(.vectors.bhb.bpiall) \
+} \
 } \
 ARM_LMA(__vectors, .vectors); \
-. = __vectors_lma + SIZEOF(.vectors); \
+ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8); \
+ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall); \
+. = __vectors_lma + SIZEOF(.vectors) + \
+    SIZEOF(.vectors.bhb.loop8) + \
+    SIZEOF(.vectors.bhb.bpiall); \
 \
 __stubs_lma = .; \
 .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) { \
...
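
OVERLAY gives all three vector flavours the same run-time address (VMA 0xffff0000) while laying their load addresses out back to back from __vectors_lma, which is why the location counter then advances by the sum of the three SIZEOFs. A hedged illustration of the resulting load layout (the base address and the 8-word table size are assumptions for the example):

#include <stdio.h>

int main(void)
{
	/* Assumed example LMA; the real value comes from the linker. */
	unsigned long vectors_lma = 0x00800000UL;
	unsigned long table = 8 * 4;	/* 8 vector slots of 4 bytes each */

	printf("__vectors            LMA %#lx, VMA 0xffff0000\n", vectors_lma);
	printf("__vectors_bhb_loop8  LMA %#lx, VMA 0xffff0000\n", vectors_lma + table);
	printf("__vectors_bhb_bpiall LMA %#lx, VMA 0xffff0000\n", vectors_lma + 2 * table);
	return 0;
}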
@@ -1002,12 +1002,11 @@ vector_\name:
 sub lr, lr, #\correction
 .endif
-@
-@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
-@ (parent CPSR)
-@
+@ Save r0, lr_<exception> (parent PC)
 stmia sp, {r0, lr} @ save r0, lr
-mrs lr, spsr
+@ Save spsr_<exception> (parent CPSR)
+2: mrs lr, spsr
 str lr, [sp, #8] @ save spsr
 @
@@ -1028,6 +1027,44 @@ vector_\name:
movs pc, lr @ branch to handler in SVC mode
ENDPROC(vector_\name)
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
.subsection 1
.align 5
vector_bhb_loop8_\name:
.if \correction
sub lr, lr, #\correction
.endif
@ Save r0, lr_<exception> (parent PC)
stmia sp, {r0, lr}
@ bhb workaround
mov r0, #8
1: b . + 4
subs r0, r0, #1
bne 1b
dsb
isb
b 2b
ENDPROC(vector_bhb_loop8_\name)
vector_bhb_bpiall_\name:
.if \correction
sub lr, lr, #\correction
.endif
@ Save r0, lr_<exception> (parent PC)
stmia sp, {r0, lr}
@ bhb workaround
mcr p15, 0, r0, c7, c5, 6 @ BPIALL
@ isb not needed due to "movs pc, lr" in the vector stub
@ which gives a "context synchronisation".
b 2b
ENDPROC(vector_bhb_bpiall_\name)
.previous
#endif
.align 2
@ handler addresses follow this label
1:
@@ -1036,6 +1073,10 @@ ENDPROC(vector_\name)
.section .stubs, "ax", %progbits
@ This must be the first word
.word vector_swi
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
.word vector_bhb_loop8_swi
.word vector_bhb_bpiall_swi
#endif
vector_rst:
ARM( swi SYS_ERROR0 )
@@ -1150,8 +1191,10 @@ vector_addrexcptn:
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing FIQ to act like NMI on x86
-* systems.
+* systems. This must be the last vector stub, so let's place it in its
+* own subsection.
 */
+.subsection 2
 vector_stub fiq, FIQ_MODE, 4
 .long __fiq_usr @ 0 (USR_26 / USR_32)
@@ -1184,6 +1227,30 @@ vector_addrexcptn:
W(b) vector_irq
W(b) vector_fiq
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
.section .vectors.bhb.loop8, "ax", %progbits
.L__vectors_bhb_loop8_start:
W(b) vector_rst
W(b) vector_bhb_loop8_und
W(ldr) pc, .L__vectors_bhb_loop8_start + 0x1004
W(b) vector_bhb_loop8_pabt
W(b) vector_bhb_loop8_dabt
W(b) vector_addrexcptn
W(b) vector_bhb_loop8_irq
W(b) vector_bhb_loop8_fiq
.section .vectors.bhb.bpiall, "ax", %progbits
.L__vectors_bhb_bpiall_start:
W(b) vector_rst
W(b) vector_bhb_bpiall_und
W(ldr) pc, .L__vectors_bhb_bpiall_start + 0x1008
W(b) vector_bhb_bpiall_pabt
W(b) vector_bhb_bpiall_dabt
W(b) vector_addrexcptn
W(b) vector_bhb_bpiall_irq
W(b) vector_bhb_bpiall_fiq
#endif
.data
.align 2
...
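
The 0x1004 and 0x1008 offsets in the two BHB vector tables above are worth a worked example. The stubs are placed at ADDR(.vectors) + 0x1000 (see the vmlinux.lds.h hunk), and the first three words of .stubs are the addresses of vector_swi, vector_bhb_loop8_swi and vector_bhb_bpiall_swi, so each flavour's SWI slot loads pc from its own literal word. A small sketch of the arithmetic, assuming the usual 0xffff0000 high-vectors base:

#include <stdio.h>

int main(void)
{
	unsigned long vectors = 0xffff0000UL;	/* high vectors base */
	unsigned long stubs = vectors + 0x1000;	/* ADDR(.vectors) + 0x1000 */

	printf("vector_swi word            at %#lx (base + 0x1000)\n", stubs);
	printf("vector_bhb_loop8_swi word  at %#lx (base + 0x1004)\n", stubs + 4);
	printf("vector_bhb_bpiall_swi word at %#lx (base + 0x1008)\n", stubs + 8);
	return 0;
}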
@@ -153,6 +153,29 @@ ENDPROC(ret_from_fork)
*-----------------------------------------------------------------------------
*/
.align 5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
ENTRY(vector_bhb_loop8_swi)
sub sp, sp, #PT_REGS_SIZE
stmia sp, {r0 - r12}
mov r8, #8
1: b 2f
2: subs r8, r8, #1
bne 1b
dsb
isb
b 3f
ENDPROC(vector_bhb_loop8_swi)
.align 5
ENTRY(vector_bhb_bpiall_swi)
sub sp, sp, #PT_REGS_SIZE
stmia sp, {r0 - r12}
mcr p15, 0, r8, c7, c5, 6 @ BPIALL
isb
b 3f
ENDPROC(vector_bhb_bpiall_swi)
#endif
.align 5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
@@ -160,6 +183,7 @@ ENTRY(vector_swi)
#else
sub sp, sp, #PT_REGS_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12
3:
ARM( add r8, sp, #S_PC )
ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
THUMB( mov r8, sp )
...
@@ -45,6 +45,10 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
method = "Firmware call";
break;
case SPECTRE_V2_METHOD_LOOP8:
method = "History overwrite";
break;
default:
method = "Multiple mitigations";
break;
...
@@ -30,6 +30,7 @@
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
#include <asm/spectre.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
@@ -801,6 +802,43 @@ static void flush_vectors(void *vma, size_t offset, size_t size)
flush_icache_range(start, end);
}
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
int spectre_bhb_update_vectors(unsigned int method)
{
extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
void *vec_start, *vec_end;
if (system_state >= SYSTEM_FREEING_INITMEM) {
pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
smp_processor_id());
return SPECTRE_VULNERABLE;
}
switch (method) {
case SPECTRE_V2_METHOD_LOOP8:
vec_start = __vectors_bhb_loop8_start;
vec_end = __vectors_bhb_loop8_end;
break;
case SPECTRE_V2_METHOD_BPIALL:
vec_start = __vectors_bhb_bpiall_start;
vec_end = __vectors_bhb_bpiall_end;
break;
default:
pr_err("CPU%u: unknown Spectre BHB state %d\n",
smp_processor_id(), method);
return SPECTRE_VULNERABLE;
}
copy_from_lma(vectors_page, vec_start, vec_end);
flush_vectors(vectors_page, 0, vec_end - vec_start);
return SPECTRE_MITIGATED;
}
#endif
void __init early_trap_init(void *vectors_base)
{
extern char __stubs_start[], __stubs_end[];
...
@@ -851,6 +851,16 @@ config HARDEN_BRANCH_PREDICTOR
If unsure, say Y.
config HARDEN_BRANCH_HISTORY
bool "Harden Spectre style attacks against branch history" if EXPERT
depends on CPU_SPECTRE
default y
help
Speculation attacks against some high-performance processors can
make use of branch history to influence future speculation. When
taking an exception, this workaround either overwrites the branch
history with a sequence of branches or invalidates it, depending
on the CPU.
config TLS_REG_EMUL
bool
select NEED_KUSER_HELPERS
...
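
To make the help text concrete: the "sequence of branches" is the eight-iteration loop this commit adds to the vector stubs, and the invalidation alternative is BPIALL. A hedged standalone rendering of the loop as ARM inline assembly (illustrative only; the kernel runs the real sequence inside the exception vectors, never through a C helper like this):

/* Requires an ARMv7-A toolchain; mirrors vector_bhb_loop8_swi above. */
static inline void bhb_overwrite_loop8(void)
{
	unsigned int n = 8;

	asm volatile(
	"1:	b	2f\n"		/* taken branch */
	"2:	subs	%0, %0, #1\n"	/* count down from 8 */
	"	bne	1b\n"		/* taken branch back */
	"	dsb\n"
	"	isb\n"
	: "+r" (n) : : "memory", "cc");
}

int main(void)
{
	bhb_overwrite_loop8();	/* harmless: only perturbs branch history */
	return 0;
}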
@@ -177,6 +177,81 @@ static void cpu_v7_spectre_v2_init(void)
spectre_v2_update_state(state, method);
}
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
static int spectre_bhb_method;
static const char *spectre_bhb_method_name(int method)
{
switch (method) {
case SPECTRE_V2_METHOD_LOOP8:
return "loop";
case SPECTRE_V2_METHOD_BPIALL:
return "BPIALL";
default:
return "unknown";
}
}
static int spectre_bhb_install_workaround(int method)
{
if (spectre_bhb_method != method) {
if (spectre_bhb_method) {
pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
smp_processor_id());
return SPECTRE_VULNERABLE;
}
if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
return SPECTRE_VULNERABLE;
spectre_bhb_method = method;
}
pr_info("CPU%u: Spectre BHB: using %s workaround\n",
smp_processor_id(), spectre_bhb_method_name(method));
return SPECTRE_MITIGATED;
}
#else
static int spectre_bhb_install_workaround(int method)
{
return SPECTRE_VULNERABLE;
}
#endif
static void cpu_v7_spectre_bhb_init(void)
{
unsigned int state, method = 0;
switch (read_cpuid_part()) {
case ARM_CPU_PART_CORTEX_A15:
case ARM_CPU_PART_BRAHMA_B15:
case ARM_CPU_PART_CORTEX_A57:
case ARM_CPU_PART_CORTEX_A72:
state = SPECTRE_MITIGATED;
method = SPECTRE_V2_METHOD_LOOP8;
break;
case ARM_CPU_PART_CORTEX_A73:
case ARM_CPU_PART_CORTEX_A75:
state = SPECTRE_MITIGATED;
method = SPECTRE_V2_METHOD_BPIALL;
break;
default:
state = SPECTRE_UNAFFECTED;
break;
}
if (state == SPECTRE_MITIGATED)
state = spectre_bhb_install_workaround(method);
spectre_v2_update_state(state, method);
}
static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
u32 mask, const char *msg)
{
@@ -217,4 +292,5 @@ void cpu_v7_ca15_ibe(void)
void cpu_v7_bugs_init(void)
{
cpu_v7_spectre_v2_init();
cpu_v7_spectre_bhb_init();
}
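
A hedged sketch of the MIDR matching that drives the switch in cpu_v7_spectre_bhb_init(): read_cpuid_part() masks the MIDR down to implementer plus part number. The mask and part value below follow arch/arm/include/asm/cputype.h but are reproduced here from memory, so treat them as illustrative:

#include <stdio.h>

#define ARM_CPU_PART_MASK	0xff00fff0
#define ARM_CPU_PART_CORTEX_A72	0x4100d080	/* implementer 0x41, part 0xd08 */

int main(void)
{
	unsigned int midr = 0x410fd083;	/* example: Cortex-A72 r0p3 */

	if ((midr & ARM_CPU_PART_MASK) == ARM_CPU_PART_CORTEX_A72)
		printf("Spectre BHB: loop8 workaround selected\n");
	return 0;
}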