Commit 96d454cd authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - Fix kernel build with the integrated LLVM assembler which doesn't see
   the -Wa,-march option.

 - Fix "make vdso_install" when COMPAT_VDSO is disabled.

 - Make KVM more robust if the AT S1E1R instruction triggers an
   exception (architecture corner cases).

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  KVM: arm64: Set HCR_EL2.PTW to prevent AT taking synchronous exception
  KVM: arm64: Survive synchronous exceptions caused by AT instructions
  KVM: arm64: Add kvm_extable for vaxorcism code
  arm64: vdso32: make vdso32 install conditional
  arm64: use a common .arch preamble for inline assembly
parents ef91bb19 71a7f8cb
@@ -82,8 +82,8 @@ endif
 # compiler to generate them and consequently to break the single image contract
 # we pass it only to the assembler. This option is utilized only in case of non
 # integrated assemblers.
-ifneq ($(CONFIG_AS_HAS_ARMV8_4), y)
-branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
+ifeq ($(CONFIG_AS_HAS_PAC), y)
+asm-arch := armv8.3-a
 endif
 endif
@@ -91,7 +91,12 @@ KBUILD_CFLAGS += $(branch-prot-flags-y)
 ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
 # make sure to pass the newest target architecture to -march.
-KBUILD_CFLAGS	+= -Wa,-march=armv8.4-a
+asm-arch := armv8.4-a
 endif
+ifdef asm-arch
+KBUILD_CFLAGS	+= -Wa,-march=$(asm-arch) \
+		   -DARM64_ASM_ARCH='"$(asm-arch)"'
+endif
 ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
@@ -165,7 +170,8 @@ zinstall install:
 PHONY += vdso_install
 vdso_install:
 	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
-	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@
+	$(if $(CONFIG_COMPAT_VDSO), \
+		$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
...
@@ -2,6 +2,12 @@
 #ifndef __ASM_COMPILER_H
 #define __ASM_COMPILER_H
+#ifdef ARM64_ASM_ARCH
+#define ARM64_ASM_PREAMBLE ".arch " ARM64_ASM_ARCH "\n"
+#else
+#define ARM64_ASM_PREAMBLE
+#endif
 /*
  * The EL0/EL1 pointer bits used by a pointer authentication code.
  * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
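
For illustration (not part of the series): with ARM64_ASM_ARCH defined to
"armv8.4-a" by the Makefile hunk above, ARM64_ASM_PREAMBLE expands to
".arch armv8.4-a\n", so inline assembly that begins with the preamble is
accepted even by an integrated assembler that never saw -Wa,-march. A
hypothetical helper using an ARMv8.4 (TLBIOS) instruction:

    /* Hypothetical example, not in the patch: the preamble raises the
     * assembler's target architecture for just this statement. */
    static inline void example_tlbi_vmalle1os(void)
    {
            asm volatile(ARM64_ASM_PREAMBLE   /* ".arch armv8.4-a\n" */
                         "tlbi vmalle1os"     /* ARMv8.4 outer-shareable op */
                         : : : "memory");
    }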
...
@@ -71,11 +71,12 @@
  * IMO:		Override CPSR.I and enable signaling with VI
  * FMO:		Override CPSR.F and enable signaling with VF
  * SWIO:	Turn set/way invalidates into set/way clean+invalidate
+ * PTW:		Take a stage2 fault if a stage1 walk steps in device memory
  */
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
 			 HCR_BSU_IS | HCR_FB | HCR_TAC | \
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
-			 HCR_FMO | HCR_IMO)
+			 HCR_FMO | HCR_IMO | HCR_PTW )
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
...
@@ -169,6 +169,34 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 	*__hyp_this_cpu_ptr(sym); \
 })
+#define __KVM_EXTABLE(from, to) \
+	"	.pushsection	__kvm_ex_table, \"a\"\n" \
+	"	.align		3\n" \
+	"	.long		(" #from " - .), (" #to " - .)\n" \
+	"	.popsection\n"
+#define __kvm_at(at_op, addr) \
+( { \
+	int __kvm_at_err = 0; \
+	u64 spsr, elr; \
+	asm volatile( \
+	"	mrs	%1, spsr_el2\n" \
+	"	mrs	%2, elr_el2\n" \
+	"1:	at	"at_op", %3\n" \
+	"	isb\n" \
+	"	b	9f\n" \
+	"2:	msr	spsr_el2, %1\n" \
+	"	msr	elr_el2, %2\n" \
+	"	mov	%w0, %4\n" \
+	"9:\n" \
+	__KVM_EXTABLE(1b, 2b) \
+	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \
+	: "r" (addr), "i" (-EFAULT)); \
+	__kvm_at_err; \
+} )
 #else /* __ASSEMBLY__ */
 .macro hyp_adr_this_cpu reg, sym, tmp
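
A usage sketch for __kvm_at (this mirrors the hyp/switch.h hunk later in the
commit): the expression evaluates to 0 when the AT walk completed, leaving
its result in PAR_EL1, and to -EFAULT when the AT instruction itself took a
synchronous exception, in which case the fixup at label 2: has already
restored SPSR_EL2 and ELR_EL2:

    u64 par;

    if (!__kvm_at("s1e1r", far))
            par = read_sysreg(par_el1);     /* walk completed */
    else
            par = SYS_PAR_EL1_F;            /* AT faulted; report failure */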
@@ -193,6 +221,21 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm
+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format as _asm_extable, but output to a different section so
+ * that it can be mapped to EL2. The KVM version is not sorted. The caller
+ * must ensure:
+ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
+ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
+ */
+.macro	_kvm_extable, from, to
+	.pushsection	__kvm_ex_table, "a"
+	.align		3
+	.long	(\from - .), (\to - .)
+	.popsection
+.endm
 #endif
 #endif /* __ARM_KVM_ASM_H__ */
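
Both the C macro (__KVM_EXTABLE) and the assembly macro (_kvm_extable) emit
the same entry layout: two signed 32-bit offsets relative to the entry
itself. A minimal sketch of the resolution arithmetic, using the arm64
exception_table_entry layout that the switch.h hunk below relies on:

    struct exception_table_entry {
            int insn, fixup;        /* self-relative offsets */
    };

    /* ".long (from - .)" stores from - &entry->insn, so adding the stored
     * offset back to the field's own address recovers the absolute 'from'. */
    static unsigned long extable_insn_addr(const struct exception_table_entry *e)
    {
            return (unsigned long)&e->insn + e->insn;
    }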
@@ -28,14 +28,16 @@
  * not. The macros handle invoking the asm with or without the
  * register argument as appropriate.
  */
-#define __TLBI_0(op, arg) asm ("tlbi " #op "\n" \
+#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE \
+			       "tlbi " #op "\n" \
 		   ALTERNATIVE("nop\n nop", \
 			       "dsb ish\n tlbi " #op, \
 			       ARM64_WORKAROUND_REPEAT_TLBI, \
 			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
 			    : : )
-#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n" \
+#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE \
+			       "tlbi " #op ", %0\n" \
 		   ALTERNATIVE("nop\n nop", \
 			       "dsb ish\n tlbi " #op ", %0", \
 			       ARM64_WORKAROUND_REPEAT_TLBI, \
...
@@ -103,6 +103,10 @@ KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
 KVM_NVHE_ALIAS(gic_pmr_sync);
 #endif
+/* EL2 exception handling */
+KVM_NVHE_ALIAS(__start___kvm_ex_table);
+KVM_NVHE_ALIAS(__stop___kvm_ex_table);
 #endif /* CONFIG_KVM */
 #endif /* __ARM64_KERNEL_IMAGE_VARS_H */
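
For context, KVM_NVHE_ALIAS is defined near the top of this file; its v5.9
definition (quoted from the surrounding file, not part of this diff) is
essentially a linker-script alias:

    #define KVM_NVHE_ALIAS(sym)     __kvm_nvhe_##sym = sym;

nVHE hyp objects have all their symbols prefixed with __kvm_nvhe_, so these
two aliases are what let the EL2 exception code see the extable bounds.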
@@ -20,6 +20,13 @@ ENTRY(_text)
 jiffies = jiffies_64;
+#define HYPERVISOR_EXTABLE \
+	. = ALIGN(SZ_8); \
+	__start___kvm_ex_table = .; \
+	*(__kvm_ex_table) \
+	__stop___kvm_ex_table = .;
 #define HYPERVISOR_TEXT \
 	/* \
 	 * Align to 4 KB so that \
@@ -35,6 +42,7 @@ jiffies = jiffies_64;
 	__hyp_idmap_text_end = .; \
 	__hyp_text_start = .; \
 	*(.hyp.text) \
+	HYPERVISOR_EXTABLE \
 	__hyp_text_end = .;
 #define IDMAP_TEXT \
...
@@ -196,20 +196,23 @@ alternative_endif
 	// This is our single instruction exception window. A pending
 	// SError is guaranteed to occur at the earliest when we unmask
 	// it, and at the latest just after the ISB.
-	.global	abort_guest_exit_start
 abort_guest_exit_start:
 	isb
-	.global	abort_guest_exit_end
 abort_guest_exit_end:
 	msr	daifset, #4	// Mask aborts
+	ret
+	_kvm_extable	abort_guest_exit_start, 9997f
+	_kvm_extable	abort_guest_exit_end, 9997f
+9997:
+	msr	daifset, #4	// Mask aborts
+	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
-	// If the exception took place, restore the EL1 exception
-	// context so that we can report some information.
-	// Merge the exception code with the SError pending bit.
-	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+	// restore the EL1 exception context so that we can report some
+	// information. Merge the exception code with the SError pending bit.
 	msr	elr_el2, x2
 	msr	esr_el2, x3
 	msr	spsr_el2, x4
...
@@ -15,6 +15,30 @@
 #include <asm/kvm_mmu.h>
 #include <asm/mmu.h>
+.macro save_caller_saved_regs_vect
+	/* x0 and x1 were saved in the vector entry */
+	stp	x2, x3,   [sp, #-16]!
+	stp	x4, x5,   [sp, #-16]!
+	stp	x6, x7,   [sp, #-16]!
+	stp	x8, x9,   [sp, #-16]!
+	stp	x10, x11, [sp, #-16]!
+	stp	x12, x13, [sp, #-16]!
+	stp	x14, x15, [sp, #-16]!
+	stp	x16, x17, [sp, #-16]!
+.endm
+.macro restore_caller_saved_regs_vect
+	ldp	x16, x17, [sp], #16
+	ldp	x14, x15, [sp], #16
+	ldp	x12, x13, [sp], #16
+	ldp	x10, x11, [sp], #16
+	ldp	x8, x9,   [sp], #16
+	ldp	x6, x7,   [sp], #16
+	ldp	x4, x5,   [sp], #16
+	ldp	x2, x3,   [sp], #16
+	ldp	x0, x1,   [sp], #16
+.endm
 .text
 .macro do_el2_call
@@ -143,13 +167,19 @@ el1_error:
 	b	__guest_exit
 el2_sync:
-	/* Check for illegal exception return, otherwise panic */
+	/* Check for illegal exception return */
 	mrs	x0, spsr_el2
+	tbnz	x0, #20, 1f
+	save_caller_saved_regs_vect
+	stp	x29, x30, [sp, #-16]!
+	bl	kvm_unexpected_el2_exception
+	ldp	x29, x30, [sp], #16
+	restore_caller_saved_regs_vect
-	/* if this was something else, then panic! */
-	tst	x0, #PSR_IL_BIT
-	b.eq	__hyp_panic
+	eret
+1:
 	/* Let's attempt a recovery from the illegal exception return */
 	get_vcpu_ptr	x1, x0
 	mov	x0, #ARM_EXCEPTION_IL
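
A C-level paraphrase of the new el2_sync flow (illustrative only; bit 20 of
SPSR_EL2 is the Illegal Execution state bit that PSR_IL_BIT names, so
"tbnz x0, #20" and the old "tst x0, #PSR_IL_BIT" test the same bit):

    if (read_sysreg(spsr_el2) & PSR_IL_BIT) {
            /* 1: attempt recovery from the illegal exception return */
    } else {
            /* any other EL2 sync exception: look for an extable fixup */
            kvm_unexpected_el2_exception();  /* patches ELR_EL2, or panics */
            /* the following eret lands on the fixup written into ELR_EL2 */
    }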
@@ -157,27 +187,14 @@ el2_sync:
 el2_error:
-	ldp	x0, x1, [sp], #16
+	save_caller_saved_regs_vect
+	stp	x29, x30, [sp, #-16]!
+	bl	kvm_unexpected_el2_exception
+	ldp	x29, x30, [sp], #16
+	restore_caller_saved_regs_vect
-	/*
-	 * Only two possibilities:
-	 * 1) Either we come from the exit path, having just unmasked
-	 *    PSTATE.A: change the return code to an EL2 fault, and
-	 *    carry on, as we're already in a sane state to handle it.
-	 * 2) Or we come from anywhere else, and that's a bug: we panic.
-	 *
-	 * For (1), x0 contains the original return code and x1 doesn't
-	 * contain anything meaningful at that stage. We can reuse them
-	 * as temp registers.
-	 * For (2), who cares?
-	 */
-	mrs	x0, elr_el2
-	adr	x1, abort_guest_exit_start
-	cmp	x0, x1
-	adr	x1, abort_guest_exit_end
-	ccmp	x0, x1, #4, ne
-	b.ne	__hyp_panic
-	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 	eret
 	sb
...
@@ -17,6 +17,7 @@
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
+#include <asm/extable.h>
 #include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
@@ -29,6 +30,9 @@
 extern const char __hyp_panic_string[];
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
 {
@@ -142,10 +146,10 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
 	 * saved the guest context yet, and we may return early...
 	 */
 	par = read_sysreg(par_el1);
-	asm volatile("at s1e1r, %0" : : "r" (far));
-	isb();
-	tmp = read_sysreg(par_el1);
+	if (!__kvm_at("s1e1r", far))
+		tmp = read_sysreg(par_el1);
+	else
+		tmp = SYS_PAR_EL1_F; /* back to the guest */
 	write_sysreg(par, par_el1);
 	if (unlikely(tmp & SYS_PAR_EL1_F))
@@ -508,4 +512,31 @@ static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
 #endif
 }
+static inline void __kvm_unexpected_el2_exception(void)
+{
+	unsigned long addr, fixup;
+	struct kvm_cpu_context *host_ctxt;
+	struct exception_table_entry *entry, *end;
+	unsigned long elr_el2 = read_sysreg(elr_el2);
+
+	entry = hyp_symbol_addr(__start___kvm_ex_table);
+	end = hyp_symbol_addr(__stop___kvm_ex_table);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+
+	while (entry < end) {
+		addr = (unsigned long)&entry->insn + entry->insn;
+		fixup = (unsigned long)&entry->fixup + entry->fixup;
+
+		if (addr != elr_el2) {
+			entry++;
+			continue;
+		}
+
+		write_sysreg(fixup, elr_el2);
+		return;
+	}
+
+	hyp_panic(host_ctxt);
+}
 #endif /* __ARM64_KVM_HYP_SWITCH_H__ */
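
One subtlety in the walker above: hyp_symbol_addr() computes the symbol's
address PC-relatively, so under nVHE the table bounds resolve to EL2
addresses rather than the link-time kernel VAs that a plain
&__start___kvm_ex_table would embed. Its definition (quoted from memory from
v5.9 asm/kvm_mmu.h for context, not part of this diff) is essentially:

    #define hyp_symbol_addr(s)                                  \
            ({                                                  \
                    typeof(s) *addr;                            \
                    asm("adrp  %0, %1\n"                        \
                        "add   %0, %0, :lo12:%1\n"              \
                        : "=r" (addr) : "S" (&s));              \
                    addr;                                       \
            })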
@@ -270,3 +270,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 			read_sysreg(hpfar_el2), par, vcpu);
 	unreachable();
 }
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+	return __kvm_unexpected_el2_exception();
+}
@@ -217,3 +217,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 	__hyp_call_panic(spsr, elr, par, host_ctxt);
 	unreachable();
 }
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+	return __kvm_unexpected_el2_exception();
+}