Commit e9ee186b authored by James Morse, committed by Catalin Marinas

KVM: arm64: Add kvm_extable for vaxorcism code

KVM has a one-instruction window where it will allow an SError exception
to be consumed by the hypervisor without treating it as a hypervisor bug.
This is used to consume asynchronous external aborts that were caused by
the guest.

As we are about to add another location that survives unexpected exceptions,
generalise this code to make it behave like the host's extable.

KVM's version has to be mapped to EL2 to be accessible on nVHE systems.

The SError vaxorcism code is a one-instruction window, so it has two entries
in the extable. Because the KVM code is copied for VHE and nVHE, we end up
with four entries, half of which correspond with code that isn't mapped.
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 5d28ba5f
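
For orientation, a minimal user-space sketch of the entry format the patch uses. The struct mirrors arm64's struct exception_table_entry (two 32-bit offsets stored relative to the entry itself), and the decode arithmetic matches the EL2 lookup added in hyp/switch.h below; the helper names are illustrative, not part of the patch.

	#include <stdint.h>

	/*
	 * Each entry stores offsets relative to its own fields rather than
	 * absolute addresses, so the table needs no relocation when the hyp
	 * text (and the table embedded in it) is mapped at EL2.
	 */
	struct exception_table_entry {
		int insn;	/* offset from &entry->insn to the faulting instruction */
		int fixup;	/* offset from &entry->fixup to the fixup code */
	};

	/* Recover the absolute addresses, as the EL2 lookup does. */
	static uintptr_t ex_insn_addr(const struct exception_table_entry *e)
	{
		return (uintptr_t)&e->insn + e->insn;
	}

	static uintptr_t ex_fixup_addr(const struct exception_table_entry *e)
	{
		return (uintptr_t)&e->fixup + e->fixup;
	}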
arch/arm64/include/asm/kvm_asm.h

@@ -193,6 +193,21 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm
 
+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format as _asm_extable, but output to a different section so
+ * that it can be mapped to EL2. The KVM version is not sorted. The caller
+ * must ensure:
+ *  x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
+ *  code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the
+ *  fixup.
+ */
+.macro	_kvm_extable, from, to
+	.pushsection	__kvm_ex_table, "a"
+	.align		3
+	.long	(\from - .), (\to - .)
+	.popsection
+.endm
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
arch/arm64/kernel/image-vars.h

@@ -103,6 +103,10 @@ KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
 KVM_NVHE_ALIAS(gic_pmr_sync);
 #endif
 
+/* EL2 exception handling */
+KVM_NVHE_ALIAS(__start___kvm_ex_table);
+KVM_NVHE_ALIAS(__stop___kvm_ex_table);
+
 #endif /* CONFIG_KVM */
 
 #endif /* __ARM64_KERNEL_IMAGE_VARS_H */
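
These aliases exist because the nVHE hyp object is linked separately from the kernel proper and references its symbols through a __kvm_nvhe_ prefix; KVM_NVHE_ALIAS ties the prefixed name back to the kernel symbol so the EL2 code can reach the table bounds. A sketch of the mechanism, assuming the 5.9-era definitions (kvm_nvhe_sym is the kernel's prefixing helper):

	/* Prefix applied to every symbol in the separately linked nVHE object. */
	#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym

	/* A linker-script symbol assignment (this header is consumed when
	 * vmlinux is linked): make the prefixed name resolve to the
	 * kernel-proper symbol. */
	#define KVM_NVHE_ALIAS(sym)	kvm_nvhe_sym(sym) = sym;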
arch/arm64/kernel/vmlinux.lds.S

@@ -20,6 +20,13 @@ ENTRY(_text)
 
 jiffies = jiffies_64;
 
+#define HYPERVISOR_EXTABLE					\
+	. = ALIGN(SZ_8);					\
+	__start___kvm_ex_table = .;				\
+	*(__kvm_ex_table)					\
+	__stop___kvm_ex_table = .;
+
 #define HYPERVISOR_TEXT					\
 	/*						\
 	 * Align to 4 KB so that			\
@@ -35,6 +42,7 @@ jiffies = jiffies_64;
 	__hyp_idmap_text_end = .;			\
 	__hyp_text_start = .;				\
 	*(.hyp.text)					\
+	HYPERVISOR_EXTABLE				\
 	__hyp_text_end = .;
 
 #define IDMAP_TEXT					\
...
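
The ". = ALIGN(SZ_8)" here pairs with the ".align 3" inside the _kvm_extable macro: each entry is two 32-bit words, so the section becomes a naturally aligned array the C side can walk directly. Continuing the sketch above, that assumption can be pinned down at compile time:

	/* Two 32-bit relative offsets per entry; ALIGN(SZ_8) keeps the array packed. */
	_Static_assert(sizeof(struct exception_table_entry) == 8,
		       "__kvm_ex_table must be an array of 8-byte entries");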
arch/arm64/kvm/hyp/entry.S

@@ -196,20 +196,23 @@ alternative_endif
 	// This is our single instruction exception window. A pending
 	// SError is guaranteed to occur at the earliest when we unmask
 	// it, and at the latest just after the ISB.
-	.global	abort_guest_exit_start
 abort_guest_exit_start:
 
 	isb
 
-	.global	abort_guest_exit_end
 abort_guest_exit_end:
 	msr	daifset, #4	// Mask aborts
+	ret
+
+	_kvm_extable	abort_guest_exit_start, 9997f
+	_kvm_extable	abort_guest_exit_end, 9997f
+9997:
+	msr	daifset, #4	// Mask aborts
+	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 
-	// If the exception took place, restore the EL1 exception
-	// context so that we can report some information.
-	// Merge the exception code with the SError pending bit.
-	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+	// restore the EL1 exception context so that we can report some
+	// information. Merge the exception code with the SError pending bit.
 	msr	elr_el2, x2
 	msr	esr_el2, x3
 	msr	spsr_el2, x4
...
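
The pending SError can be consumed on either instruction of the window, so ELR_EL2 may hold the address of abort_guest_exit_start (the isb) or of abort_guest_exit_end (the msr that follows); hence two entries sharing the 9997 fixup. Continuing the user-space sketch, this is how such entries are encoded, mirroring the ".long (\from - .), (\to - .)" in the macro (toy addresses, illustrative only):

	/* Encode an entry the way ".long (\from - .), (\to - .)" does. */
	static void set_entry(struct exception_table_entry *e,
			      uintptr_t from, uintptr_t to)
	{
		e->insn = (int)((intptr_t)from - (intptr_t)&e->insn);
		e->fixup = (int)((intptr_t)to - (intptr_t)&e->fixup);
	}

	/* Toy stand-ins for the two window instructions and the 9997 fixup. */
	static uint32_t window[2];	/* [0]: isb, [1]: msr daifset */
	static uint32_t fixup_9997[4];
	static struct exception_table_entry table[2];

	static void build_window_entries(void)
	{
		/* _kvm_extable abort_guest_exit_start, 9997f */
		set_entry(&table[0], (uintptr_t)&window[0], (uintptr_t)fixup_9997);
		/* _kvm_extable abort_guest_exit_end, 9997f */
		set_entry(&table[1], (uintptr_t)&window[1], (uintptr_t)fixup_9997);
	}

Both entries resolve to the same fixup address, which is exactly what lets the lookup redirect a fault taken at either point in the window.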
arch/arm64/kvm/hyp/hyp-entry.S

@@ -15,6 +15,30 @@
 #include <asm/kvm_mmu.h>
 #include <asm/mmu.h>
 
+.macro save_caller_saved_regs_vect
+	/* x0 and x1 were saved in the vector entry */
+	stp	x2, x3,   [sp, #-16]!
+	stp	x4, x5,   [sp, #-16]!
+	stp	x6, x7,   [sp, #-16]!
+	stp	x8, x9,   [sp, #-16]!
+	stp	x10, x11, [sp, #-16]!
+	stp	x12, x13, [sp, #-16]!
+	stp	x14, x15, [sp, #-16]!
+	stp	x16, x17, [sp, #-16]!
+.endm
+
+.macro restore_caller_saved_regs_vect
+	ldp	x16, x17, [sp], #16
+	ldp	x14, x15, [sp], #16
+	ldp	x12, x13, [sp], #16
+	ldp	x10, x11, [sp], #16
+	ldp	x8, x9,   [sp], #16
+	ldp	x6, x7,   [sp], #16
+	ldp	x4, x5,   [sp], #16
+	ldp	x2, x3,   [sp], #16
+	ldp	x0, x1,   [sp], #16
+.endm
+
 .text
 
 .macro do_el2_call
@@ -157,27 +181,14 @@ el2_sync:
 
 el2_error:
-	ldp	x0, x1, [sp], #16
+	save_caller_saved_regs_vect
+	stp	x29, x30, [sp, #-16]!
+
+	bl	kvm_unexpected_el2_exception
+
+	ldp	x29, x30, [sp], #16
+	restore_caller_saved_regs_vect
 
-	/*
-	 * Only two possibilities:
-	 * 1) Either we come from the exit path, having just unmasked
-	 *    PSTATE.A: change the return code to an EL2 fault, and
-	 *    carry on, as we're already in a sane state to handle it.
-	 * 2) Or we come from anywhere else, and that's a bug: we panic.
-	 *
-	 * For (1), x0 contains the original return code and x1 doesn't
-	 * contain anything meaningful at that stage. We can reuse them
-	 * as temp registers.
-	 * For (2), who cares?
-	 */
-	mrs	x0, elr_el2
-	adr	x1, abort_guest_exit_start
-	cmp	x0, x1
-	adr	x1, abort_guest_exit_end
-	ccmp	x0, x1, #4, ne
-	b.ne	__hyp_panic
-	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 	eret
 	sb
...
arch/arm64/kvm/hyp/include/hyp/switch.h

@@ -17,6 +17,7 @@
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
+#include <asm/extable.h>
 #include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
@@ -29,6 +30,9 @@
 extern const char __hyp_panic_string[];
 
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
+
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
 {
@@ -508,4 +512,31 @@ static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline void __kvm_unexpected_el2_exception(void)
+{
+	unsigned long addr, fixup;
+	struct kvm_cpu_context *host_ctxt;
+	struct exception_table_entry *entry, *end;
+	unsigned long elr_el2 = read_sysreg(elr_el2);
+
+	entry = hyp_symbol_addr(__start___kvm_ex_table);
+	end = hyp_symbol_addr(__stop___kvm_ex_table);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+
+	while (entry < end) {
+		addr = (unsigned long)&entry->insn + entry->insn;
+		fixup = (unsigned long)&entry->fixup + entry->fixup;
+
+		if (addr != elr_el2) {
+			entry++;
+			continue;
+		}
+
+		write_sysreg(fixup, elr_el2);
+		return;
+	}
+
+	hyp_panic(host_ctxt);
+}
+
 #endif /* __ARM64_KVM_HYP_SWITCH_H__ */
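
The walk above is linear because, as the header comment notes, the KVM table is not sorted: it is simply whatever the linker concatenated into __kvm_ex_table. The host's extable, by contrast, is sorted at build time precisely so lookups can binary-search; with only a handful of hyp entries, the linear scan is the simpler trade-off. For comparison, a sorted-table lookup in the same user-space model (assumes entries sorted by resolved instruction address):

	#include <stdlib.h>

	static int cmp_ex(const void *key, const void *elt)
	{
		uintptr_t pc = *(const uintptr_t *)key;
		uintptr_t addr = ex_insn_addr(elt);

		if (pc < addr)
			return -1;
		return pc > addr;
	}

	/* Binary search, viable only because the table is pre-sorted. */
	static const struct exception_table_entry *
	search_sorted_extable(const struct exception_table_entry *tbl,
			      size_t num, uintptr_t pc)
	{
		return bsearch(&pc, tbl, num, sizeof(*tbl), cmp_ex);
	}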
arch/arm64/kvm/hyp/nvhe/switch.c

@@ -270,3 +270,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 		       read_sysreg(hpfar_el2), par, vcpu);
 	unreachable();
 }
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+	return __kvm_unexpected_el2_exception();
+}
arch/arm64/kvm/hyp/vhe/switch.c

@@ -217,3 +217,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 	__hyp_call_panic(spsr, elr, par, host_ctxt);
 	unreachable();
 }
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+	return __kvm_unexpected_el2_exception();
+}