Commit 3b23054c authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Add x86-64 support for exception fixup

Add x86-64 support for exception fixup on single instructions, without
forcing tests to install their own fault handlers.  Use registers r9-r11
to flag the instruction as "safe" and pass fixup/vector information,
i.e. introduce yet another flavor of fixup (versus the kernel's in-memory
tables and KUT's per-CPU area) to take advantage of KVM selftests being
64-bit only.
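
For instance (an illustrative sketch, not taken from this patch; setting a
reserved CR4 bit is simply an easy way to force a fault), guest code can wrap
a single instruction with the new kvm_asm_safe() helper and assert on the
returned vector:

	uint8_t vector;

	/* Attempt to set every CR4 bit; the reserved bits guarantee a #GP. */
	vector = kvm_asm_safe("mov %[cr4], %%cr4", [cr4] "r"(-1ull));
	GUEST_ASSERT_1(vector == GP_VECTOR, vector);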

Using only registers avoids the need to allocate fixup tables, ensure
FS or GS base is valid for the guest, ensure memory is mapped into the
guest, etc..., and also reduces the potential for recursive faults due to
accessing memory.

Providing exception fixup trivializes tests that just want to verify that
an instruction faults, e.g. no need to track start/end using global
labels, no need to install a dedicated handler, etc...
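
For example, a guest-side check now reduces to something like the following
sketch (illustrative only; the function name and MSR index are made up, the
helpers are the ones added by this patch):

	static void guest_code(void)
	{
		uint64_t ignored;
		uint8_t vector;

		/* Reading an unsupported MSR should #GP; fixup returns the vector. */
		vector = rdmsr_safe(0xdeadbeef, &ignored);
		GUEST_ASSERT_1(vector == GP_VECTOR, vector);

		GUEST_DONE();
	}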

Deliberately do not support #DE in exception fixup so that the fixup glue
doesn't need to account for a fault with vector == 0, i.e. the vector can
also indicate that a fault occurred.  KVM injects #DE only for esoteric
emulation scenarios, i.e. there's very, very little value in testing #DE.
Force any test that wants to generate #DEs to install its own handler(s).
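
A minimal sketch of what such a test would do (the handler name and the RIP
adjustment are hypothetical; the descriptor-table and install helpers already
exist in the selftests library):

	static void guest_de_handler(struct ex_regs *regs)
	{
		/* Skip the faulting divide; assumes a 2-byte DIV encoding. */
		regs->rip += 2;
	}

	static void install_de_handler(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
	{
		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vcpu);
		vm_install_exception_handler(vm, DE_VECTOR, guest_de_handler);
	}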

Use kvm_pv_test as a guinea pig for the new fixup, as it has a very
straightforward use case of wanting to verify that RDMSR and WRMSR fault.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220608224516.3788274-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bfbcc81b
@@ -15,6 +15,8 @@
#include <asm/msr-index.h>
#include <asm/prctl.h>

#include <linux/stringify.h>

#include "../kvm_util.h"

#define NMI_VECTOR 0x02

@@ -541,6 +543,78 @@ void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
				  void (*handler)(struct ex_regs *));
/* If a toddler were to say "abracadabra". */
#define KVM_EXCEPTION_MAGIC 0xabacadabaull
/*
* KVM selftest exception fixup uses registers to coordinate with the exception
* handler, versus the kernel's in-memory tables and KVM-Unit-Tests's in-memory
* per-CPU data. Using only registers avoids having to map memory into the
* guest, doesn't require a valid, stable GS.base, and reduces the risk of
* recursive faults when accessing memory in the handler. The downside to
* using registers is that it restricts what registers can be used by the actual
* instruction. But, selftests are 64-bit only, making register pressure a
* minor concern. Use r9-r11 as they are volatile, i.e. don't need to be saved
* by the callee, and except for r11 are not implicit parameters to any
* instructions. Ideally, fixup would use r8-r10 and thus avoid implicit
* parameters entirely, but Hyper-V's hypercall ABI uses r8 and testing Hyper-V
* is higher priority than testing non-faulting SYSCALL/SYSRET.
*
* Note, the fixup handler deliberately does not handle #DE, i.e. the vector
* is guaranteed to be non-zero on fault.
*
* REGISTER INPUTS:
* r9 = MAGIC
* r10 = RIP
* r11 = new RIP on fault
*
* REGISTER OUTPUTS:
* r9 = exception vector (non-zero)
*/
#define KVM_ASM_SAFE(insn) \
"mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t" \
"lea 1f(%%rip), %%r10\n\t" \
"lea 2f(%%rip), %%r11\n\t" \
"1: " insn "\n\t" \
"mov $0, %[vector]\n\t" \
"jmp 3f\n\t" \
"2:\n\t" \
"mov %%r9b, %[vector]\n\t" \
"3:\n\t"
#define KVM_ASM_SAFE_OUTPUTS(v) [vector] "=qm"(v)
#define KVM_ASM_SAFE_CLOBBERS "r9", "r10", "r11"
#define kvm_asm_safe(insn, inputs...) \
({ \
uint8_t vector; \
\
asm volatile(KVM_ASM_SAFE(insn) \
: KVM_ASM_SAFE_OUTPUTS(vector) \
: inputs \
: KVM_ASM_SAFE_CLOBBERS); \
vector; \
})
static inline uint8_t rdmsr_safe(uint32_t msr, uint64_t *val)
{
uint8_t vector;
uint32_t a, d;
asm volatile(KVM_ASM_SAFE("rdmsr")
: "=a"(a), "=d"(d), KVM_ASM_SAFE_OUTPUTS(vector)
: "c"(msr)
: KVM_ASM_SAFE_CLOBBERS);
*val = (uint64_t)a | ((uint64_t)d << 32);
return vector;
}
static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
{
return kvm_asm_safe("wrmsr", "A"(val), "c"(msr));
}
uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
				 uint64_t vaddr);
void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
...
@@ -1127,6 +1127,20 @@ static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
	e->offset2 = addr >> 32;
}
static bool kvm_fixup_exception(struct ex_regs *regs)
{
if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10)
return false;
if (regs->vector == DE_VECTOR)
return false;
regs->rip = regs->r11;
regs->r9 = regs->vector;
return true;
}
void kvm_exit_unexpected_vector(uint32_t value)
{
	ucall(UCALL_UNHANDLED, 1, value);
@@ -1142,6 +1156,9 @@ void route_exception(struct ex_regs *regs)
		return;
	}
if (kvm_fixup_exception(regs))
return;
	kvm_exit_unexpected_vector(regs->vector);
}
...
@@ -12,55 +12,6 @@
#include "kvm_util.h"
#include "processor.h"
-extern unsigned char rdmsr_start;
-extern unsigned char rdmsr_end;
-
-static u64 do_rdmsr(u32 idx)
-{
-	u32 lo, hi;
-
-	asm volatile("rdmsr_start: rdmsr;"
-		     "rdmsr_end:"
-		     : "=a"(lo), "=c"(hi)
-		     : "c"(idx));
-
-	return (((u64) hi) << 32) | lo;
-}
-
-extern unsigned char wrmsr_start;
-extern unsigned char wrmsr_end;
-
-static void do_wrmsr(u32 idx, u64 val)
-{
-	u32 lo, hi;
-
-	lo = val;
-	hi = val >> 32;
-
-	asm volatile("wrmsr_start: wrmsr;"
-		     "wrmsr_end:"
-		     : : "a"(lo), "c"(idx), "d"(hi));
-}
-
-static int nr_gp;
-
-static void guest_gp_handler(struct ex_regs *regs)
-{
-	unsigned char *rip = (unsigned char *)regs->rip;
-	bool r, w;
-
-	r = rip == &rdmsr_start;
-	w = rip == &wrmsr_start;
-	GUEST_ASSERT(r || w);
-
-	nr_gp++;
-
-	if (r)
-		regs->rip = (uint64_t)&rdmsr_end;
-	else
-		regs->rip = (uint64_t)&wrmsr_end;
-}

struct msr_data {
	uint32_t idx;
	const char *name;
@@ -89,14 +40,16 @@ static struct msr_data msrs_to_test[] = {
static void test_msr(struct msr_data *msr)
{
+	uint64_t ignored;
+	uint8_t vector;
+
	PR_MSR(msr);
-	do_rdmsr(msr->idx);
-	GUEST_ASSERT(READ_ONCE(nr_gp) == 1);
-	nr_gp = 0;
-
-	do_wrmsr(msr->idx, 0);
-	GUEST_ASSERT(READ_ONCE(nr_gp) == 1);
-	nr_gp = 0;
+
+	vector = rdmsr_safe(msr->idx, &ignored);
+	GUEST_ASSERT_1(vector == GP_VECTOR, vector);
+
+	vector = wrmsr_safe(msr->idx, 0);
+	GUEST_ASSERT_1(vector == GP_VECTOR, vector);
}
struct hcall_data {
@@ -165,12 +118,6 @@ static void pr_hcall(struct ucall *uc)
	pr_info("testing hcall: %s (%lu)\n", hc->name, hc->nr);
}
-
-static void handle_abort(struct ucall *uc)
-{
-	TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0],
-		  __FILE__, uc->args[1]);
-}

static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
@@ -190,7 +137,9 @@ static void enter_guest(struct kvm_vcpu *vcpu)
			pr_hcall(&uc);
			break;
		case UCALL_ABORT:
-			handle_abort(&uc);
+			TEST_FAIL("%s at %s:%ld, vector = %lu",
+				  (const char *)uc.args[0], __FILE__,
+				  uc.args[1], uc.args[2]);
			return;
		case UCALL_DONE:
			return;
@@ -216,7 +165,6 @@ int main(void)
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);
-	vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);

	enter_guest(vcpu);
	kvm_vm_free(vm);
...