Commit 70466381 authored by Sean Christopherson

KVM: selftests: Consolidate common code for populating ucall struct

Make ucall() a common helper that populates struct ucall, and only calls
into arch code to make the actual call out to userspace.

Rename all arch-specific helpers to make it clear they're arch-specific,
and to avoid collisions with common helpers (one more on its way...)

Add WRITE_ONCE() to stores in ucall() code (as already done to aarch64
code in commit 9e2f6498 ("selftests: KVM: Handle compiler
optimizations in ucall")) to prevent clang optimizations from breaking
ucalls.

Cc: Colton Lewis <coltonlewis@google.com>
Reviewed-by: Andrew Jones <andrew.jones@linux.dev>
Tested-by: Peter Gonda <pgonda@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221006003409.649993-2-seanjc@google.com
parent b3d93772
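
For readers not familiar with the selftests ucall plumbing, the flow this
patch refactors looks roughly like the sketch below. It is an illustrative
example, not code from this commit: guest_code(), run_vcpu() and the stage
numbers are made up, while GUEST_SYNC()/GUEST_DONE(), get_ucall() and the
UCALL_* values are the existing selftests API that now funnels through the
common ucall() and the ucall_arch_*() hooks.

#include "kvm_util.h"
#include "ucall_common.h"

/* Illustrative guest side: each GUEST_SYNC()/GUEST_DONE() expands to a ucall(). */
static void guest_code(void)
{
	GUEST_SYNC(1);		/* ucall(UCALL_SYNC, ...): populate struct ucall, exit to host */
	GUEST_SYNC(2);
	GUEST_DONE();		/* ucall(UCALL_DONE, 0) */
}

/* Illustrative host side: get_ucall() is now a wrapper around ucall_arch_get_ucall(). */
static void run_vcpu(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			/* args[1] holds the stage passed to GUEST_SYNC(). */
			pr_info("guest reached stage %lu\n", uc.args[1]);
			break;
		case UCALL_DONE:
			return;
		case UCALL_ABORT:
			TEST_FAIL("%s at line %lu", (const char *)uc.args[0], uc.args[1]);
			/* NOT REACHED */
		default:
			TEST_FAIL("Unexpected ucall %lu", uc.cmd);
		}
	}
}
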
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -47,6 +47,7 @@ LIBKVM += lib/memstress.c
 LIBKVM += lib/rbtree.c
 LIBKVM += lib/sparsebit.c
 LIBKVM += lib/test_util.c
+LIBKVM += lib/ucall_common.c
 
 LIBKVM_STRING += lib/string_override.c

--- a/tools/testing/selftests/kvm/include/ucall_common.h
+++ b/tools/testing/selftests/kvm/include/ucall_common.h
@@ -24,10 +24,27 @@ struct ucall {
 	uint64_t args[UCALL_MAX_ARGS];
 };
 
-void ucall_init(struct kvm_vm *vm, void *arg);
-void ucall_uninit(struct kvm_vm *vm);
+void ucall_arch_init(struct kvm_vm *vm, void *arg);
+void ucall_arch_uninit(struct kvm_vm *vm);
+void ucall_arch_do_ucall(vm_vaddr_t uc);
+uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
+
 void ucall(uint64_t cmd, int nargs, ...);
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
+
+static inline void ucall_init(struct kvm_vm *vm, void *arg)
+{
+	ucall_arch_init(vm, arg);
+}
+
+static inline void ucall_uninit(struct kvm_vm *vm)
+{
+	ucall_arch_uninit(vm);
+}
+
+static inline uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
+{
+	return ucall_arch_get_ucall(vcpu, uc);
+}
 
 #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4)	\
 				ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)

--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -21,7 +21,7 @@ static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
 	return true;
 }
 
-void ucall_init(struct kvm_vm *vm, void *arg)
+void ucall_arch_init(struct kvm_vm *vm, void *arg)
 {
 	vm_paddr_t gpa, start, end, step, offset;
 	unsigned int bits;
@@ -64,30 +64,18 @@ void ucall_init(struct kvm_vm *vm, void *arg)
 	TEST_FAIL("Can't find a ucall mmio address");
 }
 
-void ucall_uninit(struct kvm_vm *vm)
+void ucall_arch_uninit(struct kvm_vm *vm)
 {
 	ucall_exit_mmio_addr = 0;
 	sync_global_to_guest(vm, ucall_exit_mmio_addr);
 }
 
-void ucall(uint64_t cmd, int nargs, ...)
+void ucall_arch_do_ucall(vm_vaddr_t uc)
 {
-	struct ucall uc = {};
-	va_list va;
-	int i;
-
-	WRITE_ONCE(uc.cmd, cmd);
-
-	nargs = min(nargs, UCALL_MAX_ARGS);
-
-	va_start(va, nargs);
-	for (i = 0; i < nargs; ++i)
-		WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
-	va_end(va);
-
-	WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc);
+	WRITE_ONCE(*ucall_exit_mmio_addr, uc);
 }
 
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
+uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
 {
 	struct kvm_run *run = vcpu->run;
 	struct ucall ucall = {};

--- a/tools/testing/selftests/kvm/lib/riscv/ucall.c
+++ b/tools/testing/selftests/kvm/lib/riscv/ucall.c
@@ -10,11 +10,11 @@
 #include "kvm_util.h"
 #include "processor.h"
 
-void ucall_init(struct kvm_vm *vm, void *arg)
+void ucall_arch_init(struct kvm_vm *vm, void *arg)
 {
 }
 
-void ucall_uninit(struct kvm_vm *vm)
+void ucall_arch_uninit(struct kvm_vm *vm)
 {
 }
 
@@ -44,27 +44,14 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
 	return ret;
 }
 
-void ucall(uint64_t cmd, int nargs, ...)
+void ucall_arch_do_ucall(vm_vaddr_t uc)
 {
-	struct ucall uc = {
-		.cmd = cmd,
-	};
-	va_list va;
-	int i;
-
-	nargs = min(nargs, UCALL_MAX_ARGS);
-
-	va_start(va, nargs);
-	for (i = 0; i < nargs; ++i)
-		uc.args[i] = va_arg(va, uint64_t);
-	va_end(va);
-
 	sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
 		  KVM_RISCV_SELFTESTS_SBI_UCALL,
-		  (vm_vaddr_t)&uc, 0, 0, 0, 0, 0);
+		  uc, 0, 0, 0, 0, 0);
 }
 
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
+uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
 {
 	struct kvm_run *run = vcpu->run;
 	struct ucall ucall = {};

--- a/tools/testing/selftests/kvm/lib/s390x/ucall.c
+++ b/tools/testing/selftests/kvm/lib/s390x/ucall.c
@@ -6,34 +6,21 @@
  */
 #include "kvm_util.h"
 
-void ucall_init(struct kvm_vm *vm, void *arg)
+void ucall_arch_init(struct kvm_vm *vm, void *arg)
 {
 }
 
-void ucall_uninit(struct kvm_vm *vm)
+void ucall_arch_uninit(struct kvm_vm *vm)
 {
 }
 
-void ucall(uint64_t cmd, int nargs, ...)
+void ucall_arch_do_ucall(vm_vaddr_t uc)
 {
-	struct ucall uc = {
-		.cmd = cmd,
-	};
-	va_list va;
-	int i;
-
-	nargs = min(nargs, UCALL_MAX_ARGS);
-
-	va_start(va, nargs);
-	for (i = 0; i < nargs; ++i)
-		uc.args[i] = va_arg(va, uint64_t);
-	va_end(va);
-
 	/* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */
-	asm volatile ("diag 0,%0,0x501" : : "a"(&uc) : "memory");
+	asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory");
 }
 
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
+uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
 {
 	struct kvm_run *run = vcpu->run;
 	struct ucall ucall = {};

--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/ucall_common.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "kvm_util.h"
+
+void ucall(uint64_t cmd, int nargs, ...)
+{
+	struct ucall uc = {};
+	va_list va;
+	int i;
+
+	WRITE_ONCE(uc.cmd, cmd);
+
+	nargs = min(nargs, UCALL_MAX_ARGS);
+
+	va_start(va, nargs);
+	for (i = 0; i < nargs; ++i)
+		WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
+	va_end(va);
+
+	ucall_arch_do_ucall((vm_vaddr_t)&uc);
+}

--- a/tools/testing/selftests/kvm/lib/x86_64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/ucall.c
@@ -8,34 +8,21 @@
 
 #define UCALL_PIO_PORT ((uint16_t)0x1000)
 
-void ucall_init(struct kvm_vm *vm, void *arg)
+void ucall_arch_init(struct kvm_vm *vm, void *arg)
 {
 }
 
-void ucall_uninit(struct kvm_vm *vm)
+void ucall_arch_uninit(struct kvm_vm *vm)
 {
 }
 
-void ucall(uint64_t cmd, int nargs, ...)
+void ucall_arch_do_ucall(vm_vaddr_t uc)
 {
-	struct ucall uc = {
-		.cmd = cmd,
-	};
-	va_list va;
-	int i;
-
-	nargs = min(nargs, UCALL_MAX_ARGS);
-
-	va_start(va, nargs);
-	for (i = 0; i < nargs; ++i)
-		uc.args[i] = va_arg(va, uint64_t);
-	va_end(va);
-
 	asm volatile("in %[port], %%al"
-		: : [port] "d" (UCALL_PIO_PORT), "D" (&uc) : "rax", "memory");
+		: : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory");
 }
 
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
+uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
 {
 	struct kvm_run *run = vcpu->run;
 	struct ucall ucall = {};

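To make the new split concrete, a backend for a hypothetical new architecture
would now only need to supply the four ucall_arch_*() hooks shown below;
argument marshaling is handled entirely by lib/ucall_common.c. This is a
placeholder sketch, not code from this series: the file name and the transport
described in the comments are assumptions, mirroring the existing arch files.

/* Hypothetical lib/<newarch>/ucall.c, sketched for illustration only. */
#include "kvm_util.h"

void ucall_arch_init(struct kvm_vm *vm, void *arg)
{
	/* Set up the exit transport if one is needed (cf. the aarch64 MMIO probing). */
}

void ucall_arch_uninit(struct kvm_vm *vm)
{
	/* Tear down whatever ucall_arch_init() set up. */
}

void ucall_arch_do_ucall(vm_vaddr_t uc)
{
	/*
	 * Hand the guest address of the ucall struct to the host via whatever
	 * trapping instruction the architecture provides (MMIO store, PIO,
	 * hypercall, DIAGNOSE, ...).
	 */
}

uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
	/*
	 * Recognize the arch-specific exit, copy the guest's struct ucall into
	 * *uc, and return uc->cmd; return UCALL_NONE for any other exit.
	 */
	return UCALL_NONE;
}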