Commit 53a43dd4 authored by Sean Christopherson

KVM: selftests: Move setting a vCPU's entry point to a dedicated API

Extract the code to set a vCPU's entry point out of vm_arch_vcpu_add() and
into a new API, vcpu_arch_set_entry_point().  Providing a separate API
will allow creating a KVM selftests harness that can handle tests that
use different entry points for sub-tests, whereas *requiring* the entry
point to be specified at vCPU creation makes it difficult to create a
generic harness, e.g. the boilerplate setup/teardown can't easily create
and destroy the VM and vCPUs.
Signed-off-by: Thomas Huth <thuth@redhat.com>
Link: https://lore.kernel.org/r/20240208204844.119326-4-thuth@redhat.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 221d6544
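To make the motivation concrete, below is a minimal sketch of the kind of generic harness this split enables; it is not part of this commit. The guest_code_a()/guest_code_b() entry points and the run_subtest() helper are hypothetical placeholders, and the sketch assumes the usual selftests APIs (vm_create(), vcpu_run(), get_ucall(), kvm_vm_free()) are available via kvm_util.h and ucall_common.h. The point is that boilerplate code can create and destroy the VM and vCPU once, while each sub-test merely re-aims the existing vCPU at its own entry point.

#include "kvm_util.h"
#include "ucall_common.h"

/* Hypothetical guest entry points; real sub-tests would do actual work. */
static void guest_code_a(void)
{
	GUEST_DONE();
}

static void guest_code_b(void)
{
	GUEST_DONE();
}

/* Hypothetical helper: point the existing vCPU at one sub-test's entry point. */
static void run_subtest(struct kvm_vcpu *vcpu, void *guest_entry)
{
	vcpu_arch_set_entry_point(vcpu, guest_entry);
	vcpu_run(vcpu);
	TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_DONE, "Guest did not finish");
}

int main(void)
{
	/* Boilerplate owns the VM/vCPU lifetime; no entry point is needed yet. */
	struct kvm_vm *vm = vm_create(1);
	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, 0);

	run_subtest(vcpu, guest_code_a);
	run_subtest(vcpu, guest_code_b);

	kvm_vm_free(vm);
	return 0;
}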
@@ -969,15 +969,18 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
  * Input Args:
  *   vm - Virtual Machine
  *   vcpu_id - The id of the VCPU to add to the VM.
- *   guest_code - The vCPU's entry point
  */
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code);
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
 
 static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 					   void *guest_code)
 {
-	return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
+	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);
+
+	vcpu_arch_set_entry_point(vcpu, guest_code);
+
+	return vcpu;
 }
 
 /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
@@ -365,8 +365,13 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
 		  indent, "", pstate, pc);
 }
 
-struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  struct kvm_vcpu_init *init, void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+}
+
+static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+					   struct kvm_vcpu_init *init)
 {
 	size_t stack_size;
 	uint64_t stack_vaddr;
@@ -381,15 +386,22 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	aarch64_vcpu_setup(vcpu, init);
 
 	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
-	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
 
 	return vcpu;
 }
 
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+				  struct kvm_vcpu_init *init, void *guest_code)
+{
+	struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init);
+
+	vcpu_arch_set_entry_point(vcpu, guest_code);
+	return vcpu;
+}
+
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
-	return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
+	return __aarch64_vcpu_add(vm, vcpu_id, NULL);
 }
 
 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
@@ -277,8 +277,12 @@ static void __aligned(16) guest_unexp_trap(void)
 		  0, 0, 0, 0, 0, 0);
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
+}
+
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
 	int r;
 	size_t stack_size;
@@ -312,7 +316,6 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 
 	/* Setup stack pointer and program counter of guest */
 	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
-	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
 
 	/* Setup default exception vector of guest */
 	vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)guest_unexp_trap);
@@ -155,15 +155,18 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 	virt_dump_region(stream, vm, indent, vm->pgd);
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+	vcpu->run->psw_addr = (uintptr_t)guest_code;
+}
+
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
 	size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
 	uint64_t stack_vaddr;
 	struct kvm_regs regs;
 	struct kvm_sregs sregs;
 	struct kvm_vcpu *vcpu;
-	struct kvm_run *run;
 
 	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
 		    vm->page_size);
@@ -184,9 +187,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	sregs.crs[1] = vm->pgd | 0xf;	/* Primary region table */
 	vcpu_sregs_set(vcpu, &sregs);
 
-	run = vcpu->run;
-	run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
-	run->psw_addr = (uintptr_t)guest_code;
+	vcpu->run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
 
 	return vcpu;
 }
@@ -562,8 +562,16 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm)
 	sync_global_to_guest(vm, host_cpu_is_amd);
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+	struct kvm_regs regs;
+
+	vcpu_regs_get(vcpu, &regs);
+	regs.rip = (unsigned long) guest_code;
+	vcpu_regs_set(vcpu, &regs);
+}
+
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
 	struct kvm_mp_state mp_state;
 	struct kvm_regs regs;
@@ -597,7 +605,6 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	vcpu_regs_get(vcpu, &regs);
 	regs.rflags = regs.rflags | 0x2;
 	regs.rsp = stack_vaddr;
-	regs.rip = (unsigned long) guest_code;
 	vcpu_regs_set(vcpu, &regs);
 
 	/* Setup the MP state */