Commit 1e7ecd1b authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: selftests: state_test: test bare VMXON migration

Split prepare_for_vmx_operation() into prepare_for_vmx_operation() and
load_vmcs() so we can inject GUEST_SYNC() in between.
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a1b0c1c6
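The effect is easiest to see in the guest code below: prepare_for_vmx_operation() now stops right after VMXON, and load_vmcs() performs the VMCLEAR/VMPTRLD step, so a GUEST_SYNC() placed between the two calls hands control to the host while the vCPU is in VMX root operation with no current VMCS ("bare VMXON"). Since state_test's host loop saves the vCPU state at every sync point and restores it into a fresh VM, the new GUEST_SYNC(3) turns exactly that window into a migration test; all later sync points shift up by one. In outline, the new guest-side sequence is:

    GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); /* up to and including VMXON */
    GUEST_SYNC(3);                  /* host migrates here: VMXON done, no current VMCS */
    GUEST_ASSERT(load_vmcs(vmx_pages));                 /* VMCLEAR + VMPTRLD */
    GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);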
vmx.h:

@@ -548,5 +548,6 @@ struct vmx_pages {
 struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
 bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
+bool load_vmcs(struct vmx_pages *vmx);
 
 #endif /* SELFTEST_KVM_VMX_H */
vmx.c:

@@ -107,6 +107,11 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx)
 	if (vmxon(vmx->vmxon_gpa))
 		return false;
 
+	return true;
+}
+
+bool load_vmcs(struct vmx_pages *vmx)
+{
 	/* Load a VMCS. */
 	*(uint32_t *)(vmx->vmcs) = vmcs_revision();
 	if (vmclear(vmx->vmcs_gpa))
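The hunk ends mid-function; the code that prepare_for_vmx_operation() used to fall through into now forms the rest of load_vmcs(). A plausible reconstruction of the complete function after this patch (the vmptrld step is inferred from the vmptrstz() assertions in the callers below, and the shadow-VMCS setup from state_test's later use of shadow_vmcs_gpa; neither is visible in this diff):

    bool load_vmcs(struct vmx_pages *vmx)
    {
    	/* Load a VMCS. */
    	*(uint32_t *)(vmx->vmcs) = vmcs_revision();
    	if (vmclear(vmx->vmcs_gpa))
    		return false;
    
    	if (vmptrld(vmx->vmcs_gpa))
    		return false;
    
    	/* Set up a shadow VMCS, but do not load it yet. */
    	*(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
    	if (vmclear(vmx->shadow_vmcs_gpa))
    		return false;
    
    	return true;
    }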
state_test.c:

@@ -26,20 +26,20 @@ static bool have_nested_state;
 
 void l2_guest_code(void)
 {
-	GUEST_SYNC(5);
+	GUEST_SYNC(6);
 
 	/* Exit to L1 */
 	vmcall();
 
 	/* L1 has now set up a shadow VMCS for us. */
 	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
-	GUEST_SYNC(9);
+	GUEST_SYNC(10);
 	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
 	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
-	GUEST_SYNC(10);
+	GUEST_SYNC(11);
 	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
 	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
-	GUEST_SYNC(11);
+	GUEST_SYNC(12);
 
 	/* Done, exit to L1 and never come back. */
 	vmcall();
@@ -52,15 +52,17 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
 	GUEST_ASSERT(vmx_pages->vmcs_gpa);
 	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+	GUEST_SYNC(3);
+	GUEST_ASSERT(load_vmcs(vmx_pages));
 	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
 
-	GUEST_SYNC(3);
+	GUEST_SYNC(4);
 	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
 
 	prepare_vmcs(vmx_pages, l2_guest_code,
 		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 
-	GUEST_SYNC(4);
+	GUEST_SYNC(5);
 	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
 	GUEST_ASSERT(!vmlaunch());
 	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
@@ -72,7 +74,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
 	GUEST_ASSERT(!vmresume());
 	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
-	GUEST_SYNC(6);
+	GUEST_SYNC(7);
 	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
 	GUEST_ASSERT(!vmresume());
@@ -85,12 +87,12 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
 	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
 	GUEST_ASSERT(vmlaunch());
-	GUEST_SYNC(7);
+	GUEST_SYNC(8);
 	GUEST_ASSERT(vmlaunch());
 	GUEST_ASSERT(vmresume());
 
 	vmwrite(GUEST_RIP, 0xc0ffee);
-	GUEST_SYNC(8);
+	GUEST_SYNC(9);
 	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
 
 	GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
@@ -101,7 +103,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
 	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
 	GUEST_ASSERT(vmlaunch());
 	GUEST_ASSERT(vmresume());
-	GUEST_SYNC(12);
+	GUEST_SYNC(13);
 	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
 	GUEST_ASSERT(vmlaunch());
 	GUEST_ASSERT(vmresume());
vmx_tsc_adjust_test.c:

@@ -94,6 +94,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
 	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
 
 	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+	GUEST_ASSERT(load_vmcs(vmx_pages));
 
 	/* Prepare the VMCS for L2 execution. */
 	prepare_vmcs(vmx_pages, l2_guest_code,
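On the host side, the interesting property of the new stage is what KVM_GET_NESTED_STATE reports there: VMXON active but no current VMCS. A minimal illustration of checking that condition (check_bare_vmxon is a hypothetical helper, not part of this commit; the field names follow the current kvm_nested_state uAPI, which post-dates this patch, so treat them as assumptions):

    #include <assert.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>
    
    /* Hypothetical check for a vCPU stopped at the new GUEST_SYNC(3): VMXON
     * has been executed, but no VMCS has been made current yet. */
    static void check_bare_vmxon(int vcpu_fd)
    {
    	struct {
    		struct kvm_nested_state ns;
    		char data[8192];	/* room for vmcs12 and friends */
    	} state;
    
    	memset(&state, 0, sizeof(state));
    	state.ns.size = sizeof(state);
    
    	assert(ioctl(vcpu_fd, KVM_GET_NESTED_STATE, &state.ns) == 0);
    	assert(state.ns.hdr.vmx.vmxon_pa != -1ull);	/* VMXON region set */
    	assert(state.ns.hdr.vmx.vmcs12_pa == -1ull);	/* no current VMCS */
    }

A destination that accepts this state via KVM_SET_NESTED_STATE must recreate the bare-VMXON condition, which is precisely what resuming the guest past GUEST_SYNC(3) into load_vmcs() verifies.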