Commit b67a4cc3 authored by Peter Gonda, committed by Paolo Bonzini

KVM: SEV: Refactor out sev_es_state struct

Move the SEV-ES vCPU metadata out of struct vcpu_svm and into a new vcpu_sev_es_state struct, which vcpu_svm embeds as a single sev_es member.
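
A minimal, compilable sketch of the shape of this refactor, using simplified stand-ins for the kernel types (illustration only, not the actual KVM definitions):

    #include <stdbool.h>
    #include <stdio.h>

    struct ghcb { unsigned int ghcb_usage; };   /* stand-in type */

    /* All SEV-ES vCPU metadata is grouped into one struct... */
    struct vcpu_sev_es_state {
            struct ghcb *ghcb;
            bool received_first_sipi;
            void *ghcb_sa;
            unsigned long long ghcb_sa_len;
            bool ghcb_sa_sync;
            bool ghcb_sa_free;
    };

    /* ...which vcpu_svm embeds instead of carrying the fields itself. */
    struct vcpu_svm {
            struct vcpu_sev_es_state sev_es;
            bool guest_state_loaded;
    };

    int main(void)
    {
            struct vcpu_svm svm = {0};

            /* Call sites change from svm->ghcb_sa_sync and friends to
             * svm->sev_es.ghcb_sa_sync; there is no behavioral change.
             */
            svm.sev_es.ghcb_sa_sync = false;
            svm.sev_es.received_first_sipi = true;
            printf("received_first_sipi = %d\n",
                   svm.sev_es.received_first_sipi);
            return 0;
    }
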
Signed-off-by: Peter Gonda <pgonda@google.com>
Suggested-by: Tom Lendacky <thomas.lendacky@amd.com>
Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Cc: Marc Orr <marcorr@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Message-Id: <20211021174303.385706-2-pgonda@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 52cf891d
arch/x86/kvm/svm/sev.c
@@ -590,7 +590,7 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
 	 * traditional VMSA as it has been built so far (in prep
 	 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
 	 */
-	memcpy(svm->vmsa, save, sizeof(*save));
+	memcpy(svm->sev_es.vmsa, save, sizeof(*save));
 
 	return 0;
 }
@@ -612,11 +612,11 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	 * the VMSA memory content (i.e it will write the same memory region
 	 * with the guest's key), so invalidate it first.
 	 */
-	clflush_cache_range(svm->vmsa, PAGE_SIZE);
+	clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
 
 	vmsa.reserved = 0;
 	vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
-	vmsa.address = __sme_pa(svm->vmsa);
+	vmsa.address = __sme_pa(svm->sev_es.vmsa);
 	vmsa.len = PAGE_SIZE;
 	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
 }
@@ -2026,16 +2026,16 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
 	svm = to_svm(vcpu);
 
 	if (vcpu->arch.guest_state_protected)
-		sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
-	__free_page(virt_to_page(svm->vmsa));
+		sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
+	__free_page(virt_to_page(svm->sev_es.vmsa));
 
-	if (svm->ghcb_sa_free)
-		kfree(svm->ghcb_sa);
+	if (svm->sev_es.ghcb_sa_free)
+		kfree(svm->sev_es.ghcb_sa);
 }
 
 static void dump_ghcb(struct vcpu_svm *svm)
 {
-	struct ghcb *ghcb = svm->ghcb;
+	struct ghcb *ghcb = svm->sev_es.ghcb;
 	unsigned int nbits;
 
 	/* Re-use the dump_invalid_vmcb module parameter */
@@ -2061,7 +2061,7 @@ static void dump_ghcb(struct vcpu_svm *svm)
 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
-	struct ghcb *ghcb = svm->ghcb;
+	struct ghcb *ghcb = svm->sev_es.ghcb;
 
 	/*
 	 * The GHCB protocol so far allows for the following data
@@ -2081,7 +2081,7 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
 	struct kvm_vcpu *vcpu = &svm->vcpu;
-	struct ghcb *ghcb = svm->ghcb;
+	struct ghcb *ghcb = svm->sev_es.ghcb;
 	u64 exit_code;
 
 	/*
@@ -2128,7 +2128,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 	struct ghcb *ghcb;
 	u64 exit_code = 0;
 
-	ghcb = svm->ghcb;
+	ghcb = svm->sev_es.ghcb;
 
 	/* Only GHCB Usage code 0 is supported */
 	if (ghcb->ghcb_usage)
@@ -2246,33 +2246,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 
 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
 {
-	if (!svm->ghcb)
+	if (!svm->sev_es.ghcb)
 		return;
 
-	if (svm->ghcb_sa_free) {
+	if (svm->sev_es.ghcb_sa_free) {
 		/*
 		 * The scratch area lives outside the GHCB, so there is a
 		 * buffer that, depending on the operation performed, may
 		 * need to be synced, then freed.
 		 */
-		if (svm->ghcb_sa_sync) {
+		if (svm->sev_es.ghcb_sa_sync) {
 			kvm_write_guest(svm->vcpu.kvm,
-					ghcb_get_sw_scratch(svm->ghcb),
-					svm->ghcb_sa, svm->ghcb_sa_len);
-			svm->ghcb_sa_sync = false;
+					ghcb_get_sw_scratch(svm->sev_es.ghcb),
+					svm->sev_es.ghcb_sa,
+					svm->sev_es.ghcb_sa_len);
+			svm->sev_es.ghcb_sa_sync = false;
 		}
 
-		kfree(svm->ghcb_sa);
-		svm->ghcb_sa = NULL;
-		svm->ghcb_sa_free = false;
+		kfree(svm->sev_es.ghcb_sa);
+		svm->sev_es.ghcb_sa = NULL;
+		svm->sev_es.ghcb_sa_free = false;
 	}
 
-	trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
+	trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);
 
 	sev_es_sync_to_ghcb(svm);
 
-	kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
-	svm->ghcb = NULL;
+	kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
+	svm->sev_es.ghcb = NULL;
 }
 
 void pre_sev_run(struct vcpu_svm *svm, int cpu)
@@ -2302,7 +2303,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
 static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
-	struct ghcb *ghcb = svm->ghcb;
+	struct ghcb *ghcb = svm->sev_es.ghcb;
 	u64 ghcb_scratch_beg, ghcb_scratch_end;
 	u64 scratch_gpa_beg, scratch_gpa_end;
 	void *scratch_va;
@@ -2338,7 +2339,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 			return false;
 		}
 
-		scratch_va = (void *)svm->ghcb;
+		scratch_va = (void *)svm->sev_es.ghcb;
 		scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
 	} else {
 		/*
@@ -2368,12 +2369,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 		 * the vCPU next time (i.e. a read was requested so the data
 		 * must be written back to the guest memory).
 		 */
-		svm->ghcb_sa_sync = sync;
-		svm->ghcb_sa_free = true;
+		svm->sev_es.ghcb_sa_sync = sync;
+		svm->sev_es.ghcb_sa_free = true;
 	}
 
-	svm->ghcb_sa = scratch_va;
-	svm->ghcb_sa_len = len;
+	svm->sev_es.ghcb_sa = scratch_va;
+	svm->sev_es.ghcb_sa_len = len;
 
 	return true;
 }
@@ -2492,15 +2493,15 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
+	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
 		/* Unable to map GHCB from guest */
 		vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
 			    ghcb_gpa);
 		return -EINVAL;
 	}
 
-	svm->ghcb = svm->ghcb_map.hva;
-	ghcb = svm->ghcb_map.hva;
+	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
+	ghcb = svm->sev_es.ghcb_map.hva;
 
 	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
 
@@ -2523,7 +2524,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		ret = kvm_sev_es_mmio_read(vcpu,
 					   control->exit_info_1,
 					   control->exit_info_2,
-					   svm->ghcb_sa);
+					   svm->sev_es.ghcb_sa);
 		break;
 	case SVM_VMGEXIT_MMIO_WRITE:
 		if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
@@ -2532,7 +2533,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		ret = kvm_sev_es_mmio_write(vcpu,
 					    control->exit_info_1,
 					    control->exit_info_2,
-					    svm->ghcb_sa);
+					    svm->sev_es.ghcb_sa);
 		break;
 	case SVM_VMGEXIT_NMI_COMPLETE:
 		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
@@ -2582,8 +2583,8 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 	if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
 		return -EINVAL;
 
-	return kvm_sev_es_string_io(&svm->vcpu, size, port,
-				    svm->ghcb_sa, svm->ghcb_sa_len / size, in);
+	return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
+				    svm->sev_es.ghcb_sa_len / size, in);
 }
 
 void sev_es_init_vmcb(struct vcpu_svm *svm)
@@ -2598,7 +2599,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm)
 	 * VMCB page. Do not include the encryption mask on the VMSA physical
 	 * address since hardware will access it using the guest key.
 	 */
-	svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
+	svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
 
 	/* Can't intercept CR register access, HV can't modify CR registers */
 	svm_clr_intercept(svm, INTERCEPT_CR0_READ);
@@ -2670,8 +2671,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	/* First SIPI: Use the values as initially set by the VMM */
-	if (!svm->received_first_sipi) {
-		svm->received_first_sipi = true;
+	if (!svm->sev_es.received_first_sipi) {
+		svm->sev_es.received_first_sipi = true;
 		return;
 	}
 
@@ -2680,8 +2681,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 	 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
 	 * non-zero value.
 	 */
-	if (!svm->ghcb)
+	if (!svm->sev_es.ghcb)
 		return;
 
-	ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
 }
arch/x86/kvm/svm/svm.c
@@ -1450,7 +1450,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 	svm_switch_vmcb(svm, &svm->vmcb01);
 
 	if (vmsa_page)
-		svm->vmsa = page_address(vmsa_page);
+		svm->sev_es.vmsa = page_address(vmsa_page);
 
 	svm->guest_state_loaded = false;
 
@@ -2833,11 +2833,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
+	if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
 		return kvm_complete_insn_gp(vcpu, err);
 
-	ghcb_set_sw_exit_info_1(svm->ghcb, 1);
-	ghcb_set_sw_exit_info_2(svm->ghcb,
+	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
+	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
 				X86_TRAP_GP |
 				SVM_EVTINJ_TYPE_EXEPT |
 				SVM_EVTINJ_VALID);
arch/x86/kvm/svm/svm.h
@@ -123,6 +123,20 @@ struct svm_nested_state {
 	bool initialized;
 };
 
+struct vcpu_sev_es_state {
+	/* SEV-ES support */
+	struct vmcb_save_area *vmsa;
+	struct ghcb *ghcb;
+	struct kvm_host_map ghcb_map;
+	bool received_first_sipi;
+
+	/* SEV-ES scratch area support */
+	void *ghcb_sa;
+	u64 ghcb_sa_len;
+	bool ghcb_sa_sync;
+	bool ghcb_sa_free;
+};
+
 struct vcpu_svm {
 	struct kvm_vcpu vcpu;
 	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
@@ -186,17 +200,7 @@ struct vcpu_svm {
 		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
 	} shadow_msr_intercept;
 
-	/* SEV-ES support */
-	struct vmcb_save_area *vmsa;
-	struct ghcb *ghcb;
-	struct kvm_host_map ghcb_map;
-	bool received_first_sipi;
-
-	/* SEV-ES scratch area support */
-	void *ghcb_sa;
-	u64 ghcb_sa_len;
-	bool ghcb_sa_sync;
-	bool ghcb_sa_free;
+	struct vcpu_sev_es_state sev_es;
 
 	bool guest_state_loaded;
 };