Commit d916f003 authored by Tom Lendacky's avatar Tom Lendacky Committed by Paolo Bonzini

KVM: SEV: Add support to handle AP reset MSR protocol

Add support for AP Reset Hold being invoked using the GHCB MSR protocol,
available in version 2 of the GHCB specification.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Message-ID: <20240501071048.2208265-2-michael.roth@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a96cb3bf
...@@ -56,6 +56,8 @@ ...@@ -56,6 +56,8 @@
/* AP Reset Hold */ /* AP Reset Hold */
#define GHCB_MSR_AP_RESET_HOLD_REQ 0x006 #define GHCB_MSR_AP_RESET_HOLD_REQ 0x006
#define GHCB_MSR_AP_RESET_HOLD_RESP 0x007 #define GHCB_MSR_AP_RESET_HOLD_RESP 0x007
#define GHCB_MSR_AP_RESET_HOLD_RESULT_POS 12
#define GHCB_MSR_AP_RESET_HOLD_RESULT_MASK GENMASK_ULL(51, 0)
/* GHCB GPA Register */ /* GHCB GPA Register */
#define GHCB_MSR_REG_GPA_REQ 0x012 #define GHCB_MSR_REG_GPA_REQ 0x012
......
...@@ -49,6 +49,10 @@ static bool sev_es_debug_swap_enabled = true; ...@@ -49,6 +49,10 @@ static bool sev_es_debug_swap_enabled = true;
module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444); module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
static u64 sev_supported_vmsa_features; static u64 sev_supported_vmsa_features;
#define AP_RESET_HOLD_NONE 0
#define AP_RESET_HOLD_NAE_EVENT 1
#define AP_RESET_HOLD_MSR_PROTO 2
static u8 sev_enc_bit; static u8 sev_enc_bit;
static DECLARE_RWSEM(sev_deactivate_lock); static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock); static DEFINE_MUTEX(sev_bitmap_lock);
...@@ -2727,6 +2731,9 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) ...@@ -2727,6 +2731,9 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
void sev_es_unmap_ghcb(struct vcpu_svm *svm) void sev_es_unmap_ghcb(struct vcpu_svm *svm)
{ {
/* Clear any indication that the vCPU is in a type of AP Reset Hold */
svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NONE;
if (!svm->sev_es.ghcb) if (!svm->sev_es.ghcb)
return; return;
...@@ -2938,6 +2945,22 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm) ...@@ -2938,6 +2945,22 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
GHCB_MSR_INFO_POS); GHCB_MSR_INFO_POS);
break; break;
} }
case GHCB_MSR_AP_RESET_HOLD_REQ:
svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_MSR_PROTO;
ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
/*
* Preset the result to a non-SIPI return and then only set
* the result to non-zero when delivering a SIPI.
*/
set_ghcb_msr_bits(svm, 0,
GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP,
GHCB_MSR_INFO_MASK,
GHCB_MSR_INFO_POS);
break;
case GHCB_MSR_TERM_REQ: { case GHCB_MSR_TERM_REQ: {
u64 reason_set, reason_code; u64 reason_set, reason_code;
...@@ -3037,6 +3060,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) ...@@ -3037,6 +3060,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
ret = 1; ret = 1;
break; break;
case SVM_VMGEXIT_AP_HLT_LOOP: case SVM_VMGEXIT_AP_HLT_LOOP:
svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NAE_EVENT;
ret = kvm_emulate_ap_reset_hold(vcpu); ret = kvm_emulate_ap_reset_hold(vcpu);
break; break;
case SVM_VMGEXIT_AP_JUMP_TABLE: { case SVM_VMGEXIT_AP_JUMP_TABLE: {
...@@ -3280,15 +3304,31 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) ...@@ -3280,15 +3304,31 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
return; return;
} }
/* Subsequent SIPI */
switch (svm->sev_es.ap_reset_hold_type) {
case AP_RESET_HOLD_NAE_EVENT:
/* /*
* Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where * Return from an AP Reset Hold VMGEXIT, where the guest will
* the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a * set the CS and RIP. Set SW_EXIT_INFO_2 to a non-zero value.
* non-zero value.
*/ */
if (!svm->sev_es.ghcb)
return;
ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1); ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
break;
case AP_RESET_HOLD_MSR_PROTO:
/*
* Return from an AP Reset Hold VMGEXIT, where the guest will
* set the CS and RIP. Set GHCB data field to a non-zero value.
*/
set_ghcb_msr_bits(svm, 1,
GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP,
GHCB_MSR_INFO_MASK,
GHCB_MSR_INFO_POS);
break;
default:
break;
}
} }
struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu) struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
......
...@@ -199,6 +199,7 @@ struct vcpu_sev_es_state { ...@@ -199,6 +199,7 @@ struct vcpu_sev_es_state {
u8 valid_bitmap[16]; u8 valid_bitmap[16];
struct kvm_host_map ghcb_map; struct kvm_host_map ghcb_map;
bool received_first_sipi; bool received_first_sipi;
unsigned int ap_reset_hold_type;
/* SEV-ES scratch area support */ /* SEV-ES scratch area support */
u64 sw_scratch; u64 sw_scratch;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment