Commit b2ec0423 authored by Tom Lendacky, committed by Paolo Bonzini

KVM: SVM: Remove the need to trigger an UNBLOCK event on AP creation

All SNP APs are initially started using the APIC INIT/SIPI sequence in
the guest. This sequence moves the AP MP state from
KVM_MP_STATE_UNINITIALIZED to KVM_MP_STATE_RUNNABLE, so there is no need
to attempt the UNBLOCK.
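
As a reference point, here is a minimal, self-contained model of that state
transition. It mirrors the effect of the INIT/SIPI handling in
arch/x86/kvm/lapic.c (kvm_apic_accept_events()); the helper names and the
standalone harness are illustrative, not kernel code.

	#include <stdio.h>

	/* Same progression as the KVM_MP_STATE_* values named above. */
	enum mp_state { MP_UNINITIALIZED, MP_INIT_RECEIVED, MP_RUNNABLE };

	/* INIT latches an uninitialized AP into wait-for-SIPI. */
	static enum mp_state ap_on_init(enum mp_state s)
	{
		return s == MP_UNINITIALIZED ? MP_INIT_RECEIVED : s;
	}

	/* SIPI makes an INIT'ed AP runnable; no explicit UNBLOCK is needed. */
	static enum mp_state ap_on_sipi(enum mp_state s)
	{
		return s == MP_INIT_RECEIVED ? MP_RUNNABLE : s;
	}

	int main(void)
	{
		enum mp_state s = MP_UNINITIALIZED;

		s = ap_on_init(s);
		s = ap_on_sipi(s);
		printf("AP runnable after INIT/SIPI: %s\n",
		       s == MP_RUNNABLE ? "yes" : "no");
		return 0;
	}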

As it stands, the UNBLOCK support in SVM is only wired up when AVIC is
enabled, yet AP creation still succeeds when AVIC is disabled.
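
For context, a rough sketch of how the unblocking hook is dispatched; this is
simplified from the kvm_arch_vcpu_blocking()/kvm_arch_vcpu_unblocking()
bracketing around the halt path in virt/kvm/kvm_main.c, and the harness below
is illustrative, not the kernel implementation.

	#include <stddef.h>
	#include <stdio.h>

	struct kvm_vcpu;                      /* opaque for this sketch */

	struct x86_ops {
		void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
		void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
	};

	/* Stand-in for avic_vcpu_unblocking(): reload AVIC state on wakeup. */
	static void avic_unblocking(struct kvm_vcpu *vcpu)
	{
		(void)vcpu;
		puts("AVIC: restore interrupt-posting state for this vCPU");
	}

	/* After this patch, .vcpu_unblocking points straight at the AVIC hook. */
	static struct x86_ops svm_ops = {
		.vcpu_blocking = NULL,            /* not modeled here */
		.vcpu_unblocking = avic_unblocking,
	};

	/* The halt path brackets the wait with the optional vendor hooks. */
	static void vcpu_block(struct kvm_vcpu *vcpu)
	{
		if (svm_ops.vcpu_blocking)
			svm_ops.vcpu_blocking(vcpu);
		/* ... wait until the vCPU is runnable or a request is pending ... */
		if (svm_ops.vcpu_unblocking)
			svm_ops.vcpu_unblocking(vcpu);
	}

	int main(void)
	{
		vcpu_block(NULL);
		return 0;
	}

With AVIC disabled the hook is simply absent, which matches the observation
above that AP creation does not depend on it.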

Remove the KVM_REQ_UNBLOCK request from the AP creation code and revert
the changes to the vcpu_unblocking() kvm_x86_ops path.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 73137f59
arch/x86/kvm/svm/sev.c
@@ -3909,10 +3909,6 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
 out:
 	if (kick) {
 		kvm_make_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
-
-		if (target_vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
-			kvm_make_request(KVM_REQ_UNBLOCK, target_vcpu);
-
 		kvm_vcpu_kick(target_vcpu);
 	}
@@ -4478,16 +4474,6 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
 	return p;
 }
 
-void sev_vcpu_unblocking(struct kvm_vcpu *vcpu)
-{
-	if (!sev_snp_guest(vcpu->kvm))
-		return;
-
-	if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu) &&
-	    vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-}
-
 void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
 {
 	struct kvm_memory_slot *slot;
arch/x86/kvm/svm/svm.c
@@ -4943,12 +4943,6 @@ static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu)
 	return page_address(page);
 }
 
-static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
-{
-	sev_vcpu_unblocking(vcpu);
-	avic_vcpu_unblocking(vcpu);
-}
-
 static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.name = KBUILD_MODNAME,
@@ -4971,7 +4965,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.vcpu_load = svm_vcpu_load,
 	.vcpu_put = svm_vcpu_put,
 	.vcpu_blocking = avic_vcpu_blocking,
-	.vcpu_unblocking = svm_vcpu_unblocking,
+	.vcpu_unblocking = avic_vcpu_unblocking,
 	.update_exception_bitmap = svm_update_exception_bitmap,
 	.get_msr_feature = svm_get_msr_feature,
arch/x86/kvm/svm/svm.h
@@ -734,7 +734,6 @@ int sev_cpu_init(struct svm_cpu_data *sd);
 int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
 extern unsigned int max_sev_asid;
 void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
-void sev_vcpu_unblocking(struct kvm_vcpu *vcpu);
 void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
 int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
 void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
@@ -753,7 +752,6 @@ static inline int sev_cpu_init(struct svm_cpu_data *sd) { return 0; }
 static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXIO; }
 #define max_sev_asid 0
 static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
-static inline void sev_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
 static inline void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu) {}
 static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
 {