Commit 72f211ec authored by Maxim Levitsky, committed by Paolo Bonzini

KVM: x86: allow kvm_x86_ops.set_efer to return an error value

This will be used to signal an error to the userspace, in case
the vendor code failed during handling of this msr. (e.g -ENOMEM)
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20201001112954.6258-4-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7dffecaf
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1101,7 +1101,7 @@ struct kvm_x86_ops {
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
-	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
+	int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
 	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -263,7 +263,7 @@ static int get_max_npt_level(void)
 #endif
 }
 
-void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	vcpu->arch.efer = efer;
@@ -283,6 +283,7 @@ void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 	svm->vmcb->save.efer = efer | EFER_SVME;
 	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
+	return 0;
 }
 
 static int is_external_interrupt(u32 info)
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -350,7 +350,7 @@ static inline bool gif_set(struct vcpu_svm *svm)
 #define MSR_INVALID			0xffffffffU
 
 u32 svm_msrpm_offset(u32 msr);
-void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
+int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void svm_flush_tlb(struct kvm_vcpu *vcpu);
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2815,13 +2815,14 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 }
 
-void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER);
 
+	/* Nothing to do if hardware doesn't support EFER. */
 	if (!msr)
-		return;
+		return 0;
 
 	vcpu->arch.efer = efer;
 	if (efer & EFER_LMA) {
@@ -2833,6 +2834,7 @@ void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		msr->data = efer & ~EFER_LME;
 	}
 	setup_msrs(vmx);
+	return 0;
 }
 
 #ifdef CONFIG_X86_64
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -319,7 +319,7 @@ unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
-void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
+int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1457,6 +1457,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	u64 old_efer = vcpu->arch.efer;
 	u64 efer = msr_info->data;
+	int r;
 
 	if (efer & efer_reserved_bits)
 		return 1;
@@ -1473,7 +1474,11 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	efer &= ~EFER_LMA;
 	efer |= vcpu->arch.efer & EFER_LMA;
 
-	kvm_x86_ops.set_efer(vcpu, efer);
+	r = kvm_x86_ops.set_efer(vcpu, efer);
+	if (r) {
+		WARN_ON(r > 0);
+		return r;
+	}
 
 	/* Update reserved bits */
 	if ((efer ^ old_efer) & EFER_NX)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment