Commit 2a32a77c authored by Paolo Bonzini

KVM: SVM: merge update_cr0_intercept into svm_set_cr0

The logic of update_cr0_intercept is pointlessly complicated.
All svm_set_cr0 does is compute the effective cr0 and compare it with
the guest value.

Inlining the function and simplifying the condition
clarifies what it is doing.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 11f0cbf0
...@@ -1720,37 +1720,10 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) ...@@ -1720,37 +1720,10 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
vmcb_mark_dirty(svm->vmcb, VMCB_DT); vmcb_mark_dirty(svm->vmcb, VMCB_DT);
} }
/*
 * Mirror the guest's selective-CR0 bits into the host-visible CR0 in the
 * VMCB, then drop the CR0 read/write intercepts when the guest and host
 * views of CR0 fully agree (no emulation needed on CR0 accesses).
 */
static void update_cr0_intercept(struct vcpu_svm *svm)
{
	ulong guest_cr0;
	u64 *host_cr0;

	/*
	 * SEV-ES guests must always keep the CR intercepts cleared. CR
	 * tracking is done using the CR write traps.
	 */
	if (sev_es_guest(svm->vcpu.kvm))
		return;

	guest_cr0 = svm->vcpu.arch.cr0;
	host_cr0 = &svm->vmcb->save.cr0;

	/* Copy only the selectively-intercepted bits from the guest value. */
	*host_cr0 &= ~SVM_CR0_SELECTIVE_MASK;
	*host_cr0 |= guest_cr0 & SVM_CR0_SELECTIVE_MASK;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);

	if (guest_cr0 != *host_cr0) {
		/* Views differ: CR0 accesses must be emulated. */
		svm_set_intercept(svm, INTERCEPT_CR0_READ);
		svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
	} else {
		svm_clr_intercept(svm, INTERCEPT_CR0_READ);
		svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
	}
}
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
u64 hcr0 = cr0;
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) { if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
...@@ -1768,7 +1741,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) ...@@ -1768,7 +1741,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
vcpu->arch.cr0 = cr0; vcpu->arch.cr0 = cr0;
if (!npt_enabled) if (!npt_enabled)
cr0 |= X86_CR0_PG | X86_CR0_WP; hcr0 |= X86_CR0_PG | X86_CR0_WP;
/* /*
* re-enable caching here because the QEMU bios * re-enable caching here because the QEMU bios
...@@ -1776,10 +1749,27 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) ...@@ -1776,10 +1749,27 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
* reboot * reboot
*/ */
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
cr0 &= ~(X86_CR0_CD | X86_CR0_NW); hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);
svm->vmcb->save.cr0 = cr0;
svm->vmcb->save.cr0 = hcr0;
vmcb_mark_dirty(svm->vmcb, VMCB_CR); vmcb_mark_dirty(svm->vmcb, VMCB_CR);
update_cr0_intercept(svm);
/*
* SEV-ES guests must always keep the CR intercepts cleared. CR
* tracking is done using the CR write traps.
*/
if (sev_es_guest(svm->vcpu.kvm))
return;
if (hcr0 == cr0) {
/* Selective CR0 write remains on. */
svm_clr_intercept(svm, INTERCEPT_CR0_READ);
svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
} else {
svm_set_intercept(svm, INTERCEPT_CR0_READ);
svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
}
} }
static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment