Commit 193015ad authored by Cathy Avery, committed by Paolo Bonzini

KVM: nSVM: Track the ASID generation of the vmcb vmrun through the vmcb

This patch moves the asid_generation from the vcpu to the vmcb
in order to track the ASID generation that was active the last
time the vmcb was run. If sd->asid_generation changes between
two runs, the old ASID is invalid and must be changed.
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Cathy Avery <cavery@redhat.com>
Message-Id: <20210112164313.4204-3-cavery@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent af18fa77
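
For orientation before the diff, here is a minimal sketch of the bookkeeping the commit message describes. The names cpu_asid_state, vmcb_info and vmcb_needs_new_asid are illustrative stand-ins for the kernel's struct svm_cpu_data and struct kvm_vmcb_info, not the actual definitions.

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for struct svm_cpu_data: per-CPU ASID allocator state. */
    struct cpu_asid_state {
            uint64_t asid_generation;   /* bumped whenever the ASID pool wraps */
            uint32_t next_asid;
    };

    /* Stand-in for struct kvm_vmcb_info: per-vmcb "last vmrun" bookkeeping. */
    struct vmcb_info {
            int cpu;                    /* physical CPU of this vmcb's last vmrun */
            uint64_t asid_generation;   /* per-CPU generation at this vmcb's last vmrun */
    };

    /*
     * The per-vmcb check this patch enables: if the per-CPU generation has
     * moved on since this vmcb last ran, its ASID is stale and must be replaced.
     */
    static bool vmcb_needs_new_asid(const struct vmcb_info *vmcb,
                                    const struct cpu_asid_state *sd)
    {
            return vmcb->asid_generation != sd->asid_generation;
    }
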
arch/x86/kvm/svm/svm.c

@@ -1229,7 +1229,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 		save->cr3 = 0;
 		save->cr4 = 0;
 	}
-	svm->asid_generation = 0;
+	svm->current_vmcb->asid_generation = 0;
 	svm->asid = 0;
 
 	svm->nested.vmcb12_gpa = 0;
@@ -1311,13 +1311,6 @@ void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
 	svm->vmcb = target_vmcb->ptr;
 	svm->vmcb_pa = target_vmcb->pa;
 
-	/*
-	 * Workaround: we don't yet track the ASID generation
-	 * that was active the last time target_vmcb was run.
-	 */
-	svm->asid_generation = 0;
-
 	/*
 	 * Track the physical CPU the target_vmcb is running on
 	 * in order to mark the VMCB dirty if the cpu changes at
@@ -1384,7 +1377,6 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 	if (vmsa_page)
 		svm->vmsa = page_address(vmsa_page);
 
-	svm->asid_generation = 0;
 	svm->guest_state_loaded = false;
 
 	svm_switch_vmcb(svm, &svm->vmcb01);
@@ -1866,7 +1858,7 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 		vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 	}
 
-	svm->asid_generation = sd->asid_generation;
+	svm->current_vmcb->asid_generation = sd->asid_generation;
 	svm->asid = sd->next_asid++;
 }
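
Only the tail of new_asid() is visible in the hunk above. As a hedged illustration of the allocator logic around it, continuing the simplified model from the sketch near the top of this page (MODEL_MIN_ASID, MODEL_MAX_ASID and model_new_asid are made-up names; the real function also programs tlb_ctl and marks the VMCB dirty, as the context lines show):

    #define MODEL_MIN_ASID 1U
    #define MODEL_MAX_ASID 15U   /* illustrative pool size only */

    /* Hand out the next ASID; when the pool is exhausted, bump the per-CPU
     * generation (which makes every vmcb's cached generation stale) and
     * restart from the bottom of the range. */
    static uint32_t model_new_asid(struct vmcb_info *vmcb, struct cpu_asid_state *sd)
    {
            if (sd->next_asid > MODEL_MAX_ASID) {
                    ++sd->asid_generation;          /* all cached generations go stale */
                    sd->next_asid = MODEL_MIN_ASID; /* hardware needs a full TLB flush here */
            }
            vmcb->asid_generation = sd->asid_generation; /* the stamp this patch moves into the vmcb */
            return sd->next_asid++;
    }
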
@@ -3434,10 +3426,11 @@ static void pre_svm_run(struct vcpu_svm *svm)
 
 	/*
 	 * If the previous vmrun of the vmcb occurred on
 	 * a different physical cpu then we must mark the vmcb dirty.
+	 * and assign a new asid.
 	 */
 	if (unlikely(svm->current_vmcb->cpu != svm->vcpu.cpu)) {
-		svm->asid_generation = 0;
+		svm->current_vmcb->asid_generation = 0;
 		vmcb_mark_all_dirty(svm->vmcb);
 		svm->current_vmcb->cpu = svm->vcpu.cpu;
 	}
@@ -3446,7 +3439,7 @@ static void pre_svm_run(struct vcpu_svm *svm)
 	if (sev_guest(svm->vcpu.kvm))
 		return pre_sev_run(svm, svm->vcpu.cpu);
 
 	/* FIXME: handle wraparound of asid_generation */
-	if (svm->asid_generation != sd->asid_generation)
+	if (svm->current_vmcb->asid_generation != sd->asid_generation)
 		new_asid(svm, sd);
 }
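
Taken together, the two pre_svm_run() hunks above implement the decision sketched below, again in the simplified model (model_pre_run is an illustrative name; the SEV path and dirty-bit handling are elided):

    /* Before a vmrun: a CPU migration invalidates the cached generation so the
     * generation check below is guaranteed to allocate a fresh ASID. */
    static void model_pre_run(struct vmcb_info *vmcb, struct cpu_asid_state *sd,
                              int this_cpu)
    {
            if (vmcb->cpu != this_cpu) {
                    /* 0 never matches: the kernel starts the per-CPU generation at 1.
                     * The real code also marks the whole VMCB dirty here. */
                    vmcb->asid_generation = 0;
                    vmcb->cpu = this_cpu;
            }

            if (vmcb_needs_new_asid(vmcb, sd))
                    (void)model_new_asid(vmcb, sd); /* the returned ASID would be programmed into the vmcb */
    }
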
@@ -3670,7 +3663,7 @@ void svm_flush_tlb(struct kvm_vcpu *vcpu)
 	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
 		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
 	else
-		svm->asid_generation--;
+		svm->current_vmcb->asid_generation--;
 }
 
 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
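
The svm_flush_tlb() change keeps the existing trick working after the move: on hardware without FLUSHBYASID, decrementing the cached generation guarantees a mismatch at the next pre_svm_run(), so the guest is given a fresh ASID and its old TLB entries are no longer used. In the simplified model (model_flush_tlb is an illustrative name):

    /* Without FLUSHBYASID support, force the next vmrun onto a fresh ASID by
     * making the cached generation impossible to match. */
    static void model_flush_tlb(struct vmcb_info *vmcb)
    {
            vmcb->asid_generation--;
    }
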
arch/x86/kvm/svm/svm.h
@@ -85,6 +85,7 @@ struct kvm_vmcb_info {
 	struct vmcb *ptr;
 	unsigned long pa;
 	int cpu;
+	uint64_t asid_generation;
 };
 
 struct svm_nested_state {
@@ -114,7 +115,6 @@ struct vcpu_svm {
 	struct kvm_vmcb_info *current_vmcb;
 	struct svm_cpu_data *svm_data;
 	u32 asid;
-	uint64_t asid_generation;
 	uint64_t sysenter_esp;
 	uint64_t sysenter_eip;
 	uint64_t tsc_aux;
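
Reassembling the two header hunks, the per-vmcb info block ends up carrying both pieces of "last vmrun" state (fields exactly as shown above; the comments are added here for clarity). The ASID number itself, u32 asid, stays in struct vcpu_svm, as the second hunk shows.

    struct kvm_vmcb_info {
            struct vmcb *ptr;
            unsigned long pa;
            int cpu;                  /* physical CPU of this vmcb's last vmrun */
            uint64_t asid_generation; /* per-CPU ASID generation at this vmcb's last vmrun */
    };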