Commit 06e7852c authored by Joerg Roedel, committed by Paolo Bonzini

KVM: SVM: Add vmcb_ prefix to mark_*() functions

Make it more clear what data structure these functions operate on.

No functional changes.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Message-Id: <20200625080325.28439-3-joro@8bytes.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7693b3eb
@@ -665,7 +665,7 @@ void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 	} else {
 		vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
 	}
-	mark_dirty(vmcb, VMCB_AVIC);
+	vmcb_mark_dirty(vmcb, VMCB_AVIC);

 	svm_set_pi_irte_mode(vcpu, activated);
 }
...
@@ -106,7 +106,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *c, *h, *g;

-	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

 	if (!is_guest_mode(&svm->vcpu))
 		return;
@@ -378,7 +378,7 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 	 */
 	recalc_intercepts(svm);

-	mark_all_dirty(svm->vmcb);
+	vmcb_mark_all_dirty(svm->vmcb);
 }

 void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
@@ -601,7 +601,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	svm->vmcb->save.cpl = 0;
 	svm->vmcb->control.exit_int_info = 0;

-	mark_all_dirty(svm->vmcb);
+	vmcb_mark_all_dirty(svm->vmcb);

 	trace_kvm_nested_vmexit_inject(nested_vmcb->control.exit_code,
 				       nested_vmcb->control.exit_info_1,
...
@@ -1191,5 +1191,5 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
 	sd->sev_vmcbs[asid] = svm->vmcb;
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
-	mark_dirty(svm->vmcb, VMCB_ASID);
+	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 }
@@ -282,7 +282,7 @@ void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	}

 	svm->vmcb->save.efer = efer | EFER_SVME;
-	mark_dirty(svm->vmcb, VMCB_CR);
+	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 }

 static int is_external_interrupt(u32 info)
@@ -713,7 +713,7 @@ static void grow_ple_window(struct kvm_vcpu *vcpu)
 			pause_filter_count_max);

 	if (control->pause_filter_count != old) {
-		mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 		trace_kvm_ple_window_update(vcpu->vcpu_id,
 					    control->pause_filter_count, old);
 	}
@@ -731,7 +731,7 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
 			pause_filter_count_shrink,
 			pause_filter_count);

 	if (control->pause_filter_count != old) {
-		mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 		trace_kvm_ple_window_update(vcpu->vcpu_id,
 					    control->pause_filter_count, old);
 	}
@@ -966,7 +966,7 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

-	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 	return svm->vmcb->control.tsc_offset;
 }
@@ -1123,7 +1123,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 		clr_exception_intercept(svm, UD_VECTOR);
 	}

-	mark_all_dirty(svm->vmcb);
+	vmcb_mark_all_dirty(svm->vmcb);

 	enable_gif(svm);
@@ -1257,7 +1257,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (unlikely(cpu != vcpu->cpu)) {
 		svm->asid_generation = 0;
-		mark_all_dirty(svm->vmcb);
+		vmcb_mark_all_dirty(svm->vmcb);
 	}

 #ifdef CONFIG_X86_64
@@ -1367,7 +1367,7 @@ static void svm_set_vintr(struct vcpu_svm *svm)
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
 	control->int_ctl |= V_IRQ_MASK |
 		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
-	mark_dirty(svm->vmcb, VMCB_INTR);
+	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
 }

 static void svm_clear_vintr(struct vcpu_svm *svm)
@@ -1385,7 +1385,7 @@ static void svm_clear_vintr(struct vcpu_svm *svm)
 		svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
 	}

-	mark_dirty(svm->vmcb, VMCB_INTR);
+	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
 }

 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
@@ -1503,7 +1503,7 @@ static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 	svm->vmcb->save.idtr.limit = dt->size;
 	svm->vmcb->save.idtr.base = dt->address ;
-	mark_dirty(svm->vmcb, VMCB_DT);
+	vmcb_mark_dirty(svm->vmcb, VMCB_DT);
 }

 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
@@ -1520,7 +1520,7 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 	svm->vmcb->save.gdtr.limit = dt->size;
 	svm->vmcb->save.gdtr.base = dt->address ;
-	mark_dirty(svm->vmcb, VMCB_DT);
+	vmcb_mark_dirty(svm->vmcb, VMCB_DT);
 }

 static void update_cr0_intercept(struct vcpu_svm *svm)
@@ -1531,7 +1531,7 @@ static void update_cr0_intercept(struct vcpu_svm *svm)
 	*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
 		| (gcr0 & SVM_CR0_SELECTIVE_MASK);
-	mark_dirty(svm->vmcb, VMCB_CR);
+	vmcb_mark_dirty(svm->vmcb, VMCB_CR);

 	if (gcr0 == *hcr0) {
 		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
@@ -1572,7 +1572,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
 		cr0 &= ~(X86_CR0_CD | X86_CR0_NW);

 	svm->vmcb->save.cr0 = cr0;
-	mark_dirty(svm->vmcb, VMCB_CR);
+	vmcb_mark_dirty(svm->vmcb, VMCB_CR);

 	update_cr0_intercept(svm);
 }
@@ -1592,7 +1592,7 @@ int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		cr4 |= X86_CR4_PAE;
 	cr4 |= host_cr4_mce;
 	to_svm(vcpu)->vmcb->save.cr4 = cr4;
-	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
+	vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
 	return 0;
 }
@@ -1624,7 +1624,7 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 		/* This is symmetric with svm_get_segment() */
 		svm->vmcb->save.cpl = (var->dpl & 3);

-	mark_dirty(svm->vmcb, VMCB_SEG);
+	vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
 }

 static void update_bp_intercept(struct kvm_vcpu *vcpu)
@@ -1651,7 +1651,7 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 	svm->asid_generation = sd->asid_generation;
 	svm->vmcb->control.asid = sd->next_asid++;

-	mark_dirty(svm->vmcb, VMCB_ASID);
+	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 }

 static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
@@ -1660,7 +1660,7 @@ static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
 	if (unlikely(value != vmcb->save.dr6)) {
 		vmcb->save.dr6 = value;
-		mark_dirty(vmcb, VMCB_DR);
+		vmcb_mark_dirty(vmcb, VMCB_DR);
 	}
 }
@@ -1687,7 +1687,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
 	struct vcpu_svm *svm = to_svm(vcpu);

 	svm->vmcb->save.dr7 = value;
-	mark_dirty(svm->vmcb, VMCB_DR);
+	vmcb_mark_dirty(svm->vmcb, VMCB_DR);
 }

 static int pf_interception(struct vcpu_svm *svm)
@@ -2512,7 +2512,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 			return 1;

 		vcpu->arch.pat = data;
 		svm->vmcb->save.g_pat = data;
-		mark_dirty(svm->vmcb, VMCB_NPT);
+		vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr->host_initiated &&
@@ -2617,7 +2617,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 			return 1;

 		svm->vmcb->save.dbgctl = data;
-		mark_dirty(svm->vmcb, VMCB_LBR);
+		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
 		if (data & (1ULL<<0))
 			svm_enable_lbrv(svm);
 		else
@@ -3476,7 +3476,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 		       SVM_EXIT_EXCP_BASE + MC_VECTOR))
 		svm_handle_mce(svm);

-	mark_all_clean(svm->vmcb);
+	vmcb_mark_all_clean(svm->vmcb);
 	return exit_fastpath;
 }
@@ -3488,7 +3488,7 @@ static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
 	cr3 = __sme_set(root);
 	if (npt_enabled) {
 		svm->vmcb->control.nested_cr3 = cr3;
-		mark_dirty(svm->vmcb, VMCB_NPT);
+		vmcb_mark_dirty(svm->vmcb, VMCB_NPT);

 		/* Loading L2's CR3 is handled by enter_svm_guest_mode. */
 		if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
@@ -3497,7 +3497,7 @@ static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
 	}

 	svm->vmcb->save.cr3 = cr3;
-	mark_dirty(svm->vmcb, VMCB_CR);
+	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 }

 static int is_disabled(void)
...
@@ -185,18 +185,18 @@ static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
 	return container_of(kvm, struct kvm_svm, kvm);
 }

-static inline void mark_all_dirty(struct vmcb *vmcb)
+static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
 {
 	vmcb->control.clean = 0;
 }

-static inline void mark_all_clean(struct vmcb *vmcb)
+static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
 {
 	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
 			       & ~VMCB_ALWAYS_DIRTY_MASK;
 }

-static inline void mark_dirty(struct vmcb *vmcb, int bit)
+static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
 {
 	vmcb->control.clean &= ~(1 << bit);
 }
@@ -417,7 +417,7 @@ extern int avic;
 static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
 {
 	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
-	mark_dirty(svm->vmcb, VMCB_AVIC);
+	vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
 }

 static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
...
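
For context, the renamed helpers manage the VMCB "clean bits" (vmcb->control.clean): each bit tells the CPU that the corresponding chunk of VMCB state is unchanged since the last VMRUN and may be served from its internal cache, so software must clear a bit whenever it modifies that state. Below is a minimal standalone sketch of the mechanism. The helper bodies mirror the svm.h hunk above, but the bit position, VMCB_DIRTY_MAX, VMCB_ALWAYS_DIRTY_MASK, and the trimmed struct definitions are illustrative assumptions, not the kernel's actual values.

#include <stdio.h>

/*
 * Illustrative stand-ins for the kernel definitions; the real values
 * live in arch/x86/kvm/svm. Treat everything here as an assumption.
 */
#define VMCB_ASID		2	/* assumed bit position */
#define VMCB_DIRTY_MAX		12	/* assumed number of clean bits */
#define VMCB_ALWAYS_DIRTY_MASK	0	/* assumed: no always-dirty state */

struct vmcb_control_area { unsigned int clean; unsigned int asid; };
struct vmcb { struct vmcb_control_area control; };

/* Bodies follow the svm.h hunk above, with the new vmcb_ prefix. */
static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;	/* next VMRUN reloads everything */
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);	/* re-read this state */
}

int main(void)
{
	struct vmcb vmcb = { 0 };

	vmcb_mark_all_clean(&vmcb);		/* as done after a guest run */
	vmcb.control.asid = 5;			/* software changed VMCB state... */
	vmcb_mark_dirty(&vmcb, VMCB_ASID);	/* ...so clear its clean bit */
	printf("clean = %#x\n", vmcb.control.clean);	/* prints 0xffb */
	return 0;
}

The sketch also shows why the rename helps: with the vmcb_ prefix, every call site makes clear that these helpers touch the struct vmcb dirty-tracking state rather than some other KVM bookkeeping.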