Commit bb3541f1 authored by Andrea Gelmini, committed by Paolo Bonzini

KVM: x86: Fix typos

Signed-off-by: Andrea Gelmini <andrea.gelmini@gelma.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 960cb306
@@ -89,7 +89,7 @@ In mmu_spte_clear_track_bits():
    old_spte = *spte;
    /* 'if' condition is satisfied. */
-   if (old_spte.Accssed == 1 &&
+   if (old_spte.Accessed == 1 &&
         old_spte.W == 0)
       spte = 0ull;
                                          on fast page fault path:
@@ -102,7 +102,7 @@ In mmu_spte_clear_track_bits():
       old_spte = xchg(spte, 0ull)
-   if (old_spte.Accssed == 1)
+   if (old_spte.Accessed == 1)
       kvm_set_pfn_accessed(spte.pfn);
    if (old_spte.Dirty == 1)
       kvm_set_pfn_dirty(spte.pfn);
...
@@ -523,7 +523,7 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte)
 }

 /* Rules for using mmu_spte_update:
- * Update the state bits, it means the mapped pfn is not changged.
+ * Update the state bits, it means the mapped pfn is not changed.
  *
  * Whenever we overwrite a writable spte with a read-only one we
  * should flush remote TLBs. Otherwise rmap_write_protect
...
@@ -93,7 +93,7 @@ static unsigned intel_find_fixed_event(int idx)
 	return intel_arch_events[fixed_pmc_events[idx]].event_type;
 }

-/* check if a PMC is enabled by comparising it with globl_ctrl bits. */
+/* check if a PMC is enabled by comparing it with globl_ctrl bits. */
 static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
...
@@ -1572,7 +1572,7 @@ static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
 	/*
-	 * Any change of EFLAGS.VM is accompained by a reload of SS
+	 * Any change of EFLAGS.VM is accompanied by a reload of SS
 	 * (caused by either a task switch or an inter-privilege IRET),
 	 * so we do not need to update the CPL here.
 	 */
...
@@ -3364,7 +3364,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	/*
 	 * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
-	 * but due to arrata below it can't be used. Workaround is to use
+	 * but due to errata below it can't be used. Workaround is to use
 	 * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
 	 *
 	 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
...
@@ -8418,7 +8418,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
 	/*
 	 * When producer of consumer is unregistered, we change back to
 	 * remapped mode, so we can re-use the current implementation
-	 * when the irq is masked/disabed or the consumer side (KVM
+	 * when the irq is masked/disabled or the consumer side (KVM
 	 * int this case doesn't want to receive the interrupts.
 	 */
 	ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
...