Commit 15248258 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Call kvm_arch_memslots_updated() before updating memslots

kvm_arch_memslots_updated() is at this point in time an x86-specific
hook for handling MMIO generation wraparound.  x86 stashes 19 bits of
the memslots generation number in its MMIO sptes in order to avoid
full page fault walks for repeat faults on emulated MMIO addresses.
Because only 19 bits are used, wrapping the MMIO generation number is
possible, if unlikely.  kvm_arch_memslots_updated() alerts x86 that
the generation has changed so that it can invalidate all MMIO sptes in
case the effective MMIO generation has wrapped so as to avoid using a
stale spte, e.g. a (very) old spte that was created with generation==0.
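
As a rough illustration (a simplified sketch with a hypothetical bit layout, not the kernel's actual spte encoding, which splits the generation across several spte bit ranges), only 19 bits of the generation survive in the spte, so two generations that differ by a multiple of 1 << 19 are indistinguishable:

#include <stdbool.h>
#include <stdint.h>

#define MMIO_GEN_BITS	19
#define MMIO_GEN_MASK	((1ULL << MMIO_GEN_BITS) - 1)

/* Hypothetical layout: stash the truncated generation in the spte's low bits. */
static uint64_t mmio_spte_set_gen(uint64_t spte, uint64_t gen)
{
	return spte | (gen & MMIO_GEN_MASK);
}

/*
 * Sptes created at generation 0 and at generation 1ULL << 19 store the
 * same 19 bits, so after a wrap this check alone cannot distinguish a
 * (very) old spte from a current one -- hence the zap on wraparound.
 */
static bool mmio_spte_gen_is_current(uint64_t spte_gen, uint64_t cur_gen)
{
	return spte_gen == (cur_gen & MMIO_GEN_MASK);
}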

Given that the purpose of kvm_arch_memslots_updated() is to prevent
consuming stale entries, it needs to be called before the new generation
is propagated to memslots.  Invalidating the MMIO sptes after updating
memslots means that there is a window where a vCPU could dereference
the new memslots generation, e.g. 0, and incorrectly reuse an old MMIO
spte that was created with (pre-wrap) generation==0.
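
Schematically, the fix is an ordering change in install_new_memslots(), simplified here from the kvm_main.c hunk at the bottom of this patch (SRCU synchronization and locking elided):

/* Old order (buggy): publish the generation, then invalidate. */
slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;	/* vCPUs can observe the new generation here */
kvm_arch_memslots_updated(kvm, slots);			/* stale MMIO sptes zapped too late */

/* New order (fixed): invalidate against the new generation, then publish it. */
gen = slots->generation + KVM_ADDRESS_SPACE_NUM * 2 - 1;
kvm_arch_memslots_updated(kvm, gen);	/* zap stale sptes first... */
slots->generation = gen;		/* ...then make the generation visible */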

Fixes: e59dbe09 ("KVM: Introduce kvm_arch_memslots_updated()")
Cc: <stable@vger.kernel.org>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 41836839
@@ -1134,7 +1134,7 @@ static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
 		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
...
@@ -837,7 +837,7 @@ struct kvm_vcpu_arch {
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_exit(void) {}
...
@@ -878,7 +878,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
 		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
 static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 		struct kvm_memory_slot *slot) {}
...
@@ -1254,7 +1254,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				   struct kvm_memory_slot *slot,
 				   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
...
@@ -5893,13 +5893,13 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
 }
 
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 {
 	/*
 	 * The very rare case: if the generation-number is round,
 	 * zap all shadow pages.
 	 */
-	if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
+	if (unlikely((gen & MMIO_GEN_MASK) == 0)) {
 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
 		kvm_mmu_invalidate_zap_all_pages(kvm);
 	}
...
@@ -9357,13 +9357,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 	return -ENOMEM;
 }
 
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
 	/*
 	 * memslots->generation has been incremented.
 	 * mmio generation may have reached its maximum value.
 	 */
-	kvm_mmu_invalidate_mmio_sptes(kvm, slots);
+	kvm_mmu_invalidate_mmio_sptes(kvm, gen);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
...
@@ -634,7 +634,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont);
 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			    unsigned long npages);
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   const struct kvm_userspace_memory_region *mem,
...
@@ -2350,7 +2350,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 	return 0;
 }
 
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
 }
 
...
@@ -874,6 +874,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 		int as_id, struct kvm_memslots *slots)
 {
 	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
+	u64 gen;
 
 	/*
 	 * Set the low bit in the generation, which disables SPTE caching
@@ -896,9 +897,11 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 	 * space 0 will use generations 0, 4, 8, ... while address space 1 will
 	 * use generations 2, 6, 10, 14, ...
 	 */
-	slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
+	gen = slots->generation + KVM_ADDRESS_SPACE_NUM * 2 - 1;
 
-	kvm_arch_memslots_updated(kvm, slots);
+	kvm_arch_memslots_updated(kvm, gen);
+
+	slots->generation = gen;
 
 	return old_memslots;
 }
...
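
For concreteness, a worked example of the arithmetic above, assuming KVM_ADDRESS_SPACE_NUM == 2: the earlier increment makes the generation odd (the low bit marks an update in flight and disables MMIO spte caching), and adding KVM_ADDRESS_SPACE_NUM * 2 - 1 == 3 here clears that bit while advancing to the next generation reserved for the same address space:

	address space 0:  0 -> 1 (update in flight) -> 4 -> 5 -> 8 -> ...
	address space 1:  2 -> 3 (update in flight) -> 6 -> 7 -> 10 -> ...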