Commit 44d52717 authored by Paolo Bonzini

KVM: LAPIC: ensure APIC map is up to date on concurrent update requests

The following race can cause lost map update events:

         cpu1                            cpu2

                                apic_map_dirty = true
  -------------------------------------------------------------
                                kvm_recalculate_apic_map:
                                    passes the lockless check,
                                    mutex_lock(&kvm->arch.apic_map_lock);
                                    passes
                                        if (!kvm->arch.apic_map_dirty)
                                    and is in the process of
                                    updating the map
  -------------------------------------------------------------
    other calls set
    apic_map_dirty = true           might be too late for the
                                    update in progress on cpu2
  -------------------------------------------------------------
                                    apic_map_dirty = false
  -------------------------------------------------------------
    kvm_recalculate_apic_map:
    bails out on
        if (!kvm->arch.apic_map_dirty)
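
For illustration, here is a minimal user-space sketch of the racy pattern the
diagram describes, using C11 atomics and a pthread mutex in place of the
kernel primitives; map_dirty, mark_map_dirty and recalculate_map are
hypothetical stand-ins, not the kernel's names:

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

static atomic_bool map_dirty;
static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writers flag a pending change without taking the lock. */
static void mark_map_dirty(void)
{
	atomic_store(&map_dirty, true);
}

static void recalculate_map(void)
{
	if (!atomic_load(&map_dirty))	/* lockless fast path */
		return;

	pthread_mutex_lock(&map_lock);
	if (atomic_load(&map_dirty)) {
		/* ... long map rebuild; a mark_map_dirty() that lands
		 * here is silently erased by the store below ... */
		atomic_store(&map_dirty, false);
	}
	pthread_mutex_unlock(&map_lock);
}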

To fix this, make apic_map_dirty a tri-state (CLEAN, UPDATE_IN_PROGRESS,
DIRTY) and record the beginning of an update of the APIC map by moving the
flag from DIRTY to UPDATE_IN_PROGRESS.  If another APIC map change switches
apic_map_dirty back to DIRTY while the update is in progress,
kvm_recalculate_apic_map must not mark it CLEAN when it finishes; the flag
stays DIRTY and the concurrent caller goes through the slow path, redoing
the update.
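
The new protocol can be sketched in the same user-space terms.  This is an
illustrative analogue only, assuming C11 seq_cst atomics where the kernel
uses the weaker _acquire/_release variants; the names are again hypothetical:

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

enum { CLEAN, UPDATE_IN_PROGRESS, DIRTY };

static atomic_int map_dirty = DIRTY;
static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY, without the lock. */
static void mark_map_dirty(void)
{
	atomic_store(&map_dirty, DIRTY);
}

static void recalculate_map(void)
{
	if (atomic_load(&map_dirty) == CLEAN)	/* lockless fast path */
		return;

	pthread_mutex_lock(&map_lock);

	/* Claim the update: DIRTY -> UPDATE_IN_PROGRESS.  old receives
	 * the previous value whether or not the exchange succeeds. */
	int old = DIRTY;
	atomic_compare_exchange_strong(&map_dirty, &old, UPDATE_IN_PROGRESS);
	if (old == CLEAN) {
		/* Someone else has updated the map. */
		pthread_mutex_unlock(&map_lock);
		return;
	}

	/* ... rebuild and publish the map ... */

	/* UPDATE_IN_PROGRESS -> CLEAN.  If a mark_map_dirty() raced in,
	 * map_dirty is DIRTY again, this exchange fails, and the next
	 * caller redoes the rebuild instead of bailing out. */
	old = UPDATE_IN_PROGRESS;
	atomic_compare_exchange_strong(&map_dirty, &old, CLEAN);

	pthread_mutex_unlock(&map_lock);
}

int main(void)
{
	mark_map_dirty();
	recalculate_map();
	printf("map_dirty = %d (%d == CLEAN)\n",
	       atomic_load(&map_dirty), CLEAN);
	return 0;
}

The asymmetry is deliberate: any number of writers may flip the flag to
DIRTY at any time without the lock, while the DIRTY -> UPDATE_IN_PROGRESS
and UPDATE_IN_PROGRESS -> CLEAN transitions happen only under the lock, so
a racing write can no longer be erased.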
Reported-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent af28dfac
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -943,7 +943,7 @@ struct kvm_arch {
 	atomic_t vapics_in_nmi_mode;
 	struct mutex apic_map_lock;
 	struct kvm_apic_map *apic_map;
-	bool apic_map_dirty;
+	atomic_t apic_map_dirty;
 
 	bool apic_access_page_done;
 	unsigned long apicv_inhibit_reasons;
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -169,6 +169,18 @@ static void kvm_apic_map_free(struct rcu_head *rcu)
 	kvfree(map);
 }
 
+/*
+ * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
+ *
+ * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
+ * apic_map_lock held.
+ */
+enum {
+	CLEAN,
+	UPDATE_IN_PROGRESS,
+	DIRTY
+};
+
 void kvm_recalculate_apic_map(struct kvm *kvm)
 {
 	struct kvm_apic_map *new, *old = NULL;
@@ -176,17 +188,17 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
 	int i;
 	u32 max_id = 255; /* enough space for any xAPIC ID */
 
-	if (!kvm->arch.apic_map_dirty) {
-		/*
-		 * Read kvm->arch.apic_map_dirty before
-		 * kvm->arch.apic_map
-		 */
-		smp_rmb();
+	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
+	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
 		return;
-	}
 
 	mutex_lock(&kvm->arch.apic_map_lock);
-	if (!kvm->arch.apic_map_dirty) {
+	/*
+	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
+	 * (if clean) or the APIC registers (if dirty).
+	 */
+	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
+				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
 		/* Someone else has updated the map. */
 		mutex_unlock(&kvm->arch.apic_map_lock);
 		return;
@@ -256,11 +268,11 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
 			lockdep_is_held(&kvm->arch.apic_map_lock));
 	rcu_assign_pointer(kvm->arch.apic_map, new);
 	/*
-	 * Write kvm->arch.apic_map before
-	 * clearing apic->apic_map_dirty
+	 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
+	 * If another update has come in, leave it DIRTY.
 	 */
-	smp_wmb();
-	kvm->arch.apic_map_dirty = false;
+	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
+			       UPDATE_IN_PROGRESS, CLEAN);
 	mutex_unlock(&kvm->arch.apic_map_lock);
 
 	if (old)
@@ -282,20 +294,20 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
 		else
 			static_key_slow_inc(&apic_sw_disabled.key);
 
-		apic->vcpu->kvm->arch.apic_map_dirty = true;
+		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 	}
 }
 
 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
 {
 	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
-	apic->vcpu->kvm->arch.apic_map_dirty = true;
+	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }
 
 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
 {
 	kvm_lapic_set_reg(apic, APIC_LDR, id);
-	apic->vcpu->kvm->arch.apic_map_dirty = true;
+	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }
 
 static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
@@ -311,7 +323,7 @@ static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 	kvm_lapic_set_reg(apic, APIC_ID, id);
 	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
-	apic->vcpu->kvm->arch.apic_map_dirty = true;
+	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }
 
 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
@@ -1976,7 +1988,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 	case APIC_DFR:
 		if (!apic_x2apic_mode(apic)) {
 			kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
-			apic->vcpu->kvm->arch.apic_map_dirty = true;
+			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 		} else
 			ret = 1;
 		break;
@@ -2232,7 +2244,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 			static_key_slow_dec_deferred(&apic_hw_disabled);
 		} else {
 			static_key_slow_inc(&apic_hw_disabled.key);
-			vcpu->kvm->arch.apic_map_dirty = true;
+			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 		}
 	}
@@ -2273,7 +2285,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	if (!apic)
 		return;
 
-	vcpu->kvm->arch.apic_map_dirty = false;
 	/* Stop the timer in case it's a reset to an active apic */
 	hrtimer_cancel(&apic->lapic_timer.timer);
@@ -2567,7 +2578,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	}
 	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
 
-	apic->vcpu->kvm->arch.apic_map_dirty = true;
+	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 	kvm_recalculate_apic_map(vcpu->kvm);
 	kvm_apic_set_version(vcpu);