Commit bce87cce authored by Paolo Bonzini's avatar Paolo Bonzini

KVM: x86: consolidate different ways to test for in-kernel LAPIC

Different pieces of code checked for vcpu->arch.apic being (non-)NULL,
or used kvm_vcpu_has_lapic (more optimized) or lapic_in_kernel.
Replace everything with lapic_in_kernel's name and kvm_vcpu_has_lapic's
implementation.
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1e3161b4
...@@ -109,14 +109,6 @@ static inline int irqchip_in_kernel(struct kvm *kvm) ...@@ -109,14 +109,6 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
return ret; return ret;
} }
static inline int lapic_in_kernel(struct kvm_vcpu *vcpu)
{
	/*
	 * Equivalent to irqchip_in_kernel(vcpu->kvm), but avoids the
	 * extra pointer chasing and memory barriers that would entail:
	 * an in-kernel local APIC implies an in-kernel irqchip.
	 */
	return !!vcpu->arch.apic;
}
void kvm_pic_reset(struct kvm_kpic_state *s); void kvm_pic_reset(struct kvm_kpic_state *s);
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
......
...@@ -281,7 +281,7 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu) ...@@ -281,7 +281,7 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
struct kvm_cpuid_entry2 *feat; struct kvm_cpuid_entry2 *feat;
u32 v = APIC_VERSION; u32 v = APIC_VERSION;
if (!kvm_vcpu_has_lapic(vcpu)) if (!lapic_in_kernel(vcpu))
return; return;
feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0); feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
...@@ -1319,7 +1319,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu) ...@@ -1319,7 +1319,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
u64 guest_tsc, tsc_deadline; u64 guest_tsc, tsc_deadline;
if (!kvm_vcpu_has_lapic(vcpu)) if (!lapic_in_kernel(vcpu))
return; return;
if (apic->lapic_timer.expired_tscdeadline == 0) if (apic->lapic_timer.expired_tscdeadline == 0)
...@@ -1645,7 +1645,7 @@ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu) ...@@ -1645,7 +1645,7 @@ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{ {
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic)) apic_lvtt_period(apic))
return 0; return 0;
...@@ -1656,7 +1656,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) ...@@ -1656,7 +1656,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{ {
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic)) apic_lvtt_period(apic))
return; return;
...@@ -2001,7 +2001,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) ...@@ -2001,7 +2001,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{ {
struct hrtimer *timer; struct hrtimer *timer;
if (!kvm_vcpu_has_lapic(vcpu)) if (!lapic_in_kernel(vcpu))
return; return;
timer = &vcpu->arch.apic->lapic_timer.timer; timer = &vcpu->arch.apic->lapic_timer.timer;
...@@ -2174,7 +2174,7 @@ int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data) ...@@ -2174,7 +2174,7 @@ int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{ {
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu)) if (!lapic_in_kernel(vcpu))
return 1; return 1;
/* if this is ICR write vector before command */ /* if this is ICR write vector before command */
...@@ -2188,7 +2188,7 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) ...@@ -2188,7 +2188,7 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
u32 low, high = 0; u32 low, high = 0;
if (!kvm_vcpu_has_lapic(vcpu)) if (!lapic_in_kernel(vcpu))
return 1; return 1;
if (apic_reg_read(apic, reg, 4, &low)) if (apic_reg_read(apic, reg, 4, &low))
...@@ -2220,7 +2220,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) ...@@ -2220,7 +2220,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
u8 sipi_vector; u8 sipi_vector;
unsigned long pe; unsigned long pe;
if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events) if (!lapic_in_kernel(vcpu) || !apic->pending_events)
return; return;
/* /*
......
...@@ -103,7 +103,7 @@ static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off) ...@@ -103,7 +103,7 @@ static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off)
extern struct static_key kvm_no_apic_vcpu; extern struct static_key kvm_no_apic_vcpu;
static inline bool kvm_vcpu_has_lapic(struct kvm_vcpu *vcpu) static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu)
{ {
if (static_key_false(&kvm_no_apic_vcpu)) if (static_key_false(&kvm_no_apic_vcpu))
return vcpu->arch.apic; return vcpu->arch.apic;
...@@ -130,7 +130,7 @@ static inline bool kvm_apic_sw_enabled(struct kvm_lapic *apic) ...@@ -130,7 +130,7 @@ static inline bool kvm_apic_sw_enabled(struct kvm_lapic *apic)
static inline bool kvm_apic_present(struct kvm_vcpu *vcpu) static inline bool kvm_apic_present(struct kvm_vcpu *vcpu)
{ {
return kvm_vcpu_has_lapic(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic); return lapic_in_kernel(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic);
} }
static inline int kvm_lapic_enabled(struct kvm_vcpu *vcpu) static inline int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
...@@ -150,7 +150,7 @@ static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu) ...@@ -150,7 +150,7 @@ static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu) static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
{ {
return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events; return lapic_in_kernel(vcpu) && vcpu->arch.apic->pending_events;
} }
static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq) static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)
...@@ -161,7 +161,7 @@ static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq) ...@@ -161,7 +161,7 @@ static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)
static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu) static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
{ {
return kvm_vcpu_has_lapic(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); return lapic_in_kernel(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
} }
static inline int kvm_apic_id(struct kvm_lapic *apic) static inline int kvm_apic_id(struct kvm_lapic *apic)
......
...@@ -257,7 +257,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) ...@@ -257,7 +257,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu) void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{ {
if (vcpu->arch.apic) if (lapic_in_kernel(vcpu))
kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC); kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
} }
......
...@@ -2984,7 +2984,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, ...@@ -2984,7 +2984,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
kvm_vcpu_has_lapic(vcpu)) lapic_in_kernel(vcpu))
vcpu->arch.apic->sipi_vector = events->sipi_vector; vcpu->arch.apic->sipi_vector = events->sipi_vector;
if (events->flags & KVM_VCPUEVENT_VALID_SMM) { if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
...@@ -2997,7 +2997,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, ...@@ -2997,7 +2997,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
else else
vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
if (kvm_vcpu_has_lapic(vcpu)) { if (lapic_in_kernel(vcpu)) {
if (events->smi.latched_init) if (events->smi.latched_init)
set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
else else
...@@ -3237,7 +3237,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, ...@@ -3237,7 +3237,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
switch (ioctl) { switch (ioctl) {
case KVM_GET_LAPIC: { case KVM_GET_LAPIC: {
r = -EINVAL; r = -EINVAL;
if (!vcpu->arch.apic) if (!lapic_in_kernel(vcpu))
goto out; goto out;
u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
...@@ -3255,7 +3255,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, ...@@ -3255,7 +3255,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
} }
case KVM_SET_LAPIC: { case KVM_SET_LAPIC: {
r = -EINVAL; r = -EINVAL;
if (!vcpu->arch.apic) if (!lapic_in_kernel(vcpu))
goto out; goto out;
u.lapic = memdup_user(argp, sizeof(*u.lapic)); u.lapic = memdup_user(argp, sizeof(*u.lapic));
if (IS_ERR(u.lapic)) if (IS_ERR(u.lapic))
...@@ -4090,7 +4090,7 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, ...@@ -4090,7 +4090,7 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
do { do {
n = min(len, 8); n = min(len, 8);
if (!(vcpu->arch.apic && if (!(lapic_in_kernel(vcpu) &&
!kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
&& kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
break; break;
...@@ -4110,7 +4110,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) ...@@ -4110,7 +4110,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
do { do {
n = min(len, 8); n = min(len, 8);
if (!(vcpu->arch.apic && if (!(lapic_in_kernel(vcpu) &&
!kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
addr, n, v)) addr, n, v))
&& kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
...@@ -6007,7 +6007,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu) ...@@ -6007,7 +6007,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
if (!kvm_x86_ops->update_cr8_intercept) if (!kvm_x86_ops->update_cr8_intercept)
return; return;
if (!vcpu->arch.apic) if (!lapic_in_kernel(vcpu))
return; return;
if (vcpu->arch.apicv_active) if (vcpu->arch.apicv_active)
...@@ -7035,7 +7035,7 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, ...@@ -7035,7 +7035,7 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state) struct kvm_mp_state *mp_state)
{ {
if (!kvm_vcpu_has_lapic(vcpu) && if (!lapic_in_kernel(vcpu) &&
mp_state->mp_state != KVM_MP_STATE_RUNNABLE) mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
return -EINVAL; return -EINVAL;
...@@ -7590,6 +7590,7 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) ...@@ -7590,6 +7590,7 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
} }
struct static_key kvm_no_apic_vcpu __read_mostly; struct static_key kvm_no_apic_vcpu __read_mostly;
EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment