Commit 343f94fe authored by Gleb Natapov, committed by Avi Kivity

KVM: consolidate ioapic/ipi interrupt delivery logic

Use kvm_apic_match_dest() in kvm_get_intr_delivery_bitmask() instead
of duplicating the same code. Use kvm_get_intr_delivery_bitmask() in
apic_send_ipi() to figure out ipi destination instead of reimplementing
the logic.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent a53c17d2
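The consolidation is easiest to see from the shape of the new helper. A condensed sketch, pieced together from the hunks below (kernel context assumed; the debug printk and the ia64 variant are omitted):

void kvm_get_intr_delivery_bitmask(struct kvm *kvm, struct kvm_lapic *src,
                int dest_id, int dest_mode, bool low_prio, int short_hand,
                unsigned long *deliver_bitmask)
{
        int i;
        struct kvm_vcpu *vcpu;

        /* Single pass over all vcpus; kvm_apic_match_dest() decides membership. */
        bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
        for (i = 0; i < KVM_MAX_VCPUS; i++) {
                vcpu = kvm->vcpus[i];
                if (!vcpu || !kvm_apic_present(vcpu))
                        continue;
                if (kvm_apic_match_dest(vcpu, src, short_hand, dest_id,
                                        dest_mode))
                        __set_bit(i, deliver_bitmask);
        }

        /* Lowest-priority delivery collapses the mask to a single vcpu. */
        if (low_prio) {
                vcpu = kvm_get_lowest_prio_vcpu(kvm, 0, deliver_bitmask);
                bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
                if (vcpu)
                        __set_bit(vcpu->vcpu_id, deliver_bitmask);
        }
}

apic_send_ipi() calls this with its own APIC as src (so the SELF/ALLBUT shorthands resolve against the sender) and with low_prio set for APIC_DM_LOWEST, while ioapic_deliver_entry() passes NULL and shorthand 0, since a redirect entry has no sending APIC.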
@@ -1852,6 +1852,14 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
 	return lvcpu;
 }
 
+int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+		int short_hand, int dest, int dest_mode)
+{
+	struct kvm_lapic *target = vcpu->arch.apic;
+	return (dest_mode == 0) ?
+		kvm_apic_match_physical_addr(target, dest) :
+		kvm_apic_match_logical_addr(target, dest);
+}
+
 static int find_highest_bits(int *dat)
 {
 	u32 bits, bitnum;
@@ -20,6 +20,9 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
+int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+		int short_hand, int dest, int dest_mode);
+bool kvm_apic_present(struct kvm_vcpu *vcpu);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 dmode, u8 trig);
 
 #endif
@@ -260,7 +260,7 @@ static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
 
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
 {
-	return kvm_apic_id(apic) == dest;
+	return dest == 0xff || kvm_apic_id(apic) == dest;
 }
 
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
@@ -289,37 +289,34 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
 	return result;
 }
 
-static int apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
-			   int short_hand, int dest, int dest_mode)
+int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+			int short_hand, int dest, int dest_mode)
 {
 	int result = 0;
 	struct kvm_lapic *target = vcpu->arch.apic;
 
 	apic_debug("target %p, source %p, dest 0x%x, "
-		   "dest_mode 0x%x, short_hand 0x%x",
+		   "dest_mode 0x%x, short_hand 0x%x\n",
 		   target, source, dest, dest_mode, short_hand);
 
 	ASSERT(!target);
 	switch (short_hand) {
 	case APIC_DEST_NOSHORT:
-		if (dest_mode == 0) {
+		if (dest_mode == 0)
 			/* Physical mode. */
-			if ((dest == 0xFF) || (dest == kvm_apic_id(target)))
-				result = 1;
-		} else
+			result = kvm_apic_match_physical_addr(target, dest);
+		else
 			/* Logical mode. */
 			result = kvm_apic_match_logical_addr(target, dest);
 		break;
 	case APIC_DEST_SELF:
-		if (target == source)
-			result = 1;
+		result = (target == source);
 		break;
 	case APIC_DEST_ALLINC:
 		result = 1;
 		break;
 	case APIC_DEST_ALLBUT:
-		if (target != source)
-			result = 1;
+		result = (target != source);
 		break;
 	default:
 		printk(KERN_WARNING "Bad dest shorthand value %x\n",
@@ -492,39 +489,27 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 	unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
 	unsigned int vector = icr_low & APIC_VECTOR_MASK;
 
-	struct kvm_vcpu *target;
-	struct kvm_vcpu *vcpu;
-	DECLARE_BITMAP(lpr_map, KVM_MAX_VCPUS);
+	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
 	int i;
 
-	bitmap_zero(lpr_map, KVM_MAX_VCPUS);
 	apic_debug("icr_high 0x%x, icr_low 0x%x, "
 		   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
 		   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
 		   icr_high, icr_low, short_hand, dest,
 		   trig_mode, level, dest_mode, delivery_mode, vector);
 
-	for (i = 0; i < KVM_MAX_VCPUS; i++) {
-		vcpu = apic->vcpu->kvm->vcpus[i];
-		if (!vcpu)
-			continue;
-		if (vcpu->arch.apic &&
-		    apic_match_dest(vcpu, apic, short_hand, dest, dest_mode)) {
-			if (delivery_mode == APIC_DM_LOWEST)
-				__set_bit(vcpu->vcpu_id, lpr_map);
-			else
-				__apic_accept_irq(vcpu->arch.apic, delivery_mode,
-						  vector, level, trig_mode);
-		}
-	}
-
-	if (delivery_mode == APIC_DM_LOWEST) {
-		target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, lpr_map);
-		if (target != NULL)
-			__apic_accept_irq(target->arch.apic, delivery_mode,
-					  vector, level, trig_mode);
-	}
+	kvm_get_intr_delivery_bitmask(apic->vcpu->kvm, apic, dest, dest_mode,
+			delivery_mode == APIC_DM_LOWEST, short_hand,
+			deliver_bitmask);
+
+	while ((i = find_first_bit(deliver_bitmask, KVM_MAX_VCPUS))
+			< KVM_MAX_VCPUS) {
+		struct kvm_vcpu *vcpu = apic->vcpu->kvm->vcpus[i];
+		__clear_bit(i, deliver_bitmask);
+		if (vcpu)
+			__apic_accept_irq(vcpu->arch.apic, delivery_mode,
+					vector, level, trig_mode);
+	}
 }
 
 static u32 apic_get_tmcct(struct kvm_lapic *apic)
@@ -930,16 +915,14 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_reset);
 
-int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
+bool kvm_apic_present(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->arch.apic;
-	int ret = 0;
-
-	if (!apic)
-		return 0;
-	ret = apic_enabled(apic);
-
-	return ret;
+	return vcpu->arch.apic && apic_hw_enabled(vcpu->arch.apic);
+}
+
+int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
+{
+	return kvm_apic_present(vcpu) && apic_sw_enabled(vcpu->arch.apic);
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_enabled);
@@ -37,6 +37,8 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu);
 int kvm_lapic_enabled(struct kvm_vcpu *vcpu);
+bool kvm_apic_present(struct kvm_vcpu *vcpu);
+bool kvm_lapic_present(struct kvm_vcpu *vcpu);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
 void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
@@ -363,11 +363,6 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 				      struct kvm_irq_mask_notifier *kimn);
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
 
-#ifdef __KVM_HAVE_IOAPIC
-void kvm_get_intr_delivery_bitmask(struct kvm *kvm,
-				   union kvm_ioapic_redirect_entry *entry,
-				   unsigned long *deliver_bitmask);
-#endif
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
@@ -147,7 +147,10 @@ int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e)
 	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
 	int i, r = -1;
 
-	kvm_get_intr_delivery_bitmask(kvm, e, deliver_bitmask);
+	kvm_get_intr_delivery_bitmask(kvm, NULL, e->fields.dest_id,
+			e->fields.dest_mode,
+			e->fields.delivery_mode == IOAPIC_LOWEST_PRIORITY,
+			0, deliver_bitmask);
 
 	if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
 		ioapic_debug("no target on destination\n");
@@ -66,12 +66,14 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
 
 struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
 				       unsigned long *bitmap);
+int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+		int short_hand, int dest, int dest_mode);
 void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
 int kvm_ioapic_init(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
-void kvm_get_intr_delivery_bitmask(struct kvm *kvm,
-				   union kvm_ioapic_redirect_entry *entry,
-				   unsigned long *deliver_bitmask);
+void kvm_get_intr_delivery_bitmask(struct kvm *kvm, struct kvm_lapic *src,
+		int dest_id, int dest_mode, bool low_prio, int short_hand,
+		unsigned long *deliver_bitmask);
 int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e);
 #endif
@@ -43,67 +43,35 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
 	return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
 }
 
-void kvm_get_intr_delivery_bitmask(struct kvm *kvm,
-				   union kvm_ioapic_redirect_entry *entry,
-				   unsigned long *deliver_bitmask)
+void kvm_get_intr_delivery_bitmask(struct kvm *kvm, struct kvm_lapic *src,
+		int dest_id, int dest_mode, bool low_prio, int short_hand,
+		unsigned long *deliver_bitmask)
 {
 	int i;
 	struct kvm_vcpu *vcpu;
 
+	if (dest_mode == 0 && dest_id == 0xff && low_prio)
+		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
+
 	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
-
-	if (entry->fields.dest_mode == 0) {	/* Physical mode. */
-		if (entry->fields.dest_id == 0xFF) {	/* Broadcast. */
-			for (i = 0; i < KVM_MAX_VCPUS; ++i)
-				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
-					__set_bit(i, deliver_bitmask);
-			/* Lowest priority shouldn't combine with broadcast */
-			if (entry->fields.delivery_mode ==
-			    IOAPIC_LOWEST_PRIORITY && printk_ratelimit())
-				printk(KERN_INFO "kvm: apic: phys broadcast "
-						  "and lowest prio\n");
-			return;
-		}
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = kvm->vcpus[i];
-			if (!vcpu)
-				continue;
-			if (kvm_apic_match_physical_addr(vcpu->arch.apic,
-					entry->fields.dest_id)) {
-				if (vcpu->arch.apic)
-					__set_bit(i, deliver_bitmask);
-				break;
-			}
-		}
-	} else if (entry->fields.dest_id != 0) /* Logical mode, MDA non-zero. */
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = kvm->vcpus[i];
-			if (!vcpu)
-				continue;
-			if (vcpu->arch.apic &&
-			    kvm_apic_match_logical_addr(vcpu->arch.apic,
-					entry->fields.dest_id))
-				__set_bit(i, deliver_bitmask);
-		}
-
-	switch (entry->fields.delivery_mode) {
-	case IOAPIC_LOWEST_PRIORITY:
-		/* Select one in deliver_bitmask */
-		vcpu = kvm_get_lowest_prio_vcpu(kvm,
-				entry->fields.vector, deliver_bitmask);
-		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
-		if (!vcpu)
-			return;
-		__set_bit(vcpu->vcpu_id, deliver_bitmask);
-		break;
-	case IOAPIC_FIXED:
-	case IOAPIC_NMI:
-		break;
-	default:
-		if (printk_ratelimit())
-			printk(KERN_INFO "kvm: unsupported delivery mode %d\n",
-				entry->fields.delivery_mode);
-		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
-	}
+	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+		vcpu = kvm->vcpus[i];
+
+		if (!vcpu || !kvm_apic_present(vcpu))
+			continue;
+
+		if (!kvm_apic_match_dest(vcpu, src, short_hand, dest_id,
+					dest_mode))
+			continue;
+
+		__set_bit(i, deliver_bitmask);
+	}
+	if (low_prio) {
+		vcpu = kvm_get_lowest_prio_vcpu(kvm, 0, deliver_bitmask);
+		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
+		if (vcpu)
+			__set_bit(vcpu->vcpu_id, deliver_bitmask);
+	}
 }