Commit b8e81a3b authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Marcelo Tosatti:
 "KVM bug fixes, including an SVM interrupt injection regression fix,
  plus MIPS and ARM bug fixes"

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: MIPS: Enable after disabling interrupt
  KVM: MIPS: Fix trace event to save PC directly
  KVM: SVM: fix interrupt injection (apic->isr_count always 0)
  KVM: emulate: fix CMPXCHG8B on 32-bit hosts
  KVM: VMX: fix build without CONFIG_SMP
  arm/arm64: KVM: Add exit reasons to kvm_exit event tracing
  ARM: KVM: Fix size check in __coherent_cache_guest_page
parents f2cb4777 cfec0e75
@@ -207,7 +207,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
 	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
 
-	VM_BUG_ON(size & PAGE_MASK);
+	VM_BUG_ON(size & ~PAGE_MASK);
 
 	if (!need_flush && !icache_is_pipt())
 		goto vipt_cache;
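
The one-character fix above is easy to misread: PAGE_MASK clears the in-page offset bits, so "size & PAGE_MASK" is non-zero for any size of a whole page or more, while the intended "is size page-aligned?" check needs the complement. A minimal userspace sketch of the two expressions (PAGE_SIZE hard-coded for illustration):

	/* Sketch: why the complement matters.  "size & PAGE_MASK" is
	 * non-zero for ANY size >= one page; "size & ~PAGE_MASK" is
	 * non-zero only when size is not a multiple of the page size. */
	#include <assert.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long size = 2 * PAGE_SIZE;	/* valid, page-aligned */
		assert((size & PAGE_MASK) != 0);	/* old check would have fired */
		assert((size & ~PAGE_MASK) == 0);	/* fixed check passes */
		return 0;
	}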
@@ -540,7 +540,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		kvm_guest_exit();
-		trace_kvm_exit(*vcpu_pc(vcpu));
+		trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
 		/*
 		 * We may have taken a host interrupt in HYP mode (ie
 		 * while executing the guest). This interrupt is still
@@ -25,18 +25,22 @@ TRACE_EVENT(kvm_entry,
 );
 
 TRACE_EVENT(kvm_exit,
-	TP_PROTO(unsigned long vcpu_pc),
-	TP_ARGS(vcpu_pc),
+	TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc),
+	TP_ARGS(exit_reason, vcpu_pc),
 
 	TP_STRUCT__entry(
+		__field(	unsigned int,	exit_reason	)
 		__field(	unsigned long,	vcpu_pc		)
 	),
 
 	TP_fast_assign(
+		__entry->exit_reason = exit_reason;
 		__entry->vcpu_pc = vcpu_pc;
 	),
 
-	TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+	TP_printk("HSR_EC: 0x%04x, PC: 0x%08lx",
+		  __entry->exit_reason,
+		  __entry->vcpu_pc)
 );
 
 TRACE_EVENT(kvm_guest_fault,
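
The exit_reason recorded here comes from kvm_vcpu_trap_get_class(), which — on the understanding that it simply shifts out the exception class (EC) field, bits [31:26] of the ARM Hyp Syndrome Register — can be sketched as a standalone decoder:

	/* Sketch (assumption about the HSR layout): the exit reason traced
	 * above is the EC field, the top six bits of the 32-bit HSR. */
	static inline unsigned int hsr_exception_class(unsigned int hsr)
	{
		return hsr >> 26;	/* HSR[31:26] is the EC field */
	}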
@@ -216,6 +216,7 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
 	if (idx > current_cpu_data.tlbsize) {
 		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
 		kvm_mips_dump_host_tlbs();
+		local_irq_restore(flags);
 		return -1;
 	}
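
This is the "Enable after disabling interrupt" fix: the function disables interrupts with local_irq_save() earlier, and the invalid-index error path returned without re-enabling them. A generic sketch of the pairing rule being enforced (resource_valid() and do_work() are hypothetical stand-ins, not the MIPS code):

	/* Sketch: every early return between local_irq_save() and
	 * local_irq_restore() must itself restore the saved flags,
	 * or the CPU is left running with interrupts disabled. */
	#include <linux/irqflags.h>

	int guarded_op(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		if (!resource_valid()) {		/* error path */
			local_irq_restore(flags);	/* the line the fix adds */
			return -1;
		}
		do_work();
		local_irq_restore(flags);		/* normal path */
		return 0;
	}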
@@ -24,18 +24,18 @@ TRACE_EVENT(kvm_exit,
 	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
 	    TP_ARGS(vcpu, reason),
 
 	    TP_STRUCT__entry(
-			__field(struct kvm_vcpu *, vcpu)
+			__field(unsigned long, pc)
 			__field(unsigned int, reason)
 	    ),
 
 	    TP_fast_assign(
-			__entry->vcpu = vcpu;
+			__entry->pc = vcpu->arch.pc;
 			__entry->reason = reason;
 	    ),
 
 	    TP_printk("[%s]PC: 0x%08lx",
 		      kvm_mips_exit_types_str[__entry->reason],
-		      __entry->vcpu->arch.pc)
+		      __entry->pc)
 );
 
 #endif /* _TRACE_KVM_H */
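
The point of "save PC directly": TP_printk() runs when the trace buffer is read, possibly long after the event fired, so dereferencing a stored vcpu pointer there can touch memory that is already gone. The fix copies the value at TP_fast_assign() time instead. Illustrative structs only:

	/* Sketch: a trace entry should hold plain values, not pointers. */
	struct vcpu;					/* opaque here */
	struct bad_entry  { struct vcpu *vcpu; };	/* dereferenced at read time: may dangle */
	struct good_entry { unsigned long pc; };	/* copied at event time: always valid */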
@@ -4950,7 +4950,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 				goto done;
 			}
 		}
-	ctxt->dst.orig_val = ctxt->dst.val;
+	/* Copy full 64-bit value for CMPXCHG8B.  */
+	ctxt->dst.orig_val64 = ctxt->dst.val64;
 
 special_insn:
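
Why this broke only on 32-bit hosts: orig_val is an unsigned long, which is 32 bits under ILP32, so staging a CMPXCHG8B operand through it silently dropped the high half; the 64-bit union members keep the full width. A userspace sketch of the truncation:

	/* Sketch: copying a 64-bit value through a 32-bit "long" loses
	 * the top half, which is exactly what CMPXCHG8B compares on. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t val64 = 0x1122334455667788ULL;
		uint32_t orig_val = (uint32_t)val64;	/* what a 32-bit long keeps */
		uint64_t orig_val64 = val64;		/* full-width copy */

		printf("truncated: 0x%08x\n", orig_val);	/* 0x55667788 */
		printf("preserved: 0x%016llx\n",
		       (unsigned long long)orig_val64);
		return 0;
	}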
@@ -1572,7 +1572,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 		apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
 	}
 	apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
-	apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
+	apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0;
 	apic->highest_isr_cache = -1;
 	update_divide_count(apic);
 	atomic_set(&apic->lapic_timer.pending, 0);
@@ -1782,7 +1782,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
 	update_divide_count(apic);
 	start_apic_timer(apic);
 	apic->irr_pending = true;
-	apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
-				1 : count_vectors(apic->regs + APIC_ISR);
+	apic->isr_count = kvm_x86_ops->hwapic_isr_update ?
+				1 : count_vectors(apic->regs + APIC_ISR);
 	apic->highest_isr_cache = -1;
 	if (kvm_x86_ops->hwapic_irr_update)
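
Read together with the SVM hunk below, the shape of the bug is: the ISR bookkeeping paths skip the software count whenever a hwapic_isr_update callback exists, yet these two sites keyed isr_count off kvm_apic_vid_enabled() instead, and SVM installed a do-nothing callback, so its isr_count stayed 0 and interrupt injection stalled. Keying every site off the same condition restores the invariant; a sketch of it with hypothetical names:

	/* Sketch of the invariant after the fix: pin the count to 1 when
	 * hardware tracks the ISR (non-NULL hwapic_isr_update), so the
	 * highest-ISR scan is never skipped; otherwise count in software. */
	#include <stdbool.h>

	static int isr_count_for(bool hw_tracks_isr, int vectors_set)
	{
		return hw_tracks_isr ? 1 : vectors_set;
	}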
@@ -3649,11 +3649,6 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 	return;
 }
 
-static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
-{
-	return;
-}
-
 static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 {
 	return;
@@ -4403,7 +4398,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
 	.vm_has_apicv = svm_vm_has_apicv,
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
-	.hwapic_isr_update = svm_hwapic_isr_update,
 	.sync_pir_to_irr = svm_sync_pir_to_irr,
 	.set_tss_addr = svm_set_tss_addr,
@@ -4367,6 +4367,18 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_SMP
+	if (vcpu->mode == IN_GUEST_MODE) {
+		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
+				POSTED_INTR_VECTOR);
+		return true;
+	}
+#endif
+	return false;
+}
+
 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
 						int vector)
 {
@@ -4375,9 +4387,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
 	if (is_guest_mode(vcpu) &&
 	    vector == vmx->nested.posted_intr_nv) {
 		/* the PIR and ON have been set by L1. */
-		if (vcpu->mode == IN_GUEST_MODE)
-			apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
-				POSTED_INTR_VECTOR);
+		kvm_vcpu_trigger_posted_interrupt(vcpu);
 		/*
 		 * If a posted intr is not recognized by hardware,
 		 * we will accomplish it in the next vmentry.
@@ -4409,12 +4419,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
 	r = pi_test_and_set_on(&vmx->pi_desc);
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
-#ifdef CONFIG_SMP
-	if (!r && (vcpu->mode == IN_GUEST_MODE))
-		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
-				POSTED_INTR_VECTOR);
-	else
-#endif
-	kvm_vcpu_kick(vcpu);
+	if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu))
+		kvm_vcpu_kick(vcpu);
 }
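
The build fix works by burying the SMP-only IPI machinery behind a single boolean helper: on !CONFIG_SMP the helper compiles down to "return false", so callers never reference the per-CPU APIC IPI path and always fall back to kicking the vcpu. A sketch of the shape, with stand-in names replacing the kernel symbols:

	/* Sketch: consolidate an #ifdef into one helper with a boolean
	 * result instead of sprinkling #ifdef/else across callers. */
	#include <stdbool.h>

	static bool cpu_in_guest_mode;

	static void send_posted_intr_ipi(void) { /* SMP-only in the real code */ }
	static void kick_vcpu(void) { /* fallback wakeup */ }

	static bool trigger_posted_interrupt(void)
	{
	#ifdef CONFIG_SMP
		if (cpu_in_guest_mode) {
			send_posted_intr_ipi();
			return true;
		}
	#endif
		return false;		/* UP build: always take the kick path */
	}

	static void deliver(bool already_pending)
	{
		if (already_pending || !trigger_posted_interrupt())
			kick_vcpu();
	}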