Commit 050fc52d authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "All x86-specific, apart from some arch-independent syzkaller fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: cleanup the page tracking SRCU instance
  KVM: nVMX: fix nested EPT detection
  KVM: pci-assign: do not map smm memory slot pages in vt-d page tables
  KVM: kvm_io_bus_unregister_dev() should never fail
  KVM: VMX: Fix enable VPID conditions
  KVM: nVMX: Fix nested VPID vmx exec control
  KVM: x86: correct async page present tracepoint
  kvm: vmx: Flush TLB when the APIC-access address changes
  KVM: x86: use pic/ioapic destructor when destroy vm
  KVM: x86: check existance before destroy
  KVM: x86: clear bus pointer when destroyed
  KVM: Documentation: document MCE ioctls
  KVM: nVMX: don't reset kvm mmu twice
  PTP: fix ptr_ret.cocci warnings
  kvm: fix usage of uninit spinlock in avic_vm_destroy()
  KVM: VMX: downgrade warning on unexpected exit code
parents ad0376eb 2beb6dad
@@ -3377,6 +3377,69 @@ struct kvm_ppc_resize_hpt {
        __u32 pad;
};

4.104 KVM_X86_GET_MCE_CAP_SUPPORTED
Capability: KVM_CAP_MCE
Architectures: x86
Type: system ioctl
Parameters: u64 mce_cap (out)
Returns: 0 on success, -1 on error
Returns supported MCE capabilities. The u64 mce_cap parameter
has the same format as the MSR_IA32_MCG_CAP register. Supported
capabilities will have the corresponding bits set.
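
For illustration only (this sketch is not part of the patch), querying the
supported mask from userspace might look like the following, assuming kvm_fd
is an already-open file descriptor for /dev/kvm:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: read the supported MCE capability bits (MSR_IA32_MCG_CAP format). */
static uint64_t query_mce_cap(int kvm_fd)
{
        uint64_t mce_cap = 0;

        if (ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0)
                perror("KVM_X86_GET_MCE_CAP_SUPPORTED");
        return mce_cap;
}
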
4.105 KVM_X86_SETUP_MCE
Capability: KVM_CAP_MCE
Architectures: x86
Type: vcpu ioctl
Parameters: u64 mcg_cap (in)
Returns: 0 on success,
-EFAULT if u64 mcg_cap cannot be read,
-EINVAL if the requested number of banks is invalid,
-EINVAL if requested MCE capability is not supported.
Initializes MCE support for use. The u64 mcg_cap parameter
has the same format as the MSR_IA32_MCG_CAP register and
specifies which capabilities should be enabled. The maximum
supported number of error-reporting banks can be retrieved when
checking for KVM_CAP_MCE. The supported capabilities can be
retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED.
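
A minimal sketch of a typical setup sequence follows (not part of the patch;
it assumes kvm_fd and vcpu_fd are already-open /dev/kvm and vcpu descriptors,
and keeps error handling to a minimum):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: enable MCE on a vcpu with all supported capabilities and the
 * maximum bank count reported by the KVM_CAP_MCE extension check. */
static int setup_mce(int kvm_fd, int vcpu_fd)
{
        uint64_t mcg_cap = 0;
        int max_banks;

        max_banks = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MCE);
        if (max_banks <= 0)
                return -1;
        if (ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mcg_cap) < 0)
                return -1;
        mcg_cap |= (uint64_t)max_banks;  /* bank count lives in the low byte */
        return ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap);
}
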
4.106 KVM_X86_SET_MCE
Capability: KVM_CAP_MCE
Architectures: x86
Type: vcpu ioctl
Parameters: struct kvm_x86_mce (in)
Returns: 0 on success,
-EFAULT if struct kvm_x86_mce cannot be read,
-EINVAL if the bank number is invalid,
-EINVAL if VAL bit is not set in status field.
Inject a machine check error (MCE) into the guest. The input
parameter is:
struct kvm_x86_mce {
        __u64 status;
        __u64 addr;
        __u64 misc;
        __u64 mcg_status;
        __u8 bank;
        __u8 pad1[7];
        __u64 pad2[3];
};
If the MCE being reported is an uncorrected error, KVM will
inject it as an MCE exception into the guest. If the guest
MCG_STATUS register reports that an MCE is in progress, KVM
causes a KVM_EXIT_SHUTDOWN vmexit.
Otherwise, if the MCE is a corrected error, KVM will just
store it in the corresponding bank (provided this bank is
not holding a previously reported uncorrected error).
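
As a hedged illustration (again not part of the documented text; the
MCi_STATUS/MCG_STATUS bit macros are defined locally here, and vcpu_fd and the
guest address are assumptions), injecting an uncorrected error into bank 0
could look like:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Architectural MCi_STATUS / MCG_STATUS bits, spelled out for clarity. */
#define MCI_STATUS_VAL    (1ULL << 63)  /* entry valid */
#define MCI_STATUS_UC     (1ULL << 61)  /* uncorrected error */
#define MCI_STATUS_EN     (1ULL << 60)  /* error reporting enabled */
#define MCI_STATUS_ADDRV  (1ULL << 58)  /* addr field valid */
#define MCG_STATUS_RIPV   (1ULL << 0)   /* restart IP valid */
#define MCG_STATUS_MCIP   (1ULL << 2)   /* machine check in progress */

/* Sketch: report an uncorrected error at guest_paddr through bank 0.
 * The guest must have CR4.MCE enabled for the exception to be delivered. */
static int inject_uc_mce(int vcpu_fd, uint64_t guest_paddr)
{
        struct kvm_x86_mce mce;

        memset(&mce, 0, sizeof(mce));
        mce.bank = 0;            /* must be below the bank count set up earlier */
        mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                     MCI_STATUS_ADDRV;
        mce.addr = guest_paddr;
        mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        return ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
}
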

5. The kvm_run structure
------------------------
@@ -46,6 +46,7 @@ struct kvm_page_track_notifier_node {
 };
 
 void kvm_page_track_init(struct kvm *kvm);
+void kvm_page_track_cleanup(struct kvm *kvm);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont);
@@ -657,6 +657,9 @@ void kvm_pic_destroy(struct kvm *kvm)
 {
         struct kvm_pic *vpic = kvm->arch.vpic;
 
+        if (!vpic)
+                return;
+
         kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
         kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
         kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
@@ -635,6 +635,9 @@ void kvm_ioapic_destroy(struct kvm *kvm)
 {
         struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 
+        if (!ioapic)
+                return;
+
         cancel_delayed_work_sync(&ioapic->eoi_inject);
         kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
         kvm->arch.vioapic = NULL;
@@ -160,6 +160,14 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
         return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
 }
 
+void kvm_page_track_cleanup(struct kvm *kvm)
+{
+        struct kvm_page_track_notifier_head *head;
+
+        head = &kvm->arch.track_notifier_head;
+        cleanup_srcu_struct(&head->track_srcu);
+}
+
 void kvm_page_track_init(struct kvm *kvm)
 {
         struct kvm_page_track_notifier_head *head;
@@ -1379,6 +1379,9 @@ static void avic_vm_destroy(struct kvm *kvm)
         unsigned long flags;
         struct kvm_arch *vm_data = &kvm->arch;
 
+        if (!avic)
+                return;
+
         avic_free_vm_id(vm_data->avic_vm_id);
 
         if (vm_data->avic_logical_id_table_page)
@@ -1239,6 +1239,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid(void)
+{
+        return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
+}
+
 static inline bool cpu_has_vmx_ept(void)
 {
         return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -2753,7 +2758,6 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
                 SECONDARY_EXEC_RDTSCP |
                 SECONDARY_EXEC_DESC |
                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
-                SECONDARY_EXEC_ENABLE_VPID |
                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
                 SECONDARY_EXEC_WBINVD_EXITING |
@@ -2781,10 +2785,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
          * though it is treated as global context.  The alternative is
          * not failing the single-context invvpid, and it is worse.
          */
-        if (enable_vpid)
+        if (enable_vpid) {
+                vmx->nested.nested_vmx_secondary_ctls_high |=
+                        SECONDARY_EXEC_ENABLE_VPID;
                 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
                         VMX_VPID_EXTENT_SUPPORTED_MASK;
-        else
+        } else
                 vmx->nested.nested_vmx_vpid_caps = 0;
 
         if (enable_unrestricted_guest)
@@ -4024,6 +4030,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
         __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
 }
 
+static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+{
+        if (enable_ept)
+                vmx_flush_tlb(vcpu);
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
         ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -6517,8 +6529,10 @@ static __init int hardware_setup(void)
         if (boot_cpu_has(X86_FEATURE_NX))
                 kvm_enable_efer_bits(EFER_NX);
 
-        if (!cpu_has_vmx_vpid())
+        if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
+            !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
                 enable_vpid = 0;
+
         if (!cpu_has_vmx_shadow_vmcs())
                 enable_shadow_vmcs = 0;
         if (enable_shadow_vmcs)
@@ -8501,7 +8515,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
             && kvm_vmx_exit_handlers[exit_reason])
                 return kvm_vmx_exit_handlers[exit_reason](vcpu);
         else {
-                WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+                vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+                                exit_reason);
                 kvm_queue_exception(vcpu, UD_VECTOR);
                 return 1;
         }
@@ -8547,6 +8562,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
         } else {
                 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
                 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+                vmx_flush_tlb_ept_only(vcpu);
         }
         vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
@@ -8572,8 +8588,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
          */
         if (!is_guest_mode(vcpu) ||
             !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
-                             SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+                             SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
                 vmcs_write64(APIC_ACCESS_ADDR, hpa);
+                vmx_flush_tlb_ept_only(vcpu);
+        }
 }
 
 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
@@ -9974,7 +9992,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
         u32 exec_control;
-        bool nested_ept_enabled = false;
 
         vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
         vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -10121,8 +10138,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                         vmcs12->guest_intr_status);
         }
 
-        nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
-
         /*
          * Write an illegal value to APIC_ACCESS_ADDR. Later,
          * nested_get_vmcs12_pages will either fix it up or
@@ -10255,6 +10270,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
         if (nested_cpu_has_ept(vmcs12)) {
                 kvm_mmu_unload(vcpu);
                 nested_ept_init_mmu_context(vcpu);
+        } else if (nested_cpu_has2(vmcs12,
+                                   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+                vmx_flush_tlb_ept_only(vcpu);
         }
 
         /*
@@ -10282,12 +10300,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
         vmx_set_efer(vcpu, vcpu->arch.efer);
 
         /* Shadow page tables on either EPT or shadow page tables. */
-        if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+        if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
                                 entry_failure_code))
                 return 1;
 
-        kvm_mmu_reset_context(vcpu);
-
         if (!enable_ept)
                 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
@@ -11056,6 +11072,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
                 vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
                 vmx_set_virtual_x2apic_mode(vcpu,
                                 vcpu->arch.apic_base & X2APIC_ENABLE);
+        } else if (!nested_cpu_has_ept(vmcs12) &&
+                   nested_cpu_has2(vmcs12,
+                                   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+                vmx_flush_tlb_ept_only(vcpu);
         }
 
         /* This is needed for same reason as it was needed in prepare_vmcs02 */
@@ -8153,11 +8153,12 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
         if (kvm_x86_ops->vm_destroy)
                 kvm_x86_ops->vm_destroy(kvm);
         kvm_iommu_unmap_guest(kvm);
-        kfree(kvm->arch.vpic);
-        kfree(kvm->arch.vioapic);
+        kvm_pic_destroy(kvm);
+        kvm_ioapic_destroy(kvm);
         kvm_free_vcpus(kvm);
         kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
         kvm_mmu_uninit_vm(kvm);
+        kvm_page_track_cleanup(kvm);
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
@@ -8566,11 +8567,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 {
         struct x86_exception fault;
 
-        trace_kvm_async_pf_ready(work->arch.token, work->gva);
         if (work->wakeup_all)
                 work->arch.token = ~0; /* broadcast wakeup */
         else
                 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+        trace_kvm_async_pf_ready(work->arch.token, work->gva);
 
         if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
             !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
@@ -193,10 +193,7 @@ static int __init ptp_kvm_init(void)
         kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL);
 
-        if (IS_ERR(kvm_ptp_clock.ptp_clock))
-                return PTR_ERR(kvm_ptp_clock.ptp_clock);
-
-        return 0;
+        return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock);
 }
 
 module_init(ptp_kvm_init);
@@ -162,8 +162,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                             int len, void *val);
 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                             int len, struct kvm_io_device *dev);
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-                              struct kvm_io_device *dev);
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+                               struct kvm_io_device *dev);
 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                          gpa_t addr);
@@ -870,7 +870,8 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
                         continue;
 
                 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
-                kvm->buses[bus_idx]->ioeventfd_count--;
+                if (kvm->buses[bus_idx])
+                        kvm->buses[bus_idx]->ioeventfd_count--;
                 ioeventfd_release(p);
                 ret = 0;
                 break;
@@ -727,8 +727,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
         list_del(&kvm->vm_list);
         spin_unlock(&kvm_lock);
         kvm_free_irq_routing(kvm);
-        for (i = 0; i < KVM_NR_BUSES; i++)
-                kvm_io_bus_destroy(kvm->buses[i]);
+        for (i = 0; i < KVM_NR_BUSES; i++) {
+                if (kvm->buses[i])
+                        kvm_io_bus_destroy(kvm->buses[i]);
+                kvm->buses[i] = NULL;
+        }
         kvm_coalesced_mmio_free(kvm);
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
         mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
@@ -1062,7 +1065,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
          * changes) is disallowed above, so any other attribute changes getting
          * here can be skipped.
          */
-        if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
+        if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
                 r = kvm_iommu_map_pages(kvm, &new);
                 return r;
         }
@@ -3474,6 +3477,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
         };
 
         bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+        if (!bus)
+                return -ENOMEM;
         r = __kvm_io_bus_write(vcpu, bus, &range, val);
         return r < 0 ? r : 0;
 }
@@ -3491,6 +3496,8 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
         };
 
         bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+        if (!bus)
+                return -ENOMEM;
 
         /* First try the device referenced by cookie. */
         if ((cookie >= 0) && (cookie < bus->dev_count) &&
@@ -3541,6 +3548,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
         };
 
         bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+        if (!bus)
+                return -ENOMEM;
         r = __kvm_io_bus_read(vcpu, bus, &range, val);
         return r < 0 ? r : 0;
 }
@@ -3553,6 +3562,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
         struct kvm_io_bus *new_bus, *bus;
 
         bus = kvm->buses[bus_idx];
+        if (!bus)
+                return -ENOMEM;
+
         /* exclude ioeventfd which is limited by maximum fd */
         if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
                 return -ENOSPC;
@@ -3572,37 +3584,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 }
 
 /* Caller must hold slots_lock. */
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-                              struct kvm_io_device *dev)
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+                               struct kvm_io_device *dev)
 {
-        int i, r;
+        int i;
         struct kvm_io_bus *new_bus, *bus;
 
         bus = kvm->buses[bus_idx];
-        r = -ENOENT;
+        if (!bus)
+                return;
+
         for (i = 0; i < bus->dev_count; i++)
                 if (bus->range[i].dev == dev) {
-                        r = 0;
                         break;
                 }
 
-        if (r)
-                return r;
+        if (i == bus->dev_count)
+                return;
 
         new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
                           sizeof(struct kvm_io_range)), GFP_KERNEL);
-        if (!new_bus)
-                return -ENOMEM;
+        if (!new_bus) {
+                pr_err("kvm: failed to shrink bus, removing it completely\n");
+                goto broken;
+        }
 
         memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
         new_bus->dev_count--;
         memcpy(new_bus->range + i, bus->range + i + 1,
                (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
 
+broken:
         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
         synchronize_srcu_expedited(&kvm->srcu);
         kfree(bus);
-        return r;
+        return;
 }
 
 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -3615,6 +3631,8 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
         srcu_idx = srcu_read_lock(&kvm->srcu);
 
         bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+        if (!bus)
+                goto out_unlock;
 
         dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
         if (dev_idx < 0)