Commit 5544eb9b authored by Paolo Bonzini

KVM: count number of assigned devices

If there are no assigned devices, the guest PAT is not providing
any useful information and can be overridden to writeback; VMX
always does this because it has the "IPAT" bit in its extended
page table entries, but SVM does not have anything similar.
Hook into VFIO and legacy device assignment so that they
provide this information to KVM.
Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
Tested-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 370777da
...@@ -604,6 +604,8 @@ struct kvm_arch { ...@@ -604,6 +604,8 @@ struct kvm_arch {
bool iommu_noncoherent; bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA #define __KVM_HAVE_ARCH_NONCOHERENT_DMA
atomic_t noncoherent_dma_count; atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
atomic_t assigned_device_count;
struct kvm_pic *vpic; struct kvm_pic *vpic;
struct kvm_ioapic *vioapic; struct kvm_ioapic *vioapic;
struct kvm_pit *vpit; struct kvm_pit *vpit;
......
...@@ -200,6 +200,7 @@ int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev) ...@@ -200,6 +200,7 @@ int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
goto out_unmap; goto out_unmap;
} }
kvm_arch_start_assignment(kvm);
pci_set_dev_assigned(pdev); pci_set_dev_assigned(pdev);
dev_info(&pdev->dev, "kvm assign device\n"); dev_info(&pdev->dev, "kvm assign device\n");
...@@ -224,6 +225,7 @@ int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev) ...@@ -224,6 +225,7 @@ int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
iommu_detach_device(domain, &pdev->dev); iommu_detach_device(domain, &pdev->dev);
pci_clear_dev_assigned(pdev); pci_clear_dev_assigned(pdev);
kvm_arch_end_assignment(kvm);
dev_info(&pdev->dev, "kvm deassign device\n"); dev_info(&pdev->dev, "kvm deassign device\n");
......
...@@ -8213,6 +8213,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) ...@@ -8213,6 +8213,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
kvm_x86_ops->interrupt_allowed(vcpu); kvm_x86_ops->interrupt_allowed(vcpu);
} }
/*
 * Record that a device is being assigned to this VM by bumping the
 * per-VM assigned_device_count.  Per the commit description, a nonzero
 * count tells SVM it may not override the guest PAT with writeback.
 * Exported (GPL-only) so VFIO and legacy device assignment can call it.
 */
void kvm_arch_start_assignment(struct kvm *kvm)
{
atomic_inc(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
/*
 * Record that one device assignment to this VM has ended by dropping
 * the per-VM assigned_device_count.  Must pair one-to-one with
 * kvm_arch_start_assignment(); the counter is never checked for
 * underflow here, so callers are responsible for balanced use.
 */
void kvm_arch_end_assignment(struct kvm *kvm)
{
atomic_dec(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
/*
 * Return true if any device is currently assigned to this VM
 * (i.e. assigned_device_count is nonzero).  The atomic_read() result
 * is converted implicitly to bool; the value is only a snapshot and
 * may change immediately after the call — callers must tolerate races.
 */
bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
return atomic_read(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
void kvm_arch_register_noncoherent_dma(struct kvm *kvm) void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{ {
atomic_inc(&kvm->arch.noncoherent_dma_count); atomic_inc(&kvm->arch.noncoherent_dma_count);
......
...@@ -734,6 +734,24 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) ...@@ -734,6 +734,24 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
return false; return false;
} }
#endif #endif
/*
 * Device-assignment tracking hooks.  An architecture that maintains an
 * assigned-device count defines __KVM_HAVE_ARCH_ASSIGNED_DEVICE (x86
 * does, in kvm_host.h) and implements the three functions below;
 * otherwise the tracking collapses to no-ops and
 * kvm_arch_has_assigned_device() always reports false.
 */
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
/* No arch support: assignment events are ignored. */
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}
static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}
/* Without arch tracking, no device is ever considered assigned. */
static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
return false;
}
#endif
static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{ {
......
...@@ -155,6 +155,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg) ...@@ -155,6 +155,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
list_add_tail(&kvg->node, &kv->group_list); list_add_tail(&kvg->node, &kv->group_list);
kvg->vfio_group = vfio_group; kvg->vfio_group = vfio_group;
kvm_arch_start_assignment(dev->kvm);
mutex_unlock(&kv->lock); mutex_unlock(&kv->lock);
kvm_vfio_update_coherency(dev); kvm_vfio_update_coherency(dev);
...@@ -190,6 +192,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg) ...@@ -190,6 +192,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
break; break;
} }
kvm_arch_end_assignment(dev->kvm);
mutex_unlock(&kv->lock); mutex_unlock(&kv->lock);
kvm_vfio_group_put_external_user(vfio_group); kvm_vfio_group_put_external_user(vfio_group);
...@@ -239,6 +243,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev) ...@@ -239,6 +243,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
kvm_vfio_group_put_external_user(kvg->vfio_group); kvm_vfio_group_put_external_user(kvg->vfio_group);
list_del(&kvg->node); list_del(&kvg->node);
kfree(kvg); kfree(kvg);
kvm_arch_end_assignment(dev->kvm);
} }
kvm_vfio_update_coherency(dev); kvm_vfio_update_coherency(dev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment