Commit 8b6c32e8 authored by Will Deacon

Merge branch 'iommu/iommufd/paging-domain-alloc' into iommu/next

* iommu/iommufd/paging-domain-alloc:
  RDMA/usnic: Use iommu_paging_domain_alloc()
  wifi: ath11k: Use iommu_paging_domain_alloc()
  wifi: ath10k: Use iommu_paging_domain_alloc()
  drm/msm: Use iommu_paging_domain_alloc()
  vhost-vdpa: Use iommu_paging_domain_alloc()
  vfio/type1: Use iommu_paging_domain_alloc()
  iommufd: Use iommu_paging_domain_alloc()
  iommu: Add iommu_paging_domain_alloc() interface
parents 74e54d53 3b10f257
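
The driver conversions in this series all follow the same pattern: the bus-based iommu_domain_alloc(), which returns NULL on failure, is replaced by the device-based iommu_paging_domain_alloc(), which returns an ERR_PTR() value that callers check with IS_ERR(). A minimal caller-side sketch of that pattern follows; it is illustrative only, and example_setup_dma() is a hypothetical helper, not code from any of the patches below.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>

/* Hypothetical helper: allocate a paging domain for @dev and attach it. */
static int example_setup_dma(struct device *dev, struct iommu_domain **out)
{
	struct iommu_domain *domain;
	int ret;

	/* Returns an ERR_PTR (e.g. ERR_PTR(-ENODEV)) on failure, never NULL. */
	domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain))
		return PTR_ERR(domain);

	ret = iommu_attach_device(domain, dev);
	if (ret) {
		iommu_domain_free(domain);
		return ret;
	}

	*out = domain;
	return 0;
}

Note that when CONFIG_IOMMU_API is disabled, the inline stub added to the header below returns ERR_PTR(-ENODEV), so the same IS_ERR() check covers that configuration as well.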
@@ -407,10 +407,13 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
 	struct msm_iommu *iommu;
 	int ret;
 
-	domain = iommu_domain_alloc(dev->bus);
-	if (!domain)
+	if (!device_iommu_mapped(dev))
 		return NULL;
 
+	domain = iommu_paging_domain_alloc(dev);
+	if (IS_ERR(domain))
+		return ERR_CAST(domain);
+
 	iommu_set_pgtable_quirks(domain, quirks);
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
......
@@ -443,11 +443,11 @@ struct usnic_uiom_pd *usnic_uiom_alloc_pd(struct device *dev)
 	if (!pd)
 		return ERR_PTR(-ENOMEM);
 
-	pd->domain = domain = iommu_domain_alloc(dev->bus);
-	if (!domain) {
+	pd->domain = domain = iommu_paging_domain_alloc(dev);
+	if (IS_ERR(domain)) {
 		usnic_err("Failed to allocate IOMMU domain");
 		kfree(pd);
-		return ERR_PTR(-ENOMEM);
+		return ERR_CAST(domain);
 	}
 
 	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);
......
@@ -2010,6 +2010,10 @@ static int __iommu_domain_alloc_dev(struct device *dev, void *data)
 	return 0;
 }
 
+/*
+ * The iommu ops in bus has been retired. Do not use this interface in
+ * new drivers.
+ */
 struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
 {
 	const struct iommu_ops *ops = NULL;
@@ -2026,6 +2030,22 @@ struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
 }
 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
 
+/**
+ * iommu_paging_domain_alloc() - Allocate a paging domain
+ * @dev: device for which the domain is allocated
+ *
+ * Allocate a paging domain which will be managed by a kernel driver. Return
+ * allocated domain if successful, or a ERR pointer for failure.
+ */
+struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
+{
+	if (!dev_has_iommu(dev))
+		return ERR_PTR(-ENODEV);
+
+	return __iommu_domain_alloc(dev_iommu_ops(dev), dev, IOMMU_DOMAIN_UNMANAGED);
+}
+EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc);
+
 void iommu_domain_free(struct iommu_domain *domain)
 {
 	if (domain->type == IOMMU_DOMAIN_SVA)
......
@@ -140,9 +140,10 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
 		}
 		hwpt->domain->owner = ops;
 	} else {
-		hwpt->domain = iommu_domain_alloc(idev->dev->bus);
-		if (!hwpt->domain) {
-			rc = -ENOMEM;
+		hwpt->domain = iommu_paging_domain_alloc(idev->dev);
+		if (IS_ERR(hwpt->domain)) {
+			rc = PTR_ERR(hwpt->domain);
+			hwpt->domain = NULL;
 			goto out_abort;
 		}
 	}
......
@@ -1635,10 +1635,10 @@ static int ath10k_fw_init(struct ath10k *ar)
 	ar_snoc->fw.dev = &pdev->dev;
 
-	iommu_dom = iommu_domain_alloc(&platform_bus_type);
-	if (!iommu_dom) {
+	iommu_dom = iommu_paging_domain_alloc(ar_snoc->fw.dev);
+	if (IS_ERR(iommu_dom)) {
 		ath10k_err(ar, "failed to allocate iommu domain\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(iommu_dom);
 		goto err_unregister;
 	}
......
@@ -1001,10 +1001,10 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
 	ab_ahb->fw.dev = &pdev->dev;
 
-	iommu_dom = iommu_domain_alloc(&platform_bus_type);
-	if (!iommu_dom) {
+	iommu_dom = iommu_paging_domain_alloc(ab_ahb->fw.dev);
+	if (IS_ERR(iommu_dom)) {
 		ath11k_err(ab, "failed to allocate iommu domain\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(iommu_dom);
 		goto err_unregister;
 	}
......
@@ -2135,7 +2135,7 @@ static int vfio_iommu_domain_alloc(struct device *dev, void *data)
 {
 	struct iommu_domain **domain = data;
 
-	*domain = iommu_domain_alloc(dev->bus);
+	*domain = iommu_paging_domain_alloc(dev);
 	return 1; /* Don't iterate */
 }
@@ -2192,11 +2192,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	 * us a representative device for the IOMMU API call. We don't actually
 	 * want to iterate beyond the first device (if any).
 	 */
-	ret = -EIO;
 	iommu_group_for_each_dev(iommu_group, &domain->domain,
 				 vfio_iommu_domain_alloc);
-	if (!domain->domain)
+	if (IS_ERR(domain->domain)) {
+		ret = PTR_ERR(domain->domain);
 		goto out_free_domain;
+	}
 
 	if (iommu->nesting) {
 		ret = iommu_enable_nesting(domain->domain);
......
@@ -1312,26 +1312,24 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
 	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
-	const struct bus_type *bus;
 	int ret;
 
 	/* Device want to do DMA by itself */
 	if (ops->set_map || ops->dma_map)
 		return 0;
 
-	bus = dma_dev->bus;
-	if (!bus)
-		return -EFAULT;
-
 	if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY)) {
 		dev_warn_once(&v->dev,
 			      "Failed to allocate domain, device is not IOMMU cache coherent capable\n");
 		return -ENOTSUPP;
 	}
 
-	v->domain = iommu_domain_alloc(bus);
-	if (!v->domain)
-		return -EIO;
+	v->domain = iommu_paging_domain_alloc(dma_dev);
+	if (IS_ERR(v->domain)) {
+		ret = PTR_ERR(v->domain);
+		v->domain = NULL;
+		return ret;
+	}
 
 	ret = iommu_attach_device(v->domain, dma_dev);
 	if (ret)
......
@@ -785,6 +785,7 @@ extern bool iommu_present(const struct bus_type *bus);
 extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
 extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
 extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
+struct iommu_domain *iommu_paging_domain_alloc(struct device *dev);
 extern void iommu_domain_free(struct iommu_domain *domain);
 extern int iommu_attach_device(struct iommu_domain *domain,
 			       struct device *dev);
@@ -1093,6 +1094,11 @@ static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus
 	return NULL;
 }
 
+static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
+{
+	return ERR_PTR(-ENODEV);
+}
+
 static inline void iommu_domain_free(struct iommu_domain *domain)
 {
 }
......