Commit 3f4b87b9 authored by Joerg Roedel

iommu/amd: Make use of domain_alloc and domain_free

Implement the new iommu-ops function pointers and remove the
obsolete domain_init and domain_destroy functions.
Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent a10315e5
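The change below replaces the dom->priv back-pointer with a struct iommu_domain embedded inside struct protection_domain, recovered via container_of() in the new to_pdomain() helper. The following standalone C sketch illustrates that allocation/free pattern in userspace; the names mirror the patch, but the simplified fields, the container_of macro, and the main() driver are illustrative only, not the kernel implementation.

/*
 * Minimal userspace sketch of the pattern this patch adopts: the generic
 * iommu_domain is embedded in the driver's protection_domain and recovered
 * with container_of(), instead of being carried in a dom->priv pointer.
 * Names mirror the patch; fields and main() are illustrative, not kernel code.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct iommu_domain {                   /* generic handle seen by core code */
        unsigned long aperture_end;
};

struct protection_domain {              /* driver-private domain */
        int id;
        struct iommu_domain domain;     /* embedded generic handle */
};

/* domain_alloc: allocate the driver domain, return the embedded handle */
static struct iommu_domain *domain_alloc(int id)
{
        struct protection_domain *pdom = calloc(1, sizeof(*pdom));

        if (!pdom)
                return NULL;
        pdom->id = id;
        pdom->domain.aperture_end = ~0UL;
        return &pdom->domain;
}

/* domain_free: recover the driver structure from the handle and release it */
static void domain_free(struct iommu_domain *dom)
{
        if (!dom)
                return;
        free(container_of(dom, struct protection_domain, domain));
}

int main(void)
{
        struct iommu_domain *dom = domain_alloc(42);
        struct protection_domain *pdom =
                container_of(dom, struct protection_domain, domain);

        printf("domain id = %d\n", pdom->id);   /* prints 42 */
        domain_free(dom);
        return 0;
}

With the embedding, the core code can hand out and reclaim driver domains through domain_alloc/domain_free without a separate private pointer that has to be kept in sync.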
drivers/iommu/amd_iommu.c
@@ -126,6 +126,11 @@ static int __init alloc_passthrough_domain(void);
  *
  ****************************************************************************/
 
+static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+{
+        return container_of(dom, struct protection_domain, domain);
+}
+
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
         struct iommu_dev_data *dev_data;
@@ -3236,42 +3241,45 @@ static int __init alloc_passthrough_domain(void)
         return 0;
 }
 
-static int amd_iommu_domain_init(struct iommu_domain *dom)
+static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
-        struct protection_domain *domain;
+        struct protection_domain *pdomain;
 
-        domain = protection_domain_alloc();
-        if (!domain)
-                goto out_free;
+        /* We only support unmanaged domains for now */
+        if (type != IOMMU_DOMAIN_UNMANAGED)
+                return NULL;
 
-        domain->mode = PAGE_MODE_3_LEVEL;
-        domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-        if (!domain->pt_root)
+        pdomain = protection_domain_alloc();
+        if (!pdomain)
                 goto out_free;
 
-        domain->iommu_domain = dom;
-
-        dom->priv = domain;
+        pdomain->mode = PAGE_MODE_3_LEVEL;
+        pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+        if (!pdomain->pt_root)
+                goto out_free;
 
-        dom->geometry.aperture_start = 0;
-        dom->geometry.aperture_end   = ~0ULL;
-        dom->geometry.force_aperture = true;
+        pdomain->domain.geometry.aperture_start = 0;
+        pdomain->domain.geometry.aperture_end   = ~0ULL;
+        pdomain->domain.geometry.force_aperture = true;
 
-        return 0;
+        return &pdomain->domain;
 
 out_free:
-        protection_domain_free(domain);
+        protection_domain_free(pdomain);
 
-        return -ENOMEM;
+        return NULL;
 }
 
-static void amd_iommu_domain_destroy(struct iommu_domain *dom)
+static void amd_iommu_domain_free(struct iommu_domain *dom)
 {
-        struct protection_domain *domain = dom->priv;
+        struct protection_domain *domain;
 
-        if (!domain)
+        if (!dom)
                 return;
 
+        domain = to_pdomain(dom);
+
         if (domain->dev_cnt > 0)
                 cleanup_domain(domain);
 
@@ -3284,8 +3292,6 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
                 free_gcr3_table(domain);
 
         protection_domain_free(domain);
-
-        dom->priv = NULL;
 }
 
 static void amd_iommu_detach_device(struct iommu_domain *dom,
@@ -3313,7 +3319,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
 static int amd_iommu_attach_device(struct iommu_domain *dom,
                                    struct device *dev)
 {
-        struct protection_domain *domain = dom->priv;
+        struct protection_domain *domain = to_pdomain(dom);
         struct iommu_dev_data *dev_data;
         struct amd_iommu *iommu;
         int ret;
@@ -3340,7 +3346,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
                          phys_addr_t paddr, size_t page_size, int iommu_prot)
 {
-        struct protection_domain *domain = dom->priv;
+        struct protection_domain *domain = to_pdomain(dom);
         int prot = 0;
         int ret;
 
@@ -3362,7 +3368,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
                               size_t page_size)
 {
-        struct protection_domain *domain = dom->priv;
+        struct protection_domain *domain = to_pdomain(dom);
         size_t unmap_size;
 
         if (domain->mode == PAGE_MODE_NONE)
@@ -3380,7 +3386,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
                                           dma_addr_t iova)
 {
-        struct protection_domain *domain = dom->priv;
+        struct protection_domain *domain = to_pdomain(dom);
         unsigned long offset_mask;
         phys_addr_t paddr;
         u64 *pte, __pte;
@@ -3420,8 +3426,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
 
 static const struct iommu_ops amd_iommu_ops = {
         .capable = amd_iommu_capable,
-        .domain_init = amd_iommu_domain_init,
-        .domain_destroy = amd_iommu_domain_destroy,
+        .domain_alloc = amd_iommu_domain_alloc,
+        .domain_free = amd_iommu_domain_free,
         .attach_dev = amd_iommu_attach_device,
         .detach_dev = amd_iommu_detach_device,
         .map = amd_iommu_map,
@@ -3483,7 +3489,7 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
 
 void amd_iommu_domain_direct_map(struct iommu_domain *dom)
 {
-        struct protection_domain *domain = dom->priv;
+        struct protection_domain *domain = to_pdomain(dom);
         unsigned long flags;
 
         spin_lock_irqsave(&domain->lock, flags);
@@ -3504,7 +3510,7 @@ EXPORT_SYMBOL(amd_iommu_domain_direct_map);
 
 int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
 {
-        struct protection_domain *domain = dom->priv;
+        struct protection_domain *domain = to_pdomain(dom);
         unsigned long flags;
         int levels, ret;
 
@@ -3616,7 +3622,7 @@ static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
 int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
                          u64 address)
 {
-        struct protection_domain *domain = dom->priv;
+        struct protection_domain *domain = to_pdomain(dom);
         unsigned long flags;
         int ret;
 
@@ -3638,7 +3644,7 @@ static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
 
 int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
 {
-        struct protection_domain *domain = dom->priv;
+        struct protection_domain *domain = to_pdomain(dom);
         unsigned long flags;
         int ret;
 
@@ -3718,7 +3724,7 @@ static int __clear_gcr3(struct protection_domain *domain, int pasid)
 int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
                               unsigned long cr3)
 {
-        struct protection_domain *domain = dom->priv;
+        struct protection_domain *domain = to_pdomain(dom);
         unsigned long flags;
         int ret;
 
@@ -3732,7 +3738,7 @@ EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
 
 int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
 {
-        struct protection_domain *domain = dom->priv;
+        struct protection_domain *domain = to_pdomain(dom);
         unsigned long flags;
         int ret;
 
@@ -3765,17 +3771,17 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr);
 
 struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
 {
-        struct protection_domain *domain;
+        struct protection_domain *pdomain;
 
-        domain = get_domain(&pdev->dev);
-        if (IS_ERR(domain))
+        pdomain = get_domain(&pdev->dev);
+        if (IS_ERR(pdomain))
                 return NULL;
 
         /* Only return IOMMUv2 domains */
-        if (!(domain->flags & PD_IOMMUV2_MASK))
+        if (!(pdomain->flags & PD_IOMMUV2_MASK))
                 return NULL;
 
-        return domain->iommu_domain;
+        return &pdomain->domain;
 }
 EXPORT_SYMBOL(amd_iommu_get_v2_domain);
drivers/iommu/amd_iommu_types.h
@@ -400,6 +400,8 @@ struct iommu_domain;
 struct protection_domain {
         struct list_head list;  /* for list of all protection domains */
         struct list_head dev_list; /* List of all devices in this domain */
+        struct iommu_domain domain; /* generic domain handle used by
+                                       iommu core code */
         spinlock_t lock;        /* mostly used to lock the page table*/
         struct mutex api_lock;  /* protect page tables in the iommu-api path */
         u16 id;                 /* the domain id written to the device table */
@@ -411,10 +413,7 @@ struct protection_domain {
         bool updated;           /* complete domain flush required */
         unsigned dev_cnt;       /* devices assigned to this domain */
         unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
         void *priv;             /* private data */
-        struct iommu_domain *iommu_domain; /* Pointer to generic
-                                              domain structure */
 };
 
 /*