Commit a5a91e54 authored by Vasant Hegde, committed by Joerg Roedel

iommu/amd: Add SVA domain support

- Allocate the SVA domain and set up the mmu notifier. In the free path,
  unregister the mmu notifier and free the protection domain.

- Add an mmu notifier callback function. It retrieves the SVA protection
  domain and invalidates the IO/TLB.
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20240418103400.6229-16-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 80af5a45
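
For context, here is a hypothetical driver-side sketch of how these new paths get exercised. It is not part of this patch: the function name my_dev_bind_mm() is made up, but the iommu_dev_*_feature() and iommu_sva_*() calls are the generic IOMMU SVA API, which on AMD hardware now reaches amd_iommu_domain_alloc_sva() and iommu_sva_set_dev_pasid().

#include <linux/err.h>
#include <linux/iommu.h>

/* Hypothetical illustration; not part of this patch. */
static int my_dev_bind_mm(struct device *dev, struct mm_struct *mm)
{
	struct iommu_sva *handle;
	u32 pasid;
	int ret;

	/* SVA is now an accepted feature in the AMD driver's enable/disable paths. */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	/*
	 * The first bind for this mm allocates an SVA domain through
	 * ops->domain_alloc_sva() and attaches it to a PASID through
	 * ops->set_dev_pasid().
	 */
	handle = iommu_sva_bind_device(dev, mm);
	if (IS_ERR(handle)) {
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
		return PTR_ERR(handle);
	}

	pasid = iommu_sva_get_pasid(handle);
	/* ... program the device to tag its DMA with this PASID ... */

	/* Tear down: detach the PASID; the last unbind frees the SVA domain. */
	iommu_sva_unbind_device(handle);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return 0;
}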
@@ -45,6 +45,11 @@ extern enum io_pgtable_fmt amd_iommu_pgtable;
 extern int amd_iommu_gpt_level;
 
 /* Protection domain ops */
+struct protection_domain *protection_domain_alloc(unsigned int type);
+void protection_domain_free(struct protection_domain *domain);
+struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
+						struct mm_struct *mm);
+void amd_iommu_domain_free(struct iommu_domain *dom);
 int iommu_sva_set_dev_pasid(struct iommu_domain *domain,
 			    struct device *dev, ioasid_t pasid);
 void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid);
...
@@ -586,6 +586,7 @@ struct protection_domain {
 	unsigned dev_cnt;		/* devices assigned to this domain */
 	unsigned dev_iommu[MAX_IOMMUS];	/* per-IOMMU reference count */
+	struct mmu_notifier mn;		/* mmu notifier for the SVA domain */
 	struct list_head dev_data_list;	/* List of pdom_dev_data */
 };
...
...@@ -2280,7 +2280,7 @@ static void cleanup_domain(struct protection_domain *domain) ...@@ -2280,7 +2280,7 @@ static void cleanup_domain(struct protection_domain *domain)
WARN_ON(domain->dev_cnt != 0); WARN_ON(domain->dev_cnt != 0);
} }
static void protection_domain_free(struct protection_domain *domain) void protection_domain_free(struct protection_domain *domain)
{ {
if (!domain) if (!domain)
return; return;
...@@ -2323,7 +2323,7 @@ static int protection_domain_init_v2(struct protection_domain *pdom) ...@@ -2323,7 +2323,7 @@ static int protection_domain_init_v2(struct protection_domain *pdom)
return 0; return 0;
} }
static struct protection_domain *protection_domain_alloc(unsigned int type) struct protection_domain *protection_domain_alloc(unsigned int type)
{ {
struct io_pgtable_ops *pgtbl_ops; struct io_pgtable_ops *pgtbl_ops;
struct protection_domain *domain; struct protection_domain *domain;
...@@ -2346,6 +2346,7 @@ static struct protection_domain *protection_domain_alloc(unsigned int type) ...@@ -2346,6 +2346,7 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
switch (type) { switch (type) {
/* No need to allocate io pgtable ops in passthrough mode */ /* No need to allocate io pgtable ops in passthrough mode */
case IOMMU_DOMAIN_IDENTITY: case IOMMU_DOMAIN_IDENTITY:
case IOMMU_DOMAIN_SVA:
return domain; return domain;
case IOMMU_DOMAIN_DMA: case IOMMU_DOMAIN_DMA:
pgtable = amd_iommu_pgtable; pgtable = amd_iommu_pgtable;
...@@ -2465,7 +2466,7 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags, ...@@ -2465,7 +2466,7 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
return do_iommu_domain_alloc(type, dev, flags); return do_iommu_domain_alloc(type, dev, flags);
} }
static void amd_iommu_domain_free(struct iommu_domain *dom) void amd_iommu_domain_free(struct iommu_domain *dom)
{ {
struct protection_domain *domain; struct protection_domain *domain;
unsigned long flags; unsigned long flags;
...@@ -2833,6 +2834,7 @@ static int amd_iommu_dev_enable_feature(struct device *dev, ...@@ -2833,6 +2834,7 @@ static int amd_iommu_dev_enable_feature(struct device *dev,
switch (feat) { switch (feat) {
case IOMMU_DEV_FEAT_IOPF: case IOMMU_DEV_FEAT_IOPF:
case IOMMU_DEV_FEAT_SVA:
break; break;
default: default:
ret = -EINVAL; ret = -EINVAL;
...@@ -2848,6 +2850,7 @@ static int amd_iommu_dev_disable_feature(struct device *dev, ...@@ -2848,6 +2850,7 @@ static int amd_iommu_dev_disable_feature(struct device *dev,
switch (feat) { switch (feat) {
case IOMMU_DEV_FEAT_IOPF: case IOMMU_DEV_FEAT_IOPF:
case IOMMU_DEV_FEAT_SVA:
break; break;
default: default:
ret = -EINVAL; ret = -EINVAL;
...@@ -2860,6 +2863,7 @@ const struct iommu_ops amd_iommu_ops = { ...@@ -2860,6 +2863,7 @@ const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable, .capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc, .domain_alloc = amd_iommu_domain_alloc,
.domain_alloc_user = amd_iommu_domain_alloc_user, .domain_alloc_user = amd_iommu_domain_alloc_user,
.domain_alloc_sva = amd_iommu_domain_alloc_sva,
.probe_device = amd_iommu_probe_device, .probe_device = amd_iommu_probe_device,
.release_device = amd_iommu_release_device, .release_device = amd_iommu_release_device,
.probe_finalize = amd_iommu_probe_finalize, .probe_finalize = amd_iommu_probe_finalize,
......
@@ -56,6 +56,49 @@ static void remove_pdom_dev_pasid(struct protection_domain *pdom,
 	}
 }
 
+static void sva_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
+					       struct mm_struct *mm,
+					       unsigned long start, unsigned long end)
+{
+	struct pdom_dev_data *pdom_dev_data;
+	struct protection_domain *sva_pdom;
+	unsigned long flags;
+
+	sva_pdom = container_of(mn, struct protection_domain, mn);
+
+	spin_lock_irqsave(&sva_pdom->lock, flags);
+
+	for_each_pdom_dev_data(pdom_dev_data, sva_pdom) {
+		amd_iommu_dev_flush_pasid_pages(pdom_dev_data->dev_data,
+						pdom_dev_data->pasid,
+						start, end - start);
+	}
+
+	spin_unlock_irqrestore(&sva_pdom->lock, flags);
+}
+
+static void sva_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+	struct pdom_dev_data *pdom_dev_data, *next;
+	struct protection_domain *sva_pdom;
+	unsigned long flags;
+
+	sva_pdom = container_of(mn, struct protection_domain, mn);
+
+	spin_lock_irqsave(&sva_pdom->lock, flags);
+
+	/* Assume dev_data_list contains same PASID with different devices */
+	for_each_pdom_dev_data_safe(pdom_dev_data, next, sva_pdom)
+		remove_dev_pasid(pdom_dev_data);
+
+	spin_unlock_irqrestore(&sva_pdom->lock, flags);
+}
+
+static const struct mmu_notifier_ops sva_mn = {
+	.arch_invalidate_secondary_tlbs = sva_arch_invalidate_secondary_tlbs,
+	.release = sva_mn_release,
+};
+
 int iommu_sva_set_dev_pasid(struct iommu_domain *domain,
 			    struct device *dev, ioasid_t pasid)
 {
@@ -120,3 +163,40 @@ void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
 
 	spin_unlock_irqrestore(&sva_pdom->lock, flags);
 }
+
+static void iommu_sva_domain_free(struct iommu_domain *domain)
+{
+	struct protection_domain *sva_pdom = to_pdomain(domain);
+
+	if (sva_pdom->mn.ops)
+		mmu_notifier_unregister(&sva_pdom->mn, domain->mm);
+
+	amd_iommu_domain_free(domain);
+}
+
+static const struct iommu_domain_ops amd_sva_domain_ops = {
+	.set_dev_pasid = iommu_sva_set_dev_pasid,
+	.free	       = iommu_sva_domain_free
+};
+
+struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
+						struct mm_struct *mm)
+{
+	struct protection_domain *pdom;
+	int ret;
+
+	pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA);
+	if (!pdom)
+		return ERR_PTR(-ENOMEM);
+
+	pdom->domain.ops = &amd_sva_domain_ops;
+	pdom->mn.ops = &sva_mn;
+
+	ret = mmu_notifier_register(&pdom->mn, mm);
+	if (ret) {
+		protection_domain_free(pdom);
+		return ERR_PTR(ret);
+	}
+
+	return &pdom->domain;
+}
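
For completeness, a rough sketch of the caller side of the new notifier hook, again not part of this patch: when the CPU page tables of the bound mm change, core MM code (the real call sites live under mm/) notifies secondary TLBs, which invokes sva_arch_invalidate_secondary_tlbs() above. The helper example_invalidate_range() below is made up for illustration.

#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>

/* Hypothetical illustration of the caller side; not part of this patch. */
static void example_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	/* ... CPU page-table entries for [start, end) are cleared here ... */

	/*
	 * Core MM notifies registered secondary-TLB users.  For an AMD SVA
	 * domain this ends up in sva_arch_invalidate_secondary_tlbs(), which
	 * flushes the IOTLB for every (device, PASID) attached to the domain.
	 */
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}

On process exit, the mm teardown path similarly invokes the ->release() callback, sva_mn_release() above, which detaches every PASID still bound to the dying mm.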