Commit 8cc233de authored by Vasant Hegde, committed by Joerg Roedel

iommu/amd/io-pgtable: Implement map_pages io_pgtable_ops callback

Implement the io_pgtable_ops->map_pages() callback for the AMD driver, and stop using the deprecated io_pgtable_ops->map() callback.
Suggested-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Link: https://lore.kernel.org/r/20220825063939.8360-2-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 7e18e42e
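For reference (not part of this change): the callback being implemented here is declared in include/linux/io-pgtable.h, where its prototype at the time of this series looks like this:

	int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);

Compared with the older map() callback, it takes a page count so several pages of the same size can be installed in one call, and it reports the number of bytes actually mapped back through *mapped.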
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -360,8 +360,9 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
  * supporting all features of AMD IOMMU page tables like level skipping
  * and full 64 bit address spaces.
  */
-static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
-			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			      int prot, gfp_t gfp, size_t *mapped)
 {
 	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
 	LIST_HEAD(freelist);
@@ -369,39 +370,47 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
 	u64 __pte, *pte;
 	int ret, i, count;
 
-	BUG_ON(!IS_ALIGNED(iova, size));
-	BUG_ON(!IS_ALIGNED(paddr, size));
+	BUG_ON(!IS_ALIGNED(iova, pgsize));
+	BUG_ON(!IS_ALIGNED(paddr, pgsize));
 
 	ret = -EINVAL;
 	if (!(prot & IOMMU_PROT_MASK))
 		goto out;
 
-	count = PAGE_SIZE_PTE_COUNT(size);
-	pte   = alloc_pte(dom, iova, size, NULL, gfp, &updated);
+	while (pgcount > 0) {
+		count = PAGE_SIZE_PTE_COUNT(pgsize);
+		pte   = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
 
-	ret = -ENOMEM;
-	if (!pte)
-		goto out;
+		ret = -ENOMEM;
+		if (!pte)
+			goto out;
 
-	for (i = 0; i < count; ++i)
-		free_clear_pte(&pte[i], pte[i], &freelist);
+		for (i = 0; i < count; ++i)
+			free_clear_pte(&pte[i], pte[i], &freelist);
 
-	if (!list_empty(&freelist))
-		updated = true;
+		if (!list_empty(&freelist))
+			updated = true;
 
-	if (count > 1) {
-		__pte = PAGE_SIZE_PTE(__sme_set(paddr), size);
-		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
-	} else
-		__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+		if (count > 1) {
+			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
+			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+		} else
+			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
 
-	if (prot & IOMMU_PROT_IR)
-		__pte |= IOMMU_PTE_IR;
-	if (prot & IOMMU_PROT_IW)
-		__pte |= IOMMU_PTE_IW;
+		if (prot & IOMMU_PROT_IR)
+			__pte |= IOMMU_PTE_IR;
+		if (prot & IOMMU_PROT_IW)
+			__pte |= IOMMU_PTE_IW;
 
-	for (i = 0; i < count; ++i)
-		pte[i] = __pte;
+		for (i = 0; i < count; ++i)
+			pte[i] = __pte;
+
+		iova  += pgsize;
+		paddr += pgsize;
+		pgcount--;
+		if (mapped)
+			*mapped += pgsize;
+	}
 
 	ret = 0;
 
@@ -514,7 +523,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
 	cfg->tlb = &v1_flush_ops;
 
-	pgtable->iop.ops.map          = iommu_v1_map_page;
+	pgtable->iop.ops.map_pages    = iommu_v1_map_pages;
 	pgtable->iop.ops.unmap        = iommu_v1_unmap_page;
 	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
...
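A minimal, hypothetical caller sketch (illustrative only, not part of this patch) of the per-call semantics the new callback provides: pgcount pages of pgsize are mapped in one invocation, and *mapped reports how many bytes were actually installed, which matters when the call fails partway through. The concrete values below are assumptions for illustration.

	size_t mapped = 0;
	int ret;

	/* Illustrative request: 16 contiguous 4 KiB pages in a single call. */
	ret = ops->map_pages(ops, iova, paddr, SZ_4K, 16,
			     IOMMU_PROT_IR | IOMMU_PROT_IW, GFP_KERNEL, &mapped);
	if (ret)
		/*
		 * 'mapped' holds the number of bytes installed before the
		 * failure, so the caller knows exactly how much to roll back.
		 */
		pr_debug("mapped %zu bytes before error %d\n", mapped, ret);

Because the new loop advances iova and paddr and bumps *mapped once per page, a partial failure leaves the counter consistent with what is actually present in the page table.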