Commit 9ac0b338 authored by Jason Gunthorpe, committed by Joerg Roedel

iommu/amd: Narrow the use of struct protection_domain to invalidation

The AMD io_pgtable code does not implement the tlb ops callbacks; instead it
invokes the invalidation ops directly on the struct protection_domain.

Narrow the use of struct protection_domain to only those few code paths.
Make everything else properly use struct amd_io_pgtable through the call
chains, which is the correct modular type for an io-pgtable module.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Link: https://lore.kernel.org/r/9-v2-831cdc4d00f3+1a315-amd_iopgtbl_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 47f218d1
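The patch relies on struct amd_io_pgtable being embedded in struct protection_domain as the iop member, so the few remaining invalidation paths can recover the domain with container_of() while everything else stays on the io-pgtable type. A minimal sketch of that relationship and of the two helpers used in the hunks below; the iop, pgtbl and lock members appear in the diff itself, but the abbreviated struct layout and the exact macro bodies here are assumptions, not copied from the tree:

	/* Sketch only: layouts abbreviated, macro bodies assumed. */
	struct protection_domain {
		spinlock_t		lock;	/* taken around flushes */
		struct amd_io_pgtable	iop;	/* per-domain io-pgtable state */
		/* ... */
	};

	/*
	 * io-pgtable callbacks receive a struct io_pgtable_ops pointer;
	 * io_pgtable_ops_to_data() maps it back to the AMD page-table state.
	 */
	#define io_pgtable_ops_to_data(x) \
		container_of((x), struct amd_io_pgtable, pgtbl.ops)

	/* Only the invalidation paths go one step further, to the domain. */
	#define io_pgtable_ops_to_domain(x) \
		container_of(io_pgtable_ops_to_data(x), \
			     struct protection_domain, iop)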
drivers/iommu/amd/io_pgtable.c

@@ -137,11 +137,13 @@ static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
  * another level increases the size of the address space by 9 bits to a size up
  * to 64 bits.
  */
-static bool increase_address_space(struct protection_domain *domain,
+static bool increase_address_space(struct amd_io_pgtable *pgtable,
 				   unsigned long address,
 				   gfp_t gfp)
 {
-	struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg;
+	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
+	struct protection_domain *domain =
+		container_of(pgtable, struct protection_domain, iop);
 	unsigned long flags;
 	bool ret = true;
 	u64 *pte;
@@ -152,17 +154,17 @@ static bool increase_address_space(struct protection_domain *domain,
 
 	spin_lock_irqsave(&domain->lock, flags);
 
-	if (address <= PM_LEVEL_SIZE(domain->iop.mode))
+	if (address <= PM_LEVEL_SIZE(pgtable->mode))
 		goto out;
 
 	ret = false;
-	if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
+	if (WARN_ON_ONCE(pgtable->mode == PAGE_MODE_6_LEVEL))
 		goto out;
 
-	*pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));
+	*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
 
-	domain->iop.root  = pte;
-	domain->iop.mode += 1;
+	pgtable->root  = pte;
+	pgtable->mode += 1;
 	amd_iommu_update_and_flush_device_table(domain);
 
 	pte = NULL;
@@ -175,31 +177,31 @@ static bool increase_address_space(struct protection_domain *domain,
 	return ret;
 }
 
-static u64 *alloc_pte(struct protection_domain *domain,
+static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
 		      unsigned long address,
 		      unsigned long page_size,
 		      u64 **pte_page,
 		      gfp_t gfp,
 		      bool *updated)
 {
-	struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg;
+	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
 	int level, end_lvl;
 	u64 *pte, *page;
 
 	BUG_ON(!is_power_of_2(page_size));
 
-	while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
+	while (address > PM_LEVEL_SIZE(pgtable->mode)) {
 		/*
 		 * Return an error if there is no memory to update the
 		 * page-table.
 		 */
-		if (!increase_address_space(domain, address, gfp))
+		if (!increase_address_space(pgtable, address, gfp))
 			return NULL;
 	}
 
-	level   = domain->iop.mode - 1;
-	pte     = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
+	level   = pgtable->mode - 1;
+	pte     = &pgtable->root[PM_LEVEL_INDEX(level, address)];
 	address = PAGE_SIZE_ALIGN(address, page_size);
 	end_lvl = PAGE_SIZE_LEVEL(page_size);
@@ -348,7 +350,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
 			      int prot, gfp_t gfp, size_t *mapped)
 {
-	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
+	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
 	LIST_HEAD(freelist);
 	bool updated = false;
 	u64 __pte, *pte;
@@ -365,7 +367,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 
 	while (pgcount > 0) {
 		count  = PAGE_SIZE_PTE_COUNT(pgsize);
-		pte    = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
+		pte    = alloc_pte(pgtable, iova, pgsize, NULL, gfp, &updated);
 
 		ret = -ENOMEM;
 		if (!pte)
@@ -402,6 +404,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 
 out:
 	if (updated) {
+		struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
 		unsigned long flags;
 
 		spin_lock_irqsave(&dom->lock, flags);
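Put together, the tail of iommu_v1_map_pages() after this change keeps the protection_domain lookup only on the flush path. A rough reconstruction for context; the flush call, the o_iova variable and the unlock are not visible in the hunk above and are assumptions based on the matching v2 path below:

	out:
		if (updated) {
			struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
			unsigned long flags;

			spin_lock_irqsave(&dom->lock, flags);
			/* Flush the domain TLB(s) for the range that was just mapped. */
			amd_iommu_domain_flush_pages(dom, o_iova, size);
			spin_unlock_irqrestore(&dom->lock, flags);
		}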
drivers/iommu/amd/io_pgtable_v2.c

@@ -233,8 +233,8 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
 			      int prot, gfp_t gfp, size_t *mapped)
 {
-	struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
-	struct io_pgtable_cfg *cfg = &pdom->iop.pgtbl.cfg;
+	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
 	u64 *pte;
 	unsigned long map_size;
 	unsigned long mapped_size = 0;
@@ -251,7 +251,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 
 	while (mapped_size < size) {
 		map_size = get_alloc_page_size(pgsize);
-		pte = v2_alloc_pte(cfg->amd.nid, pdom->iop.pgd,
+		pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,
 				   iova, map_size, gfp, &updated);
 		if (!pte) {
 			ret = -EINVAL;
@@ -266,8 +266,11 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 	}
 
 out:
-	if (updated)
+	if (updated) {
+		struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
+
 		amd_iommu_domain_flush_pages(pdom, o_iova, size);
+	}
 
 	if (mapped)
 		*mapped += mapped_size;