Commit f15d9a99 authored by Joerg Roedel

iommu/amd: Remove domain->updated

This struct member was used to track whether a domain
change requires updates to the device-table and IOMMU cache
flushes. The problem is that access to this field is racy
since locking in the common mapping code-paths has been
eliminated.

Move the updated field to the stack to get rid of all
potential races and remove the field from the struct.

Fixes: 92d420ec ("iommu/amd: Relax locking in dma_ops path")
Reviewed-by: Filippo Sironi <sironi@amazon.de>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 0b15e02f
...@@ -1458,10 +1458,11 @@ static void free_pagetable(struct protection_domain *domain) ...@@ -1458,10 +1458,11 @@ static void free_pagetable(struct protection_domain *domain)
* another level increases the size of the address space by 9 bits to a size up * another level increases the size of the address space by 9 bits to a size up
* to 64 bits. * to 64 bits.
*/ */
static void increase_address_space(struct protection_domain *domain, static bool increase_address_space(struct protection_domain *domain,
gfp_t gfp) gfp_t gfp)
{ {
unsigned long flags; unsigned long flags;
bool ret = false;
u64 *pte; u64 *pte;
spin_lock_irqsave(&domain->lock, flags); spin_lock_irqsave(&domain->lock, flags);
...@@ -1478,19 +1479,21 @@ static void increase_address_space(struct protection_domain *domain, ...@@ -1478,19 +1479,21 @@ static void increase_address_space(struct protection_domain *domain,
iommu_virt_to_phys(domain->pt_root)); iommu_virt_to_phys(domain->pt_root));
domain->pt_root = pte; domain->pt_root = pte;
domain->mode += 1; domain->mode += 1;
domain->updated = true;
ret = true;
out: out:
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&domain->lock, flags);
return; return ret;
} }
static u64 *alloc_pte(struct protection_domain *domain, static u64 *alloc_pte(struct protection_domain *domain,
unsigned long address, unsigned long address,
unsigned long page_size, unsigned long page_size,
u64 **pte_page, u64 **pte_page,
gfp_t gfp) gfp_t gfp,
bool *updated)
{ {
int level, end_lvl; int level, end_lvl;
u64 *pte, *page; u64 *pte, *page;
...@@ -1498,7 +1501,7 @@ static u64 *alloc_pte(struct protection_domain *domain, ...@@ -1498,7 +1501,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
BUG_ON(!is_power_of_2(page_size)); BUG_ON(!is_power_of_2(page_size));
while (address > PM_LEVEL_SIZE(domain->mode)) while (address > PM_LEVEL_SIZE(domain->mode))
increase_address_space(domain, gfp); *updated = increase_address_space(domain, gfp) || *updated;
level = domain->mode - 1; level = domain->mode - 1;
pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
...@@ -1530,7 +1533,7 @@ static u64 *alloc_pte(struct protection_domain *domain, ...@@ -1530,7 +1533,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
for (i = 0; i < count; ++i) for (i = 0; i < count; ++i)
cmpxchg64(&lpte[i], __pte, 0ULL); cmpxchg64(&lpte[i], __pte, 0ULL);
domain->updated = true; *updated = true;
continue; continue;
} }
...@@ -1547,7 +1550,7 @@ static u64 *alloc_pte(struct protection_domain *domain, ...@@ -1547,7 +1550,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
if (cmpxchg64(pte, __pte, __npte) != __pte) if (cmpxchg64(pte, __pte, __npte) != __pte)
free_page((unsigned long)page); free_page((unsigned long)page);
else if (IOMMU_PTE_PRESENT(__pte)) else if (IOMMU_PTE_PRESENT(__pte))
domain->updated = true; *updated = true;
continue; continue;
} }
...@@ -1656,28 +1659,29 @@ static int iommu_map_page(struct protection_domain *dom, ...@@ -1656,28 +1659,29 @@ static int iommu_map_page(struct protection_domain *dom,
gfp_t gfp) gfp_t gfp)
{ {
struct page *freelist = NULL; struct page *freelist = NULL;
bool updated = false;
u64 __pte, *pte; u64 __pte, *pte;
int i, count; int ret, i, count;
BUG_ON(!IS_ALIGNED(bus_addr, page_size)); BUG_ON(!IS_ALIGNED(bus_addr, page_size));
BUG_ON(!IS_ALIGNED(phys_addr, page_size)); BUG_ON(!IS_ALIGNED(phys_addr, page_size));
ret = -EINVAL;
if (!(prot & IOMMU_PROT_MASK)) if (!(prot & IOMMU_PROT_MASK))
return -EINVAL; goto out;
count = PAGE_SIZE_PTE_COUNT(page_size); count = PAGE_SIZE_PTE_COUNT(page_size);
pte = alloc_pte(dom, bus_addr, page_size, NULL, gfp); pte = alloc_pte(dom, bus_addr, page_size, NULL, gfp, &updated);
if (!pte) { ret = -ENOMEM;
update_domain(dom); if (!pte)
return -ENOMEM; goto out;
}
for (i = 0; i < count; ++i) for (i = 0; i < count; ++i)
freelist = free_clear_pte(&pte[i], pte[i], freelist); freelist = free_clear_pte(&pte[i], pte[i], freelist);
if (freelist != NULL) if (freelist != NULL)
dom->updated = true; updated = true;
if (count > 1) { if (count > 1) {
__pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size); __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
...@@ -1693,12 +1697,16 @@ static int iommu_map_page(struct protection_domain *dom, ...@@ -1693,12 +1697,16 @@ static int iommu_map_page(struct protection_domain *dom,
for (i = 0; i < count; ++i) for (i = 0; i < count; ++i)
pte[i] = __pte; pte[i] = __pte;
update_domain(dom); ret = 0;
out:
if (updated)
update_domain(dom);
/* Everything flushed out, free pages now */ /* Everything flushed out, free pages now */
free_page_list(freelist); free_page_list(freelist);
return 0; return ret;
} }
static unsigned long iommu_unmap_page(struct protection_domain *dom, static unsigned long iommu_unmap_page(struct protection_domain *dom,
...@@ -2399,15 +2407,10 @@ static void update_device_table(struct protection_domain *domain) ...@@ -2399,15 +2407,10 @@ static void update_device_table(struct protection_domain *domain)
static void update_domain(struct protection_domain *domain) static void update_domain(struct protection_domain *domain)
{ {
if (!domain->updated)
return;
update_device_table(domain); update_device_table(domain);
domain_flush_devices(domain); domain_flush_devices(domain);
domain_flush_tlb_pde(domain); domain_flush_tlb_pde(domain);
domain->updated = false;
} }
static int dir2prot(enum dma_data_direction direction) static int dir2prot(enum dma_data_direction direction)
...@@ -3333,7 +3336,6 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom) ...@@ -3333,7 +3336,6 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
/* Update data structure */ /* Update data structure */
domain->mode = PAGE_MODE_NONE; domain->mode = PAGE_MODE_NONE;
domain->updated = true;
/* Make changes visible to IOMMUs */ /* Make changes visible to IOMMUs */
update_domain(domain); update_domain(domain);
...@@ -3379,7 +3381,6 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids) ...@@ -3379,7 +3381,6 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
domain->glx = levels; domain->glx = levels;
domain->flags |= PD_IOMMUV2_MASK; domain->flags |= PD_IOMMUV2_MASK;
domain->updated = true;
update_domain(domain); update_domain(domain);
......
...@@ -475,7 +475,6 @@ struct protection_domain { ...@@ -475,7 +475,6 @@ struct protection_domain {
int glx; /* Number of levels for GCR3 table */ int glx; /* Number of levels for GCR3 table */
u64 *gcr3_tbl; /* Guest CR3 table */ u64 *gcr3_tbl; /* Guest CR3 table */
unsigned long flags; /* flags to find out type of domain */ unsigned long flags; /* flags to find out type of domain */
bool updated; /* complete domain flush required */
unsigned dev_cnt; /* devices assigned to this domain */ unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment