Commit f9b4df79 authored by Suravee Suthikulpanit, committed by Joerg Roedel

iommu/amd: Declare functions as extern

And move the declarations to a header file so that they can be included across
multiple files. There is no functional change.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20201215073705.123786-6-suravee.suthikulpanit@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 1f585530
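For context, a minimal sketch (not part of this patch) of how another file in the AMD IOMMU driver could call the newly exported flush helpers once the declarations live in the shared amd_iommu.h. The helper name example_flush_domain() and the exact includes are illustrative assumptions; the lock/flush/wait pattern mirrors the call sites changed below.

#include <linux/spinlock.h>

#include "amd_iommu.h"
#include "amd_iommu_types.h"

/* Hypothetical caller: flush a protection domain's IO/TLB and wait. */
static void example_flush_domain(struct protection_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);
	/* Invalidate the whole IO/TLB for the domain, including PDEs. */
	amd_iommu_domain_flush_tlb_pde(domain);
	/* Wait until the queued invalidation commands have completed. */
	amd_iommu_domain_flush_complete(domain);
	spin_unlock_irqrestore(&domain->lock, flags);
}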
@@ -57,6 +57,9 @@ extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
 extern int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
				u64 address);
 extern void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
+extern void amd_iommu_domain_update(struct protection_domain *domain);
+extern void amd_iommu_domain_flush_complete(struct protection_domain *domain);
+extern void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain);
 extern int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
 extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
				     unsigned long cr3);
......
@@ -86,7 +86,6 @@ struct iommu_cmd {

 struct kmem_cache *amd_iommu_irq_cache;

-static void update_domain(struct protection_domain *domain);
 static void detach_device(struct device *dev);

 /****************************************************************************
@@ -1313,12 +1312,12 @@ static void domain_flush_pages(struct protection_domain *domain,
 }

 /* Flush the whole IO/TLB for a given protection domain - including PDE */
-static void domain_flush_tlb_pde(struct protection_domain *domain)
+void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
 {
	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
 }

-static void domain_flush_complete(struct protection_domain *domain)
+void amd_iommu_domain_flush_complete(struct protection_domain *domain)
 {
	int i;
@@ -1343,7 +1342,7 @@ static void domain_flush_np_cache(struct protection_domain *domain,

		spin_lock_irqsave(&domain->lock, flags);
		domain_flush_pages(domain, iova, size);
-		domain_flush_complete(domain);
+		amd_iommu_domain_flush_complete(domain);
		spin_unlock_irqrestore(&domain->lock, flags);
	}
 }
@@ -1500,7 +1499,7 @@ static bool increase_address_space(struct protection_domain *domain,
	pgtable.root = pte;
	pgtable.mode += 1;
	amd_iommu_update_and_flush_device_table(domain);
-	domain_flush_complete(domain);
+	amd_iommu_domain_flush_complete(domain);

	/*
	 * Device Table needs to be updated and flushed before the new root can
@@ -1753,8 +1752,8 @@ static int iommu_map_page(struct protection_domain *dom,
		 * Updates and flushing already happened in
		 * increase_address_space().
		 */
-		domain_flush_tlb_pde(dom);
-		domain_flush_complete(dom);
+		amd_iommu_domain_flush_tlb_pde(dom);
+		amd_iommu_domain_flush_complete(dom);
		spin_unlock_irqrestore(&dom->lock, flags);
	}
@@ -1997,10 +1996,10 @@ static void do_detach(struct iommu_dev_data *dev_data)
	device_flush_dte(dev_data);

	/* Flush IOTLB */
-	domain_flush_tlb_pde(domain);
+	amd_iommu_domain_flush_tlb_pde(domain);

	/* Wait for the flushes to finish */
-	domain_flush_complete(domain);
+	amd_iommu_domain_flush_complete(domain);

	/* decrease reference counters - needs to happen after the flushes */
	domain->dev_iommu[iommu->index] -= 1;
@@ -2133,9 +2132,9 @@ static int attach_device(struct device *dev,
	 * left the caches in the IOMMU dirty. So we have to flush
	 * here to evict all dirty stuff.
	 */
-	domain_flush_tlb_pde(domain);
+	amd_iommu_domain_flush_tlb_pde(domain);

-	domain_flush_complete(domain);
+	amd_iommu_domain_flush_complete(domain);

 out:
	spin_unlock(&dev_data->lock);
@@ -2297,7 +2296,7 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
	domain_flush_devices(domain);
 }

-static void update_domain(struct protection_domain *domain)
+void amd_iommu_domain_update(struct protection_domain *domain)
 {
	struct domain_pgtable pgtable;
@@ -2306,8 +2305,8 @@ static void update_domain(struct protection_domain *domain)
	amd_iommu_update_and_flush_device_table(domain);

	/* Flush domain TLB(s) and wait for completion */
-	domain_flush_tlb_pde(domain);
-	domain_flush_complete(domain);
+	amd_iommu_domain_flush_tlb_pde(domain);
+	amd_iommu_domain_flush_complete(domain);
 }

 int __init amd_iommu_init_api(void)
@@ -2695,8 +2694,8 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
	unsigned long flags;

	spin_lock_irqsave(&dom->lock, flags);
-	domain_flush_tlb_pde(dom);
-	domain_flush_complete(dom);
+	amd_iommu_domain_flush_tlb_pde(dom);
+	amd_iommu_domain_flush_complete(dom);
	spin_unlock_irqrestore(&dom->lock, flags);
 }
@@ -2786,7 +2785,7 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
	amd_iommu_domain_clr_pt_root(domain);

	/* Make changes visible to IOMMUs */
-	update_domain(domain);
+	amd_iommu_domain_update(domain);

	/* Page-table is not visible to IOMMU anymore, so free it */
	free_pagetable(&pgtable);
@@ -2830,7 +2829,7 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
	domain->glx = levels;
	domain->flags |= PD_IOMMUV2_MASK;

-	update_domain(domain);
+	amd_iommu_domain_update(domain);

	ret = 0;
@@ -2867,7 +2866,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
	}

	/* Wait until IOMMU TLB flushes are complete */
-	domain_flush_complete(domain);
+	amd_iommu_domain_flush_complete(domain);

	/* Now flush device TLBs */
	list_for_each_entry(dev_data, &domain->dev_list, list) {
@@ -2893,7 +2892,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
	}

	/* Wait until all device TLBs are flushed */
-	domain_flush_complete(domain);
+	amd_iommu_domain_flush_complete(domain);

	ret = 0;
......
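Similarly, a hedged sketch of the third exported helper, amd_iommu_domain_update(): after a caller changes a domain's page-table configuration (as amd_iommu_domain_direct_map() and amd_iommu_domain_enable_v2() do above), a single call updates the device table and flushes the domain TLBs. The helper example_propagate_update() is hypothetical; the locking follows amd_iommu_domain_direct_map().

#include <linux/spinlock.h>

#include "amd_iommu.h"
#include "amd_iommu_types.h"

/* Hypothetical caller: propagate a page-table configuration change. */
static void example_propagate_update(struct protection_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	/* ... page-table root/mode changes would go here ... */

	/* Update the device table and flush domain TLB(s) on all IOMMUs. */
	amd_iommu_domain_update(domain);

	spin_unlock_irqrestore(&domain->lock, flags);
}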