Commit 75cc1018 authored by Lu Baolu, committed by Joerg Roedel

iommu/vt-d: Move clflush'es from iotlb_sync_map() to map_pages()

As the Intel VT-d driver has switched to the iommu_ops.map_pages()
callback, multiple pages of the same size will be mapped in a single
call. There is no need to keep the clflush'es in the iotlb_sync_map()
callback. Move them back into __domain_mapping() to simplify the code.
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20210720020615.4144323-4-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 3f34f125
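The pattern this patch restores is easy to see in isolation: write a run of
leaf PTEs, remember the first dirtied entry, and flush the whole contiguous
span with one cache-flush call when the run ends or a page-table page
boundary is crossed. Below is a minimal userspace C sketch of that
bookkeeping; the array, flush_cache_stub(), and the constants are all
illustrative stand-ins, not kernel code. Only the first_pte/pte tracking
mirrors what __domain_mapping() does with domain_flush_cache().

/*
 * Standalone sketch (not kernel code) of the flush-batching pattern:
 * flush once per contiguous run of dirtied PTEs instead of once per PTE.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PTES_PER_PAGE 512                 /* 4 KiB page-table page / 8-byte PTE */

static uint64_t ptes[4 * PTES_PER_PAGE];  /* four fake page-table pages */

/* Stand-in for domain_flush_cache(): just report the flushed span. */
static void flush_cache_stub(void *start, size_t bytes)
{
        printf("flush %zu bytes starting at pte index %td\n",
               bytes, (uint64_t *)start - ptes);
}

/* True when pte is the first slot of a page-table page. */
static int first_pte_in_page(const uint64_t *pte)
{
        return (pte - ptes) % PTES_PER_PAGE == 0;
}

int main(void)
{
        unsigned long nr_pages = 600;     /* spans two page-table pages */
        uint64_t *pte = &ptes[100];       /* start mid-page, like a real IOVA */
        uint64_t *first_pte = pte;
        uint64_t pteval = 0x1000 | 3;     /* fake phys addr + read/write bits */

        while (nr_pages > 0) {
                *pte = pteval;            /* "map" one 4 KiB page */
                pteval += 0x1000;
                nr_pages--;
                pte++;

                /* Same trigger as the patch: end of run or page crossing. */
                if (!nr_pages || first_pte_in_page(pte)) {
                        flush_cache_stub(first_pte,
                                         (char *)pte - (char *)first_pte);
                        first_pte = pte;
                }
        }
        return 0;
}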
@@ -2333,9 +2333,9 @@ static int
 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                  unsigned long phys_pfn, unsigned long nr_pages, int prot)
 {
+        struct dma_pte *first_pte = NULL, *pte = NULL;
         unsigned int largepage_lvl = 0;
         unsigned long lvl_pages = 0;
-        struct dma_pte *pte = NULL;
         phys_addr_t pteval;
         u64 attr;
 
@@ -2368,6 +2368,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                 pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
                 if (!pte)
                         return -ENOMEM;
+                first_pte = pte;
+
                 /* It is large page*/
                 if (largepage_lvl > 1) {
                         unsigned long end_pfn;
@@ -2415,14 +2417,14 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                  * recalculate 'pte' and switch back to smaller pages for the
                  * end of the mapping, if the trailing size is not enough to
                  * use another superpage (i.e. nr_pages < lvl_pages).
-                 *
-                 * We leave clflush for the leaf pte changes to iotlb_sync_map()
-                 * callback.
                  */
                 pte++;
                 if (!nr_pages || first_pte_in_page(pte) ||
-                    (largepage_lvl > 1 && nr_pages < lvl_pages))
+                    (largepage_lvl > 1 && nr_pages < lvl_pages)) {
+                        domain_flush_cache(domain, first_pte,
+                                           (void *)pte - (void *)first_pte);
                         pte = NULL;
+                }
         }
 
         return 0;
@@ -5563,39 +5565,6 @@ static bool risky_device(struct pci_dev *pdev)
         return false;
 }
 
-static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
-                             unsigned long clf_pages)
-{
-        struct dma_pte *first_pte = NULL, *pte = NULL;
-        unsigned long lvl_pages = 0;
-        int level = 0;
-
-        while (clf_pages > 0) {
-                if (!pte) {
-                        level = 0;
-                        pte = pfn_to_dma_pte(domain, clf_pfn, &level);
-                        if (WARN_ON(!pte))
-                                return;
-                        first_pte = pte;
-                        lvl_pages = lvl_to_nr_pages(level);
-                }
-
-                if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
-                        return;
-
-                clf_pages -= lvl_pages;
-                clf_pfn += lvl_pages;
-                pte++;
-
-                if (!clf_pages || first_pte_in_page(pte) ||
-                    (level > 1 && clf_pages < lvl_pages)) {
-                        domain_flush_cache(domain, first_pte,
-                                           (void *)pte - (void *)first_pte);
-                        pte = NULL;
-                }
-        }
-}
-
 static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
                                        unsigned long iova, size_t size)
 {
@@ -5605,9 +5574,6 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
         struct intel_iommu *iommu;
         int iommu_id;
 
-        if (!dmar_domain->iommu_coherency)
-                clflush_sync_map(dmar_domain, pfn, pages);
-
         for_each_domain_iommu(iommu_id, dmar_domain) {
                 iommu = g_iommus[iommu_id];
                 __mapping_notify_one(iommu, dmar_domain, pfn, pages);
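One detail worth unpacking from the removed helper: lvl_to_nr_pages(level)
converts a page-table level into the number of 4 KiB pages that a leaf entry
at that level maps, which is what let clflush_sync_map() advance clf_pfn in
superpage-sized steps. A rough standalone sketch of that relationship,
assuming the 9-bit stride per level used by VT-d (the real helper also
clamps against the maximum address width):

#include <stdio.h>

/*
 * Illustrative only: each page-table level covers 9 more address bits,
 * so a leaf entry at level N maps 2^((N - 1) * 9) 4 KiB pages:
 * level 1 -> 1 (4 KiB), level 2 -> 512 (2 MiB), level 3 -> 262144 (1 GiB).
 */
static unsigned long lvl_to_nr_pages_sketch(int level)
{
        return 1UL << ((level - 1) * 9);
}

int main(void)
{
        for (int lvl = 1; lvl <= 3; lvl++)
                printf("level %d maps %lu pages\n",
                       lvl, lvl_to_nr_pages_sketch(lvl));
        return 0;
}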