Commit ac1a3483 authored by Lu Baolu, committed by Joerg Roedel

iommu/vt-d: Add domain_flush_pasid_iotlb()

The VT-d spec requires to use PASID-based-IOTLB invalidation descriptor
to invalidate IOTLB and the paging-structure caches for a first-stage
page table. Add a generic helper to do this.

RID2PASID is used if the domain has been attached to a physical device,
otherwise real PASIDs that the domain has been attached to will be used.
The 'real' PASID attachment is handled in the subsequent change.
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20230802212427.1497170-4-jacob.jun.pan@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 2dcebc7d
...@@ -1467,6 +1467,18 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, ...@@ -1467,6 +1467,18 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&domain->lock, flags);
} }
/*
 * Invalidate the IOTLB and paging-structure caches for @npages pages
 * starting at @addr, for the first-stage page table of @domain on @iommu.
 *
 * The flush is issued with IOMMU_NO_PASID (RID2PASID); flushing of real
 * per-PASID attachments is handled in a later change.  @ih is the
 * invalidation hint forwarded unchanged to the descriptor.  domain->lock
 * is held across the queued-invalidation submission.
 */
static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
				     struct dmar_domain *domain, u64 addr,
				     unsigned long npages, bool ih)
{
	unsigned long irqflags;
	u16 domain_id = domain_id_iommu(domain, iommu);

	spin_lock_irqsave(&domain->lock, irqflags);
	qi_flush_piotlb(iommu, domain_id, IOMMU_NO_PASID, addr, npages, ih);
	spin_unlock_irqrestore(&domain->lock, irqflags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
struct dmar_domain *domain, struct dmar_domain *domain,
unsigned long pfn, unsigned int pages, unsigned long pfn, unsigned int pages,
...@@ -1484,7 +1496,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, ...@@ -1484,7 +1496,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
ih = 1 << 6; ih = 1 << 6;
if (domain->use_first_level) { if (domain->use_first_level) {
qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, pages, ih); domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
} else { } else {
unsigned long bitmask = aligned_pages - 1; unsigned long bitmask = aligned_pages - 1;
...@@ -1554,7 +1566,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain) ...@@ -1554,7 +1566,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
u16 did = domain_id_iommu(dmar_domain, iommu); u16 did = domain_id_iommu(dmar_domain, iommu);
if (dmar_domain->use_first_level) if (dmar_domain->use_first_level)
qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, 0, -1, 0); domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0);
else else
iommu->flush.flush_iotlb(iommu, did, 0, 0, iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH); DMA_TLB_DSI_FLUSH);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment