Commit 0455d317 authored by Yi Liu, committed by Joerg Roedel

iommu/vt-d: Add __iommu_flush_iotlb_psi()

Add __iommu_flush_iotlb_psi() to do the PSI IOTLB flush with a DID input
rather than calculating the DID within the helper.

This is useful when flushing the cache for a parent domain, which reuses
the DIDs of its nested domains.
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20240208082307.15759-3-yi.l.liu@intel.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 85ce8e1d
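
Why pass the DID instead of deriving it inside the helper? A parent domain's
mappings may be cached under the DIDs of nested domains that reuse its
second-stage tables, so one range may need the same flush issued under several
DIDs. The sketch below is illustrative only and is not part of this commit:
the nested_domains list, the link member, and the s2_did field are invented
for the example; only __iommu_flush_iotlb_psi() and domain_id_iommu() are
real.

/*
 * Illustrative sketch only -- not from this commit.  It shows the kind
 * of caller the explicit DID parameter enables: flushing one range
 * under the parent domain's DID and then under each DID reused by a
 * nested domain.  The nested_domains list, link member, and s2_did
 * field are hypothetical.
 */
static void flush_parent_range(struct intel_iommu *iommu,
			       struct dmar_domain *parent,
			       unsigned long pfn, unsigned int pages)
{
	struct hypothetical_nested *n;

	/* Flush the range under the parent domain's own DID. */
	__iommu_flush_iotlb_psi(iommu, domain_id_iommu(parent, iommu),
				pfn, pages, 0);

	/* Repeat the same flush under every DID a nested domain reuses. */
	list_for_each_entry(n, &parent->nested_domains, link)
		__iommu_flush_iotlb_psi(iommu, n->s2_did, pfn, pages, 0);
}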
@@ -1368,6 +1368,46 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
+static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+				    unsigned long pfn, unsigned int pages,
+				    int ih)
+{
+	unsigned int aligned_pages = __roundup_pow_of_two(pages);
+	unsigned long bitmask = aligned_pages - 1;
+	unsigned int mask = ilog2(aligned_pages);
+	u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
+
+	/*
+	 * PSI masks the low order bits of the base address. If the
+	 * address isn't aligned to the mask, then compute a mask value
+	 * needed to ensure the target range is flushed.
+	 */
+	if (unlikely(bitmask & pfn)) {
+		unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+		/*
+		 * Since end_pfn <= pfn + bitmask, the only way bits
+		 * higher than bitmask can differ in pfn and end_pfn is
+		 * by carrying. This means after masking out bitmask,
+		 * high bits starting with the first set bit in
+		 * shared_bits are all equal in both pfn and end_pfn.
+		 */
+		shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+		mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+	}
+
+	/*
+	 * Fallback to domain selective flush if no PSI support or
+	 * the size is too big.
+	 */
+	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+		iommu->flush.flush_iotlb(iommu, did, 0, 0,
+					 DMA_TLB_DSI_FLUSH);
+	else
+		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+					 DMA_TLB_PSI_FLUSH);
+}
+
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 				  struct dmar_domain *domain,
 				  unsigned long pfn, unsigned int pages,
@@ -1384,42 +1424,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 	if (ih)
 		ih = 1 << 6;
 
-	if (domain->use_first_level) {
+	if (domain->use_first_level)
 		domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
-	} else {
-		unsigned long bitmask = aligned_pages - 1;
-
-		/*
-		 * PSI masks the low order bits of the base address. If the
-		 * address isn't aligned to the mask, then compute a mask value
-		 * needed to ensure the target range is flushed.
-		 */
-		if (unlikely(bitmask & pfn)) {
-			unsigned long end_pfn = pfn + pages - 1, shared_bits;
-
-			/*
-			 * Since end_pfn <= pfn + bitmask, the only way bits
-			 * higher than bitmask can differ in pfn and end_pfn is
-			 * by carrying. This means after masking out bitmask,
-			 * high bits starting with the first set bit in
-			 * shared_bits are all equal in both pfn and end_pfn.
-			 */
-			shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
-			mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
-		}
-
-		/*
-		 * Fallback to domain selective flush if no PSI support or
-		 * the size is too big.
-		 */
-		if (!cap_pgsel_inv(iommu->cap) ||
-		    mask > cap_max_amask_val(iommu->cap))
-			iommu->flush.flush_iotlb(iommu, did, 0, 0,
-						 DMA_TLB_DSI_FLUSH);
-		else
-			iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
-						 DMA_TLB_PSI_FLUSH);
-	}
+	else
+		__iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
 
 	/*
 	 * In caching mode, changes of pages from non-present to present require
...
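
The subtle part of the moved code is how the helper widens the invalidation
mask when pfn is not aligned to the flush size. A standalone userspace
re-creation of that arithmetic, using arbitrary sample values (pfn = 3,
pages = 4) and GCC builtins in place of the kernel helpers, makes the carry
argument in the comment concrete:

/*
 * Userspace re-creation of the mask computation in
 * __iommu_flush_iotlb_psi(); __roundup_pow_of_two(), ilog2(), and
 * __ffs() are replaced with GCC builtins.  Sample values are arbitrary,
 * chosen to hit the unaligned path: pfn = 3, pages = 4.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pfn = 3, bitmask, shared_bits;
	unsigned int pages = 4, aligned_pages, mask;

	/* __roundup_pow_of_two(4) == 4 */
	aligned_pages = 1u << (32 - __builtin_clz(pages - 1));
	bitmask = aligned_pages - 1;		/* 0b011 */
	mask = __builtin_ctz(aligned_pages);	/* ilog2(4) == 2 */

	if (bitmask & pfn) {			/* 3 & 3: pfn is unaligned */
		unsigned long end_pfn = pfn + pages - 1;	/* 6 */

		/* ~(0b011 ^ 0b110) & ~0b011 == ...11111000 */
		shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
		/* lowest set bit of shared_bits is bit 3 */
		mask = shared_bits ? __builtin_ctzl(shared_bits) : 64;
	}

	/* prints "mask = 3": one PSI of 2^3 pages covers pfns 0..7 */
	printf("mask = %u\n", mask);
	return 0;
}

A mask of 3 aligns the base address down to an 8-page boundary, so a single
PSI invalidation covers pfns 3..6 as a subset; if the computed mask ever
exceeded cap_max_amask_val(), or PSI were unsupported, the helper would fall
back to the domain-selective flush instead.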