Commit 5082219b authored by Filippo Sironi's avatar Filippo Sironi Committed by Joerg Roedel

iommu/vt-d: Don't be too aggressive when clearing one context entry

Previously, we were invalidating context cache and IOTLB globally when
clearing one context entry.  This is a tad too aggressive.
Invalidate the context cache and IOTLB for the interested device only.
Signed-off-by: Filippo Sironi <sironi@amazon.de>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: iommu@lists.linux-foundation.org
Cc: linux-kernel@vger.kernel.org
Acked-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 11b93ebf
@@ -974,20 +974,6 @@ static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
return ret; return ret;
} }
/*
 * Clear the context-table entry for a single device (bus/devfn) and
 * flush the CPU cache line holding it, under the IOMMU lock.
 *
 * Note: this only clears the in-memory entry; it does not invalidate
 * the hardware context cache or IOTLB — callers must do that.
 */
static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	unsigned long irq_flags;
	struct context_entry *ce;

	spin_lock_irqsave(&iommu->lock, irq_flags);
	ce = iommu_context_addr(iommu, bus, devfn, 0);
	if (!ce)
		goto unlock;

	context_clear_entry(ce);
	__iommu_flush_cache(iommu, ce, sizeof(*ce));
unlock:
	spin_unlock_irqrestore(&iommu->lock, irq_flags);
}
static void free_context_table(struct intel_iommu *iommu) static void free_context_table(struct intel_iommu *iommu)
{ {
int i; int i;
@@ -2361,13 +2347,33 @@ static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long i
/*
 * Tear down the context mapping for one device (bus/devfn) and invalidate
 * the hardware caches for that device only, instead of flushing the
 * context cache and IOTLB globally.
 *
 * The domain id is read from the entry *before* it is cleared, because a
 * device-selective context-cache invalidation and a domain-selective IOTLB
 * flush both need the old domain id.
 */
static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	unsigned long flags;
	struct context_entry *context;
	u16 did_old;

	if (!iommu)
		return;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (!context) {
		/* No context entry for this device — nothing to clear. */
		spin_unlock_irqrestore(&iommu->lock, flags);
		return;
	}
	did_old = context_domain_id(context);
	context_clear_entry(context);
	__iommu_flush_cache(iommu, context, sizeof(*context));
	spin_unlock_irqrestore(&iommu->lock, flags);

	/* Device-selective context-cache invalidation (source-id = bus:devfn). */
	iommu->flush.flush_context(iommu,
				   did_old,
				   (((u16)bus) << 8) | devfn,
				   DMA_CCMD_MASK_NOBIT,
				   DMA_CCMD_DEVICE_INVL);
	/* Domain-selective IOTLB flush for the old domain. */
	iommu->flush.flush_iotlb(iommu,
				 did_old,
				 0,
				 0,
				 DMA_TLB_DSI_FLUSH);
}
static inline void unlink_domain_info(struct device_domain_info *info) static inline void unlink_domain_info(struct device_domain_info *info)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment