Commit 18f18376 authored by Linus Torvalds

Merge tag 'iommu-updates-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "Nothing big this time. In particular:

   - Debugging code for Tegra-GART

   - Improvements to Intel VT-d fault printing to prevent soft lockups
     during fault storms

   - Improvements in AMD IOMMU event reporting

   - NUMA-aware allocation in io-pgtable code for ARM

   - Various other small fixes and cleanups all over the place"

* tag 'iommu-updates-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/io-pgtable-arm: Make allocations NUMA-aware
  iommu/amd: Prevent possible null pointer dereference and infinite loop
  iommu/amd: Fix grammar of comments
  iommu: Clean up the comments for iommu_group_alloc
  iommu/vt-d: Remove unnecessary parentheses
  iommu/vt-d: Clean up pasid quirk for pre-production devices
  iommu/vt-d: Clean up unused variable in find_or_alloc_domain
  iommu/vt-d: Fix iotlb psi missing for mappings
  iommu/vt-d: Introduce __mapping_notify_one()
  iommu: Remove extra NULL check when call strtobool()
  iommu/amd: Update logging information for new event type
  iommu/amd: Update the PASID information printed to the system log
  iommu/tegra: gart: Fix gart_iommu_unmap()
  iommu/tegra: gart: Add debugging facility
  iommu/io-pgtable-arm: Use for_each_set_bit to simplify code
  iommu/qcom: Simplify getting .drvdata
  iommu: Remove depends on HAS_DMA in case of platform dependency
  iommu/vt-d: Ratelimit each dmar fault printing
parents f4e70c2e 1f568357
@@ -23,7 +23,7 @@ config IOMMU_IO_PGTABLE
 config IOMMU_IO_PGTABLE_LPAE
 	bool "ARMv7/v8 Long Descriptor Format"
 	select IOMMU_IO_PGTABLE
-	depends on HAS_DMA && (ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64))
+	depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
 	help
 	  Enable support for the ARM long descriptor pagetable format.
 	  This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -42,7 +42,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
 config IOMMU_IO_PGTABLE_ARMV7S
 	bool "ARMv7/v8 Short Descriptor Format"
 	select IOMMU_IO_PGTABLE
-	depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
+	depends on ARM || ARM64 || COMPILE_TEST
 	help
 	  Enable support for the ARM Short-descriptor pagetable format.
 	  This supports 32-bit virtual and physical addresses mapped using
@@ -377,7 +377,6 @@ config QCOM_IOMMU
 	# Note: iommu drivers cannot (yet?) be built as modules
 	bool "Qualcomm IOMMU Support"
 	depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
-	depends on HAS_DMA
 	select IOMMU_API
 	select IOMMU_IO_PGTABLE_LPAE
 	select ARM_DMA_USE_IOMMU
...
@@ -547,7 +547,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
 static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 {
 	struct device *dev = iommu->iommu.dev;
-	int type, devid, domid, flags;
+	int type, devid, pasid, flags, tag;
 	volatile u32 *event = __evt;
 	int count = 0;
 	u64 address;
@@ -555,7 +555,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 retry:
 	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
 	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
-	domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+	pasid   = PPR_PASID(*(u64 *)&event[0]);
 	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 	address = (u64)(((u64)event[3]) << 32) | event[2];
@@ -570,7 +570,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 	}

 	if (type == EVENT_TYPE_IO_FAULT) {
-		amd_iommu_report_page_fault(devid, domid, address, flags);
+		amd_iommu_report_page_fault(devid, pasid, address, flags);
 		return;
 	} else {
 		dev_err(dev, "AMD-Vi: Event logged [");
@@ -578,10 +578,9 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 	switch (type) {
 	case EVENT_TYPE_ILL_DEV:
-		dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
-			"address=0x%016llx flags=0x%04x]\n",
+		dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-			address, flags);
+			pasid, address, flags);
 		dump_dte_entry(devid);
 		break;
 	case EVENT_TYPE_DEV_TAB_ERR:
@@ -591,34 +590,38 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 			address, flags);
 		break;
 	case EVENT_TYPE_PAGE_TAB_ERR:
-		dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
-			"domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+		dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%016llx flags=0x%04x]\n",
 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-			domid, address, flags);
+			pasid, address, flags);
 		break;
 	case EVENT_TYPE_ILL_CMD:
 		dev_err(dev, "ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
 		dump_command(address);
 		break;
 	case EVENT_TYPE_CMD_HARD_ERR:
-		dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx "
-			"flags=0x%04x]\n", address, flags);
+		dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx flags=0x%04x]\n",
+			address, flags);
 		break;
 	case EVENT_TYPE_IOTLB_INV_TO:
-		dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
-			"address=0x%016llx]\n",
+		dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%016llx]\n",
 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 			address);
 		break;
 	case EVENT_TYPE_INV_DEV_REQ:
-		dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
-			"address=0x%016llx flags=0x%04x]\n",
+		dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-			address, flags);
+			pasid, address, flags);
+		break;
+	case EVENT_TYPE_INV_PPR_REQ:
+		pasid = ((event[0] >> 16) & 0xFFFF)
+			| ((event[1] << 6) & 0xF0000);
+		tag = event[1] & 0x03FF;
+		dev_err(dev, "INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
+			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+			pasid, address, flags);
 		break;
 	default:
-		dev_err(dev, KERN_ERR "UNKNOWN event[0]=0x%08x event[1]=0x%08x "
-			"event[2]=0x%08x event[3]=0x%08x\n",
+		dev_err(dev, "UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
 			event[0], event[1], event[2], event[3]);
 	}
@@ -1914,15 +1917,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
 	struct amd_iommu *iommu;
 	u16 alias;

-	/*
-	 * First check if the device is still attached. It might already
-	 * be detached from its domain because the generic
-	 * iommu_detach_group code detached it and we try again here in
-	 * our alias handling.
-	 */
-	if (!dev_data->domain)
-		return;
-
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
 	alias = dev_data->alias;
@@ -1942,8 +1936,8 @@ static void do_detach(struct iommu_dev_data *dev_data)
 }

 /*
- * If a device is not yet associated with a domain, this function does
- * assigns it visible for the hardware
+ * If a device is not yet associated with a domain, this function makes the
+ * device visible in the domain
  */
 static int __attach_device(struct iommu_dev_data *dev_data,
			   struct protection_domain *domain)
@@ -2064,8 +2058,8 @@ static bool pci_pri_tlp_required(struct pci_dev *pdev)
 }

 /*
- * If a device is not yet associated with a domain, this function
- * assigns it visible for the hardware
+ * If a device is not yet associated with a domain, this function makes the
+ * device visible in the domain
  */
 static int attach_device(struct device *dev,
			 struct protection_domain *domain)
@@ -2127,9 +2121,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
 	 */
 	WARN_ON(!irqs_disabled());

-	if (WARN_ON(!dev_data->domain))
-		return;
-
 	domain = dev_data->domain;

 	spin_lock(&domain->lock);
@@ -2151,6 +2142,15 @@ static void detach_device(struct device *dev)
 	dev_data = get_dev_data(dev);
 	domain   = dev_data->domain;

+	/*
+	 * First check if the device is still attached. It might already
+	 * be detached from its domain because the generic
+	 * iommu_detach_group code detached it and we try again here in
+	 * our alias handling.
+	 */
+	if (WARN_ON(!dev_data->domain))
+		return;
+
 	/* lock device table */
 	spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
 	__detach_device(dev_data);
@@ -2796,6 +2796,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	while (!list_empty(&domain->dev_list)) {
 		entry = list_first_entry(&domain->dev_list,
					 struct iommu_dev_data, list);
+		BUG_ON(!entry->domain);
 		__detach_device(entry);
 	}
...
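As an annotation of the new EVENT_TYPE_INV_PPR_REQ case above, this restates the bit layout the decode relies on; it is a sketch of what the hunk already does, not additional patch code:

	/*
	 * event[0] bits 31:16 carry PASID[15:0], event[1] bits 13:10 carry
	 * PASID[19:16], and event[1] bits 9:0 carry the PPR tag.
	 */
	pasid = ((event[0] >> 16) & 0xFFFF)	/* PASID[15:0]  */
	      | ((event[1] << 6) & 0xF0000);	/* PASID[19:16] */
	tag   = event[1] & 0x03FF;		/* PPR tag      */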
@@ -133,6 +133,7 @@
 #define EVENT_TYPE_CMD_HARD_ERR	0x6
 #define EVENT_TYPE_IOTLB_INV_TO	0x7
 #define EVENT_TYPE_INV_DEV_REQ	0x8
+#define EVENT_TYPE_INV_PPR_REQ	0x9
 #define EVENT_DEVID_MASK	0xffff
 #define EVENT_DEVID_SHIFT	0
 #define EVENT_DOMID_MASK	0xffff
...
@@ -1618,17 +1618,13 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 	int reg, fault_index;
 	u32 fault_status;
 	unsigned long flag;
-	bool ratelimited;
 	static DEFINE_RATELIMIT_STATE(rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

-	/* Disable printing, simply clear the fault when ratelimited */
-	ratelimited = !__ratelimit(&rs);
-
 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-	if (fault_status && !ratelimited)
+	if (fault_status && __ratelimit(&rs))
 		pr_err("DRHD: handling fault status reg %x\n", fault_status);

 	/* TBD: ignore advanced fault log currently */
@@ -1638,6 +1634,8 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 	fault_index = dma_fsts_fault_record_index(fault_status);
 	reg = cap_fault_reg_offset(iommu->cap);
 	while (1) {
+		/* Disable printing, simply clear the fault when ratelimited */
+		bool ratelimited = !__ratelimit(&rs);
 		u8 fault_reason;
 		u16 source_id;
 		u64 guest_addr;
...
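The point of moving the __ratelimit() check inside the loop is that only the printing is throttled while every pending fault record is still cleared, so a fault storm can no longer soft-lock the CPU in dmar_fault(). A minimal sketch of the pattern, with more_faults_pending() and clear_one_fault() as purely hypothetical helpers standing in for the real register handling:

	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	while (more_faults_pending()) {		/* hypothetical helper */
		/* __ratelimit() returns false once the burst budget is spent */
		bool ratelimited = !__ratelimit(&rs);

		if (!ratelimited)
			pr_err("fault: ...\n");

		clear_one_fault();		/* hypothetical helper */
	}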
@@ -485,37 +485,14 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int intel_iommu_ecs = 1;
-static int intel_iommu_pasid28;
 static int iommu_identity_mapping;

 #define IDENTMAP_ALL		1
 #define IDENTMAP_GFX		2
 #define IDENTMAP_AZALIA		4

-/* Broadwell and Skylake have broken ECS support — normal so-called "second
- * level" translation of DMA requests-without-PASID doesn't actually happen
- * unless you also set the NESTE bit in an extended context-entry. Which of
- * course means that SVM doesn't work because it's trying to do nested
- * translation of the physical addresses it finds in the process page tables,
- * through the IOVA->phys mapping found in the "second level" page tables.
- *
- * The VT-d specification was retroactively changed to change the definition
- * of the capability bits and pretend that Broadwell/Skylake never happened...
- * but unfortunately the wrong bit was changed. It's ECS which is broken, but
- * for some reason it was the PASID capability bit which was redefined (from
- * bit 28 on BDW/SKL to bit 40 in future).
- *
- * So our test for ECS needs to eschew those implementations which set the old
- * PASID capabiity bit 28, since those are the ones on which ECS is broken.
- * Unless we are working around the 'pasid28' limitations, that is, by putting
- * the device into passthrough mode for normal DMA and thus masking the bug.
- */
-#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
-				 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
-/* PASID support is thus enabled if ECS is enabled and *either* of the old
- * or new capability bits are set. */
-#define pasid_enabled(iommu)	(ecs_enabled(iommu) && \
-				 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
+#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap))
+#define pasid_enabled(iommu)	(ecs_enabled(iommu) && ecap_pasid(iommu->ecap))

 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -578,11 +555,6 @@ static int __init intel_iommu_setup(char *str)
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
-		} else if (!strncmp(str, "pasid28", 7)) {
-			printk(KERN_INFO
-				"Intel-IOMMU: enable pre-production PASID support\n");
-			intel_iommu_pasid28 = 1;
-			iommu_identity_mapping |= IDENTMAP_GFX;
 		} else if (!strncmp(str, "tboot_noforce", 13)) {
			printk(KERN_INFO
				"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -1606,6 +1578,18 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
		iommu_flush_dev_iotlb(domain, addr, mask);
 }

+/* Notification for newly created mappings */
+static inline void __mapping_notify_one(struct intel_iommu *iommu,
+					struct dmar_domain *domain,
+					unsigned long pfn, unsigned int pages)
+{
+	/* It's a non-present to present mapping. Only flush if caching mode */
+	if (cap_caching_mode(iommu->cap))
+		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
+	else
+		iommu_flush_write_buffer(iommu);
+}
+
 static void iommu_flush_iova(struct iova_domain *iovad)
 {
	struct dmar_domain *domain;
@@ -2340,18 +2324,47 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
	return 0;
 }

+static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+			  struct scatterlist *sg, unsigned long phys_pfn,
+			  unsigned long nr_pages, int prot)
+{
+	int ret;
+	struct intel_iommu *iommu;
+
+	/* Do the real mapping first */
+	ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+	if (ret)
+		return ret;
+
+	/* Notify about the new mapping */
+	if (domain_type_is_vm(domain)) {
+		/* VM typed domains can have more than one IOMMUs */
+		int iommu_id;
+
+		for_each_domain_iommu(iommu_id, domain) {
+			iommu = g_iommus[iommu_id];
+			__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+		}
+	} else {
+		/* General domains only have one IOMMU */
+		iommu = domain_get_iommu(domain);
+		__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+	}
+
+	return 0;
+}
+
 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
 {
-	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
+	return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
 }

 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
 {
-	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
+	return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
 }

 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
@@ -2534,7 +2547,7 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
	struct device_domain_info *info = NULL;
	struct dmar_domain *domain = NULL;
	struct intel_iommu *iommu;
-	u16 req_id, dma_alias;
+	u16 dma_alias;
	unsigned long flags;
	u8 bus, devfn;
@@ -2542,8 +2555,6 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
	if (!iommu)
		return NULL;

-	req_id = ((u16)bus << 8) | devfn;
-
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
@@ -2657,8 +2668,8 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

-	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
-				  last_vpfn - first_vpfn + 1,
-				  DMA_PTE_READ|DMA_PTE_WRITE);
+	return __domain_mapping(domain, first_vpfn, NULL,
+				first_vpfn, last_vpfn - first_vpfn + 1,
+				DMA_PTE_READ|DMA_PTE_WRITE);
 }
@@ -3626,14 +3637,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
	if (ret)
		goto error;

-	/* it's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain,
-				      mm_to_dma_pfn(iova_pfn),
-				      size, 0, 1);
-	else
-		iommu_flush_write_buffer(iommu);
-
	start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;
@@ -3820,12 +3823,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
		return 0;
	}

-	/* it's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
-	else
-		iommu_flush_write_buffer(iommu);
-
	return nelems;
 }
...
@@ -319,7 +319,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
	} else
		pasid_max = 1 << 20;

-	if ((flags & SVM_FLAG_SUPERVISOR_MODE)) {
+	if (flags & SVM_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap))
			return -EINVAL;
	} else if (pasid) {
...
@@ -898,8 +898,7 @@ static int __init arm_v7s_do_selftests(void)
	/* Full unmap */
	iova = 0;
-	i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
-	while (i != BITS_PER_LONG) {
+	for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << i;

		if (ops->unmap(ops, iova, size) != size)
@@ -916,8 +915,6 @@ static int __init arm_v7s_do_selftests(void)
			return __FAIL(ops);

		iova += SZ_16M;
-		i++;
-		i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
	}

	free_io_pgtable_ops(ops);
...
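for_each_set_bit() from <linux/bitops.h> is the canonical replacement for an open-coded find_first_bit()/find_next_bit() walk, which is all this cleanup does. Roughly, the two forms below are equivalent; example_walk_pgsizes() is a hypothetical function used only to make the sketch self-contained:

	static void example_walk_pgsizes(unsigned long pgsize_bitmap)
	{
		unsigned int i;

		/* Old form: advance the bit index by hand. */
		i = find_first_bit(&pgsize_bitmap, BITS_PER_LONG);
		while (i != BITS_PER_LONG) {
			/* ... operate on the page size (1UL << i) ... */
			i = find_next_bit(&pgsize_bitmap, BITS_PER_LONG, i + 1);
		}

		/* New form: the helper expresses the same walk directly. */
		for_each_set_bit(i, &pgsize_bitmap, BITS_PER_LONG) {
			/* ... operate on the page size (1UL << i) ... */
		}
	}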
@@ -231,12 +231,17 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
 {
	struct device *dev = cfg->iommu_dev;
+	int order = get_order(size);
+	struct page *p;
	dma_addr_t dma;
-	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
+	void *pages;

-	if (!pages)
+	VM_BUG_ON((gfp & __GFP_HIGHMEM));
+	p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
+	if (!p)
		return NULL;

+	pages = page_address(p);
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
@@ -256,7 +261,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
 out_free:
-	free_pages_exact(pages, size);
+	__free_pages(p, order);
	return NULL;
 }
@@ -266,7 +271,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
-	free_pages_exact(pages, size);
+	free_pages((unsigned long)pages, get_order(size));
 }

 static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
@@ -1120,8 +1125,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
	/* Full unmap */
	iova = 0;
-	j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
-	while (j != BITS_PER_LONG) {
+	for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << j;

		if (ops->unmap(ops, iova, size) != size)
@@ -1138,8 +1142,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
			return __FAIL(ops, i);

		iova += SZ_1G;
-		j++;
-		j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
	}

	free_io_pgtable_ops(ops);
...
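The NUMA awareness in the hunk above comes from allocating with alloc_pages_node() on the node reported by dev_to_node(), so the page-table pages end up on the memory node closest to the IOMMU that will walk them. A stripped-down sketch of that allocation pattern, where alloc_table_on_dev_node() is a hypothetical helper and error handling beyond the allocation check is omitted:

	/* Allocate a zeroed, physically contiguous table on dev's NUMA node. */
	static void *alloc_table_on_dev_node(struct device *dev, size_t size)
	{
		struct page *p;

		p = alloc_pages_node(dev_to_node(dev), GFP_KERNEL | __GFP_ZERO,
				     get_order(size));
		return p ? page_address(p) : NULL;
	}

	/* The matching release is free_pages((unsigned long)table, get_order(size)). */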
@@ -116,9 +116,11 @@ static void __iommu_detach_group(struct iommu_domain *domain,
 static int __init iommu_set_def_domain_type(char *str)
 {
	bool pt;
+	int ret;

-	if (!str || strtobool(str, &pt))
-		return -EINVAL;
+	ret = kstrtobool(str, &pt);
+	if (ret)
+		return ret;

	iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
	return 0;
@@ -322,7 +324,6 @@ static struct kobj_type iommu_group_ktype = {
 /**
  * iommu_group_alloc - Allocate a new group
- * @name: Optional name to associate with group, visible in sysfs
  *
  * This function is called by an iommu driver to allocate a new iommu
  * group. The iommu group represents the minimum granularity of the iommu.
...
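The extra NULL check was redundant because kstrtobool() itself returns -EINVAL for a NULL or unrecognised string. A small sketch of the resulting parser shape; example_parse_bool() is a hypothetical function, not kernel code:

	static int __init example_parse_bool(char *str)
	{
		bool val;
		int ret;

		/* kstrtobool() rejects NULL and anything it cannot parse,
		 * so no separate !str check is needed before the call. */
		ret = kstrtobool(str, &val);
		if (ret)
			return ret;

		return val ? 1 : 0;
	}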
@@ -885,16 +885,14 @@ static int qcom_iommu_device_remove(struct platform_device *pdev)
 static int __maybe_unused qcom_iommu_resume(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
+	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	return qcom_iommu_enable_clocks(qcom_iommu);
 }

 static int __maybe_unused qcom_iommu_suspend(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
+	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	qcom_iommu_disable_clocks(qcom_iommu);
...
@@ -72,6 +72,8 @@ struct gart_domain {
 static struct gart_device *gart_handle; /* unique for a system */

+static bool gart_debug;
+
 #define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
@@ -271,6 +273,7 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;
	unsigned long pfn;
+	unsigned long pte;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;
@@ -282,6 +285,14 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
+	if (gart_debug) {
+		pte = gart_read_pte(gart, iova);
+		if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
+			spin_unlock_irqrestore(&gart->pte_lock, flags);
+			dev_err(gart->dev, "Page entry is in-use\n");
+			return -EBUSY;
+		}
+	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
@@ -302,7 +313,7 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
	gart_set_pte(gart, iova, 0);
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
-	return 0;
+	return bytes;
 }

 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -515,7 +526,9 @@ static void __exit tegra_gart_exit(void)
 subsys_initcall(tegra_gart_init);
 module_exit(tegra_gart_exit);

+module_param(gart_debug, bool, 0644);
+MODULE_PARM_DESC(gart_debug, "Enable GART debugging");
 MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
 MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
 MODULE_ALIAS("platform:tegra-gart");
...
@@ -121,7 +121,6 @@
 #define ecap_srs(e)		((e >> 31) & 0x1)
 #define ecap_ers(e)		((e >> 30) & 0x1)
 #define ecap_prs(e)		((e >> 29) & 0x1)
-#define ecap_broken_pasid(e)	((e >> 28) & 0x1)
 #define ecap_dis(e)		((e >> 27) & 0x1)
 #define ecap_nest(e)		((e >> 26) & 0x1)
 #define ecap_mts(e)		((e >> 25) & 0x1)
...