Commit 4fc2ea6a authored by Linus Torvalds

Merge tag 'iommu-updates-v5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel:

 - Allow compiling the ARM-SMMU drivers as modules.

 - Fixes and cleanups for the ARM-SMMU drivers and io-pgtable code
   collected by Will Deacon. The merge-commit (6855d1ba) has all the
   details.

 - Cleanup of the iommu_put_resv_regions() call-backs in various
   drivers.

 - AMD IOMMU driver cleanups.

 - Update for the x2APIC support in the AMD IOMMU driver.

 - Preparation patches for Intel VT-d nested mode support.

 - RMRR and identity domain handling fixes for the Intel VT-d driver.

 - More small fixes and cleanups.

* tag 'iommu-updates-v5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (87 commits)
  iommu/amd: Remove the unnecessary assignment
  iommu/vt-d: Remove unnecessary WARN_ON_ONCE()
  iommu/vt-d: Unnecessary to handle default identity domain
  iommu/vt-d: Allow devices with RMRRs to use identity domain
  iommu/vt-d: Add RMRR base and end addresses sanity check
  iommu/vt-d: Mark firmware tainted if RMRR fails sanity check
  iommu/amd: Remove unused struct member
  iommu/amd: Replace two consecutive readl calls with one readq
  iommu/vt-d: Don't reject Host Bridge due to scope mismatch
  PCI/ATS: Add PASID stubs
  iommu/arm-smmu-v3: Return -EBUSY when trying to re-add a device
  iommu/arm-smmu-v3: Improve add_device() error handling
  iommu/arm-smmu-v3: Use WRITE_ONCE() when changing validity of an STE
  iommu/arm-smmu-v3: Add second level of context descriptor table
  iommu/arm-smmu-v3: Prepare for handling arm_smmu_write_ctx_desc() failure
  iommu/arm-smmu-v3: Propagate ssid_bits
  iommu/arm-smmu-v3: Add support for Substream IDs
  iommu/arm-smmu-v3: Add context descriptor tables allocators
  iommu/arm-smmu-v3: Prepare arm_smmu_s1_cfg for SSID support
  ACPI/IORT: Parse SSID property of named component node
  ...
parents d271ab29 e3b5ee0c
@@ -86,6 +86,12 @@ have a means to turn off translation. But it is invalid in such cases to
disable the IOMMU's device tree node in the first place because it would
prevent any driver from properly setting up the translations.
Optional properties:
--------------------
- pasid-num-bits: Some masters support multiple address spaces for DMA, by
tagging DMA transactions with an address space identifier. By default,
this is 0, which means that the device only has one address space.
Notes:
======
...
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) "ACPI: IORT: " fmt
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -902,9 +903,9 @@ static inline bool iort_iommu_driver_enabled(u8 type)
{
switch (type) {
case ACPI_IORT_NODE_SMMU_V3:
-return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
+return IS_ENABLED(CONFIG_ARM_SMMU_V3);
case ACPI_IORT_NODE_SMMU:
-return IS_BUILTIN(CONFIG_ARM_SMMU);
+return IS_ENABLED(CONFIG_ARM_SMMU);
default:
pr_warn("IORT node type %u does not describe an SMMU\n", type);
return false;
@@ -976,6 +977,20 @@ static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
return iort_iommu_xlate(info->dev, parent, streamid);
}
static void iort_named_component_init(struct device *dev,
struct acpi_iort_node *node)
{
struct acpi_iort_named_component *nc;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
if (!fwspec)
return;
nc = (struct acpi_iort_named_component *)node->node_data;
fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS,
nc->node_flags);
}
/**
 * iort_iommu_configure - Set-up IOMMU configuration for a device.
 *
@@ -1030,6 +1045,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
if (parent)
err = iort_iommu_xlate(dev, parent, streamid);
} while (parent && !err);
if (!err)
iort_named_component_init(dev, node);
}
/*
...
@@ -82,7 +82,7 @@ config IOMMU_DEBUGFS
config IOMMU_DEFAULT_PASSTHROUGH
bool "IOMMU passthrough by default"
depends on IOMMU_API
help
Enable passthrough by default, removing the need to pass in
iommu.passthrough=on or iommu=pt through command line. If this
is enabled, you can still disable with iommu.passthrough=off
@@ -91,8 +91,8 @@ config IOMMU_DEFAULT_PASSTHROUGH
If unsure, say N here.
config OF_IOMMU
def_bool y
depends on OF && IOMMU_API
# IOMMU-agnostic DMA-mapping layer
config IOMMU_DMA
@@ -214,6 +214,7 @@ config INTEL_IOMMU_SVM
select PCI_PASID
select PCI_PRI
select MMU_NOTIFIER
select IOASID
help
Shared Virtual Memory (SVM) provides a facility for devices
to access DMA resources through process address space by
@@ -248,6 +249,18 @@ config INTEL_IOMMU_FLOPPY_WA
workaround will setup a 1:1 mapping for the first
16MiB to make floppy (an ISA device) work.
config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
bool "Enable Intel IOMMU scalable mode by default"
depends on INTEL_IOMMU
help
Selecting this option will enable by default the scalable mode if
hardware presents the capability. The scalable mode is defined in
VT-d 3.0. The scalable mode capability could be checked by reading
/sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option
is not selected, scalable mode support could also be enabled by
passing intel_iommu=sm_on to the kernel. If not sure, please use
the default value.
config IRQ_REMAP
bool "Support for Interrupt Remapping"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
@@ -356,7 +369,7 @@ config SPAPR_TCE_IOMMU
# ARM IOMMU support
config ARM_SMMU
-bool "ARM Ltd. System MMU (SMMU) Support"
+tristate "ARM Ltd. System MMU (SMMU) Support"
depends on (ARM64 || ARM) && MMU
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
@@ -368,6 +381,18 @@ config ARM_SMMU
Say Y here if your SoC includes an IOMMU device implementing
the ARM SMMU architecture.
config ARM_SMMU_LEGACY_DT_BINDINGS
bool "Support the legacy \"mmu-masters\" devicetree bindings"
depends on ARM_SMMU=y && OF
help
Support for the badly designed and deprecated "mmu-masters"
devicetree bindings. This allows some DMA masters to attach
to the SMMU but does not provide any support via the DMA API.
If you're lucky, you might be able to get VFIO up and running.
If you say Y here then you'll make me very sad. Instead, say N
and move your firmware to the utopian future that was 2016.
config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
bool "Default to disabling bypass on ARM SMMU v1 and v2"
depends on ARM_SMMU
@@ -394,7 +419,7 @@ config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
config.
config ARM_SMMU_V3
-bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
+tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
depends on ARM64
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
...
@@ -14,7 +14,8 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu-mod.o
+arm-smmu-mod-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
...
@@ -2294,7 +2294,6 @@ int __init amd_iommu_init_api(void)
int __init amd_iommu_init_dma_ops(void)
{
swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
iommu_detected = 1;
if (amd_iommu_unmap_flush)
pr_info("IO/TLB flush on unmap enabled\n");
@@ -2638,15 +2637,6 @@ static void amd_iommu_get_resv_regions(struct device *dev,
list_add_tail(&region->list, head);
}
static void amd_iommu_put_resv_regions(struct device *dev,
struct list_head *head)
{
struct iommu_resv_region *entry, *next;
list_for_each_entry_safe(entry, next, head, list)
kfree(entry);
}
static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev)
{
@@ -2685,7 +2675,7 @@ const struct iommu_ops amd_iommu_ops = {
.device_group = amd_iommu_device_group,
.domain_get_attr = amd_iommu_domain_get_attr,
.get_resv_regions = amd_iommu_get_resv_regions,
-.put_resv_regions = amd_iommu_put_resv_regions,
+.put_resv_regions = generic_iommu_put_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
...
@@ -71,6 +71,8 @@
#define IVHD_FLAG_ISOC_EN_MASK 0x08
#define IVMD_FLAG_EXCL_RANGE 0x08
#define IVMD_FLAG_IW 0x04
#define IVMD_FLAG_IR 0x02
#define IVMD_FLAG_UNITY_MAP 0x01
#define ACPI_DEVFLAG_INITPASS 0x01
@@ -147,7 +149,7 @@ bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
-static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
@@ -714,7 +716,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
-iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
+iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
iommu_feature_enable(iommu, CONTROL_PPR_EN);
}
@@ -1116,21 +1118,17 @@ static int __init add_early_maps(void)
*/
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
-struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
return;
-if (iommu) {
-/*
- * We only can configure exclusion ranges per IOMMU, not
- * per device. But we can enable the exclusion range per
- * device. This is done here
- */
-set_dev_entry_bit(devid, DEV_ENTRY_EX);
-iommu->exclusion_start = m->range_start;
-iommu->exclusion_length = m->range_length;
-}
+/*
+ * Treat per-device exclusion ranges as r/w unity-mapped regions
+ * since some buggy BIOSes might lead to the overwritten exclusion
+ * range (exclusion_start and exclusion_length members). This
+ * happens when there are multiple exclusion ranges (IVMD entries)
+ * defined in ACPI table.
+ */
+m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
}
/*
@@ -1523,8 +1521,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
-if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
-amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
break;
case 0x11:
case 0x40:
@@ -1534,8 +1530,15 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
-if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
-amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
+/*
+ * Note: Since iommu_update_intcapxt() leverages
+ * the IOMMU MMIO access to MSI capability block registers
+ * for MSI address lo/hi/data, we need to check both
+ * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
+ */
+if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
+    (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
+amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
break;
default:
return -EINVAL;
@@ -1727,7 +1730,6 @@ static const struct attribute_group *amd_iommu_groups[] = {
static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
-u32 range, misc, low, high;
int ret;
iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
@@ -1740,19 +1742,12 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
&iommu->cap);
-pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
-&range);
-pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
-&misc);
if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
amd_iommu_iotlb_sup = false;
/* read extended feature bits */
-low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
-high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
-iommu->features = ((u64)high << 32) | low;
+iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
if (iommu_feature(iommu, FEATURE_GT)) {
int glxval;
@@ -1996,8 +1991,8 @@ static int iommu_init_intcapxt(struct amd_iommu *iommu)
struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
/**
-* IntCapXT requires XTSup=1, which can be inferred
-* amd_iommu_xt_mode.
+* IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
+* which can be inferred from amd_iommu_xt_mode.
*/
if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
return 0;
@@ -2044,7 +2039,7 @@ static int iommu_init_msi(struct amd_iommu *iommu)
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
if (iommu->ppr_log != NULL)
-iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
iommu_ga_log_enable(iommu);
...
@@ -147,8 +147,8 @@
#define CONTROL_COHERENT_EN 0x0aULL
#define CONTROL_ISOC_EN 0x0bULL
#define CONTROL_CMDBUF_EN 0x0cULL
-#define CONTROL_PPFLOG_EN 0x0dULL
-#define CONTROL_PPFINT_EN 0x0eULL
+#define CONTROL_PPRLOG_EN 0x0dULL
+#define CONTROL_PPRINT_EN 0x0eULL
#define CONTROL_PPR_EN 0x0fULL
#define CONTROL_GT_EN 0x10ULL
#define CONTROL_GA_EN 0x11ULL
@@ -377,12 +377,12 @@
#define IOMMU_CAP_EFR 27
/* IOMMU Feature Reporting Field (for IVHD type 10h */
-#define IOMMU_FEAT_XTSUP_SHIFT 0
#define IOMMU_FEAT_GASUP_SHIFT 6
/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_XTSUP_SHIFT 2
#define IOMMU_EFR_GASUP_SHIFT 7
+#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT 46
#define MAX_DOMAIN_ID 65536
@@ -463,7 +463,6 @@ struct amd_irte_ops;
* independent of their use.
*/
struct protection_domain {
-struct list_head list; /* for list of all protection domains */
struct list_head dev_list; /* List of all devices in this domain */
struct iommu_domain domain; /* generic domain handle used by
iommu core code */
...
@@ -119,7 +119,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
* Secure has also cleared SACR.CACHE_LOCK for this to take effect...
*/
reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
-major = FIELD_GET(ID7_MAJOR, reg);
+major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);
reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
if (major >= 2)
reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
...
@@ -244,7 +244,7 @@ int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
(scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
(info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
-info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
+info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
pr_warn("Device scope type does not match for %s\n",
pci_name(info->dev));
return -EINVAL;
@@ -1354,7 +1354,6 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
struct qi_desc desc;
if (mask) {
-WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
} else
@@ -1371,6 +1370,47 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
qi_submit_sync(&desc, iommu);
}
/* PASID-based IOTLB invalidation */
void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
unsigned long npages, bool ih)
{
struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
/*
* npages == -1 means a PASID-selective invalidation, otherwise,
* a positive value for Page-selective-within-PASID invalidation.
* 0 is not a valid input.
*/
if (WARN_ON(!npages)) {
pr_err("Invalid input npages = %ld\n", npages);
return;
}
if (npages == -1) {
desc.qw0 = QI_EIOTLB_PASID(pasid) |
QI_EIOTLB_DID(did) |
QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
QI_EIOTLB_TYPE;
desc.qw1 = 0;
} else {
int mask = ilog2(__roundup_pow_of_two(npages));
unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
if (WARN_ON_ONCE(!ALIGN(addr, align)))
addr &= ~(align - 1);
desc.qw0 = QI_EIOTLB_PASID(pasid) |
QI_EIOTLB_DID(did) |
QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
QI_EIOTLB_TYPE;
desc.qw1 = QI_EIOTLB_ADDR(addr) |
QI_EIOTLB_IH(ih) |
QI_EIOTLB_AM(mask);
}
qi_submit_sync(&desc, iommu);
}
/*
* Disable Queued Invalidation interface.
*/
...
@@ -5,6 +5,7 @@
* Authors: Gayatri Kammela <gayatri.kammela@intel.com>
* Sohil Mehta <sohil.mehta@intel.com>
* Jacob Pan <jacob.jun.pan@linux.intel.com>
* Lu Baolu <baolu.lu@linux.intel.com>
*/
#include <linux/debugfs.h>
@@ -283,6 +284,77 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);
static inline unsigned long level_to_directory_size(int level)
{
return BIT_ULL(VTD_PAGE_SHIFT + VTD_STRIDE_SHIFT * (level - 1));
}
static inline void
dump_page_info(struct seq_file *m, unsigned long iova, u64 *path)
{
seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\n",
iova >> VTD_PAGE_SHIFT, path[5], path[4],
path[3], path[2], path[1]);
}
static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
int level, unsigned long start,
u64 *path)
{
int i;
if (level > 5 || level < 1)
return;
for (i = 0; i < BIT_ULL(VTD_STRIDE_SHIFT);
i++, pde++, start += level_to_directory_size(level)) {
if (!dma_pte_present(pde))
continue;
path[level] = pde->val;
if (dma_pte_superpage(pde) || level == 1)
dump_page_info(m, start, path);
else
pgtable_walk_level(m, phys_to_virt(dma_pte_addr(pde)),
level - 1, start, path);
path[level] = 0;
}
}
static int show_device_domain_translation(struct device *dev, void *data)
{
struct dmar_domain *domain = find_domain(dev);
struct seq_file *m = data;
u64 path[6] = { 0 };
if (!domain)
return 0;
seq_printf(m, "Device %s with pasid %d @0x%llx\n",
dev_name(dev), domain->default_pasid,
(u64)virt_to_phys(domain->pgd));
seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");
pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
seq_putc(m, '\n');
return 0;
}
static int domain_translation_struct_show(struct seq_file *m, void *unused)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&device_domain_lock, flags);
ret = bus_for_each_dev(&pci_bus_type, NULL, m,
show_device_domain_translation);
spin_unlock_irqrestore(&device_domain_lock, flags);
return ret;
}
DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
#ifdef CONFIG_IRQ_REMAP
static void ir_tbl_remap_entry_show(struct seq_file *m,
struct intel_iommu *iommu)
@@ -396,6 +468,9 @@ void __init intel_iommu_debugfs_init(void)
&iommu_regset_fops);
debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
NULL, &dmar_translation_struct_fops);
debugfs_create_file("domain_translation_struct", 0444,
intel_iommu_debug, NULL,
&domain_translation_struct_fops);
#ifdef CONFIG_IRQ_REMAP
debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
NULL, &ir_translation_struct_fops);
...
@@ -26,42 +26,6 @@
*/
static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;
static DEFINE_IDR(pasid_idr);
int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp)
{
int ret, min, max;
min = max_t(int, start, PASID_MIN);
max = min_t(int, end, intel_pasid_max_id);
WARN_ON(in_interrupt());
idr_preload(gfp);
spin_lock(&pasid_lock);
ret = idr_alloc(&pasid_idr, ptr, min, max, GFP_ATOMIC);
spin_unlock(&pasid_lock);
idr_preload_end();
return ret;
}
void intel_pasid_free_id(int pasid)
{
spin_lock(&pasid_lock);
idr_remove(&pasid_idr, pasid);
spin_unlock(&pasid_lock);
}
void *intel_pasid_lookup_id(int pasid)
{
void *p;
spin_lock(&pasid_lock);
p = idr_find(&pasid_idr, pasid);
spin_unlock(&pasid_lock);
return p;
}
/*
* Per device pasid table management:
@@ -465,6 +429,21 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
devtlb_invalidation_with_pasid(iommu, dev, pasid);
}
static void pasid_flush_caches(struct intel_iommu *iommu,
struct pasid_entry *pte,
int pasid, u16 did)
{
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
if (cap_caching_mode(iommu->cap)) {
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
iotlb_invalidation_with_pasid(iommu, did, pasid);
} else {
iommu_flush_write_buffer(iommu);
}
}
/*
* Set up the scalable mode pasid table entry for first only
* translation type.
@@ -498,10 +477,15 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
pasid_set_sre(pte);
}
-#ifdef CONFIG_X86
-if (cpu_feature_enabled(X86_FEATURE_LA57))
-pasid_set_flpm(pte, 1);
-#endif /* CONFIG_X86 */
+if (flags & PASID_FLAG_FL5LP) {
+if (cap_5lp_support(iommu->cap)) {
+pasid_set_flpm(pte, 1);
+} else {
+pr_err("No 5-level paging support for first-level\n");
+pasid_clear_entry(pte);
+return -EINVAL;
+}
+}
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, iommu->agaw);
@@ -510,16 +494,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
/* Setup Present and PASID Granular Transfer Type: */
pasid_set_translation_type(pte, 1);
pasid_set_present(pte);
pasid_flush_caches(iommu, pte, pasid, did);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
if (cap_caching_mode(iommu->cap)) {
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
iotlb_invalidation_with_pasid(iommu, did, pasid);
} else {
iommu_flush_write_buffer(iommu);
}
return 0;
}
@@ -583,16 +558,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
*/
pasid_set_sre(pte);
pasid_set_present(pte);
pasid_flush_caches(iommu, pte, pasid, did);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
if (cap_caching_mode(iommu->cap)) {
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
iotlb_invalidation_with_pasid(iommu, did, pasid);
} else {
iommu_flush_write_buffer(iommu);
}
return 0;
}
@@ -626,16 +592,7 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
*/
pasid_set_sre(pte);
pasid_set_present(pte);
pasid_flush_caches(iommu, pte, pasid, did);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
if (cap_caching_mode(iommu->cap)) {
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
iotlb_invalidation_with_pasid(iommu, did, pasid);
} else {
iommu_flush_write_buffer(iommu);
}
return 0;
}
@@ -37,6 +37,12 @@
*/
#define PASID_FLAG_SUPERVISOR_MODE BIT(0)
/*
* The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
* level translation, otherwise, 4-level paging will be used.
*/
#define PASID_FLAG_FL5LP BIT(1)
struct pasid_dir_entry {
u64 val;
};
...
@@ -17,25 +17,13 @@
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <linux/mm_types.h>
#include <linux/ioasid.h>
#include <asm/page.h>
#include "intel-pasid.h"
static irqreturn_t prq_event_thread(int irq, void *d);
int intel_svm_init(struct intel_iommu *iommu)
{
if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
!cap_fl1gp_support(iommu->cap))
return -EINVAL;
if (cpu_feature_enabled(X86_FEATURE_LA57) &&
!cap_5lp_support(iommu->cap))
return -EINVAL;
return 0;
}
#define PRQ_ORDER 0
int intel_svm_enable_prq(struct intel_iommu *iommu)
@@ -99,6 +87,33 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
return 0;
}
static inline bool intel_svm_capable(struct intel_iommu *iommu)
{
return iommu->flags & VTD_FLAG_SVM_CAPABLE;
}
void intel_svm_check(struct intel_iommu *iommu)
{
if (!pasid_supported(iommu))
return;
if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
!cap_fl1gp_support(iommu->cap)) {
pr_err("%s SVM disabled, incompatible 1GB page capability\n",
iommu->name);
return;
}
if (cpu_feature_enabled(X86_FEATURE_LA57) &&
!cap_5lp_support(iommu->cap)) {
pr_err("%s SVM disabled, incompatible paging mode\n",
iommu->name);
return;
}
iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}
static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
unsigned long address, unsigned long pages, int ih)
{
@@ -207,6 +222,10 @@ static const struct mmu_notifier_ops intel_mmuops = {
static DEFINE_MUTEX(pasid_mutex);
static LIST_HEAD(global_svm_list);
#define for_each_svm_dev(sdev, svm, d) \
list_for_each_entry((sdev), &(svm)->devs, list) \
if ((d) != (sdev)->dev) {} else
int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{
struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
@@ -220,6 +239,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
if (!iommu || dmar_disabled)
return -EINVAL;
if (!intel_svm_capable(iommu))
return -ENOTSUPP;
if (dev_is_pci(dev)) {
pasid_max = pci_max_pasids(to_pci_dev(dev));
if (pasid_max < 0)
@@ -252,15 +274,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
goto out;
}
-list_for_each_entry(sdev, &svm->devs, list) {
-if (dev == sdev->dev) {
-if (sdev->ops != ops) {
-ret = -EBUSY;
-goto out;
-}
-sdev->users++;
-goto success;
-}
-}
+/* Find the matching device in svm list */
+for_each_svm_dev(sdev, svm, dev) {
+if (sdev->ops != ops) {
+ret = -EBUSY;
+goto out;
+}
+sdev->users++;
+goto success;
+}
break;
@@ -314,16 +335,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
if (pasid_max > intel_pasid_max_id)
pasid_max = intel_pasid_max_id;
-/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
-ret = intel_pasid_alloc_id(svm,
-!!cap_caching_mode(iommu->cap),
-pasid_max - 1, GFP_KERNEL);
-if (ret < 0) {
-kfree(svm);
-kfree(sdev);
-goto out;
-}
-svm->pasid = ret;
+/* Do not use PASID 0, reserved for RID to PASID */
+svm->pasid = ioasid_alloc(NULL, PASID_MIN,
+pasid_max - 1, svm);
+if (svm->pasid == INVALID_IOASID) {
+kfree(svm);
+kfree(sdev);
+ret = -ENOSPC;
+goto out;
+}
svm->notifier.ops = &intel_mmuops;
svm->mm = mm;
svm->flags = flags;
@@ -333,7 +353,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
if (mm) {
ret = mmu_notifier_register(&svm->notifier, mm);
if (ret) {
-intel_pasid_free_id(svm->pasid);
+ioasid_free(svm->pasid);
kfree(svm);
kfree(sdev);
goto out;
@@ -344,12 +364,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
-mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
+(cpu_feature_enabled(X86_FEATURE_LA57) ?
+PASID_FLAG_FL5LP : 0));
spin_unlock(&iommu->lock);
if (ret) {
if (mm)
mmu_notifier_unregister(&svm->notifier, mm);
-intel_pasid_free_id(svm->pasid);
+ioasid_free(svm->pasid);
kfree(svm);
kfree(sdev);
goto out;
@@ -365,7 +387,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
-mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
+(cpu_feature_enabled(X86_FEATURE_LA57) ?
+PASID_FLAG_FL5LP : 0));
spin_unlock(&iommu->lock);
if (ret) {
kfree(sdev);
@@ -397,44 +421,45 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
if (!iommu)
goto out;
-svm = intel_pasid_lookup_id(pasid);
+svm = ioasid_find(NULL, pasid, NULL);
if (!svm)
goto out;
-list_for_each_entry(sdev, &svm->devs, list) {
-if (dev == sdev->dev) {
+if (IS_ERR(svm)) {
+ret = PTR_ERR(svm);
+goto out;
+}
+
+for_each_svm_dev(sdev, svm, dev) {
ret = 0;
sdev->users--;
if (!sdev->users) {
list_del_rcu(&sdev->list);
/* Flush the PASID cache and IOTLB for this device.
* Note that we do depend on the hardware *not* using
* the PASID any more. Just as we depend on other
* devices never using PASIDs that they have no right
* to use. We have a *shared* PASID table, because it's
* large and has to be physically contiguous. So it's
* hard to be as defensive as we might like. */
intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
-intel_pasid_free_id(svm->pasid);
+ioasid_free(svm->pasid);
if (svm->mm)
mmu_notifier_unregister(&svm->notifier, svm->mm);
list_del(&svm->list);
/* We mandate that no page faults may be outstanding
* for the PASID when intel_svm_unbind_mm() is called.
* If that is not obeyed, subtle errors will happen.
* Let's make them less subtle... */
memset(svm, 0x6b, sizeof(*svm));
kfree(svm);
}
}
-}
break;
}
out:
mutex_unlock(&pasid_mutex);
@@ -454,10 +479,14 @@ int intel_svm_is_pasid_valid(struct device *dev, int pasid)
if (!iommu)
goto out;
-svm = intel_pasid_lookup_id(pasid);
+svm = ioasid_find(NULL, pasid, NULL);
if (!svm)
goto out;
if (IS_ERR(svm)) {
ret = PTR_ERR(svm);
goto out;
}
/* init_mm is used in this case */
if (!svm->mm)
ret = 1;
@@ -564,13 +593,12 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (!svm || svm->pasid != req->pasid) {
rcu_read_lock();
-svm = intel_pasid_lookup_id(req->pasid);
+svm = ioasid_find(NULL, req->pasid, NULL);
/* It *can't* go away, because the driver is not permitted
* to unbind the mm while any page faults are outstanding.
* So we only need RCU to protect the internal idr code. */
rcu_read_unlock();
-if (!svm) {
+if (IS_ERR_OR_NULL(svm)) {
pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
iommu->name, req->pasid, ((unsigned long long *)req)[0],
((unsigned long long *)req)[1]);
@@ -654,11 +682,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (req->priv_data_present)
memcpy(&resp.qw2, req->priv_data,
sizeof(req->priv_data));
-resp.qw2 = 0;
-resp.qw3 = 0;
-qi_submit_sync(&resp, iommu);
}
+resp.qw2 = 0;
+resp.qw3 = 0;
+qi_submit_sync(&resp, iommu);
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
...
@@ -149,8 +149,6 @@
#define ARM_V7S_TTBR_IRGN_ATTR(attr) \
((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))
-#define ARM_V7S_TCR_PD1 BIT(5)
#ifdef CONFIG_ZONE_DMA32
#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
@@ -798,8 +796,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
*/
cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
-/* TCR: T0SZ=0, disable TTBR1 */
-cfg->arm_v7s_cfg.tcr = ARM_V7S_TCR_PD1;
+/* TCR: T0SZ=0, EAE=0 (if applicable) */
+cfg->arm_v7s_cfg.tcr = 0;
/*
* TEX remap: the indices used map to the closest equivalent types
@@ -822,15 +820,13 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
/* Ensure the empty pgd is visible before any actual TTBR write */
wmb();
-/* TTBRs */
-cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
-ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
-(cfg->coherent_walk ?
-(ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
-ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
-(ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
-ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
-cfg->arm_v7s_cfg.ttbr[1] = 0;
+/* TTBR */
+cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S |
+(cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
+ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
+(ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
+ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
return &data->iop;
out_free_data:
...
@@ -63,7 +63,7 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
if (!ops)
return;
-iop = container_of(ops, struct io_pgtable, ops);
+iop = io_pgtable_ops_to_pgtable(ops);
io_pgtable_tlb_flush_all(iop);
io_pgtable_init_table[iop->fmt]->free(iop);
}
...
@@ -87,6 +87,7 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
put_device(iommu->dev);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_device_sysfs_add);
void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
@@ -94,6 +95,8 @@ void iommu_device_sysfs_remove(struct iommu_device *iommu)
device_unregister(iommu->dev);
iommu->dev = NULL;
}
EXPORT_SYMBOL_GPL(iommu_device_sysfs_remove);
/*
* IOMMU drivers can indicate a device is managed by a given IOMMU using
* this interface. A link to the device will be created in the "devices"
@@ -119,6 +122,7 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
return ret;
}
EXPORT_SYMBOL_GPL(iommu_device_link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
{
@@ -128,3 +132,4 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
sysfs_remove_link(&link->kobj, "iommu");
sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
}
EXPORT_SYMBOL_GPL(iommu_device_unlink);
@@ -22,6 +22,7 @@
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
@@ -141,6 +142,7 @@ int iommu_device_register(struct iommu_device *iommu)
spin_unlock(&iommu_device_lock);
return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register);
void iommu_device_unregister(struct iommu_device *iommu)
{
@@ -148,6 +150,7 @@ void iommu_device_unregister(struct iommu_device *iommu)
list_del(&iommu->list);
spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);
static struct iommu_param *iommu_get_dev_param(struct device *dev)
{
@@ -183,10 +186,21 @@ int iommu_probe_device(struct device *dev)
if (!iommu_get_dev_param(dev))
return -ENOMEM;
if (!try_module_get(ops->owner)) {
ret = -EINVAL;
goto err_free_dev_param;
}
ret = ops->add_device(dev);
if (ret)
-iommu_free_dev_param(dev);
+goto err_module_put;
+
+return 0;
+
+err_module_put:
+module_put(ops->owner);
+err_free_dev_param:
+iommu_free_dev_param(dev);
return ret;
}
@@ -197,7 +211,10 @@ void iommu_release_device(struct device *dev)
if (dev->iommu_group)
ops->remove_device(dev);
-iommu_free_dev_param(dev);
+if (dev->iommu_param) {
+module_put(ops->owner);
+iommu_free_dev_param(dev);
+}
}
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
@@ -887,6 +904,7 @@ struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
kobject_get(group->devices_kobj);
return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);
/**
* iommu_group_put - Decrement group reference
@@ -1260,6 +1278,7 @@ struct iommu_group *generic_device_group(struct device *dev)
{
return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);
/*
* Use standard PCI bus topology, isolation features, and DMA alias quirks
@@ -1327,6 +1346,7 @@ struct iommu_group *pci_device_group(struct device *dev)
/* No shared group found, allocate new */
return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);
/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
@@ -1339,6 +1359,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev)
group = iommu_group_alloc();
return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);
/**
* iommu_group_get_for_dev - Find or create the IOMMU group for a device
@@ -1407,6 +1428,7 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
return group;
}
EXPORT_SYMBOL(iommu_group_get_for_dev);
struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
@@ -1537,6 +1559,11 @@ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
int err;
if (ops == NULL) {
bus->iommu_ops = NULL;
return 0;
}
if (bus->iommu_ops != NULL)
return -EBUSY;
@@ -2230,6 +2257,25 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
ops->put_resv_regions(dev, list);
}
/**
* generic_iommu_put_resv_regions - Reserved region driver helper
* @dev: device for which to free reserved regions
* @list: reserved region list for device
*
* IOMMU drivers can use this to implement their .put_resv_regions() callback
* for simple reservations. Memory allocated for each reserved region will be
* freed. If an IOMMU driver allocates additional resources per region, it is
* going to have to implement a custom callback.
*/
void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
struct iommu_resv_region *entry, *next;
list_for_each_entry_safe(entry, next, list, list)
kfree(entry);
}
EXPORT_SYMBOL(generic_iommu_put_resv_regions);
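As a purely illustrative sketch (not part of this patch set), a driver with no per-region resources can simply point its iommu_ops at the new helper, which is exactly what the AMD driver change above does; "my_get_resv_regions" and "my_iommu_ops" are hypothetical names:

static const struct iommu_ops my_iommu_ops = {
	.get_resv_regions = my_get_resv_regions,
	.put_resv_regions = generic_iommu_put_resv_regions,
	/* other callbacks ... */
};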
struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
size_t length, int prot,
enum iommu_resv_type type)
@@ -2247,6 +2293,7 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
region->type = type;
return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
static int
request_default_domain_for_dev(struct device *dev, unsigned long type)
...
@@ -233,7 +233,7 @@ static DEFINE_MUTEX(iova_cache_mutex);
struct iova *alloc_iova_mem(void)
{
-return kmem_cache_zalloc(iova_cache, GFP_ATOMIC);
+return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}
EXPORT_SYMBOL(alloc_iova_mem);
...
@@ -374,7 +374,7 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
u32 tmp;
/* TTBR0 */
-ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
+ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
...
@@ -279,8 +279,8 @@ static void __program_context(void __iomem *base, int ctx,
SET_V2PCFG(base, ctx, 0x3);
SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
-SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
-SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);
+SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
+SET_TTBR1(base, ctx, 0);
/* Set prrr and nmrr */
SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
...
@@ -367,7 +367,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
/* Update the pgtable base address register of the M4U HW */
if (!data->m4u_dom) {
data->m4u_dom = dom;
-writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
+writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
data->base + REG_MMU_PT_BASE_ADDR);
}
@@ -765,7 +765,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
if (m4u_dom)
-writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
+writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
base + REG_MMU_PT_BASE_ADDR);
return 0;
}
...
@@ -8,11 +8,12 @@
 #include <linux/export.h>
 #include <linux/iommu.h>
 #include <linux/limits.h>
-#include <linux/pci.h>
+#include <linux/module.h>
 #include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_iommu.h>
 #include <linux/of_pci.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/fsl/mc.h>
@@ -91,16 +92,16 @@ static int of_iommu_xlate(struct device *dev,
 {
 const struct iommu_ops *ops;
 struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
-int err;
+int ret;
 ops = iommu_ops_from_fwnode(fwnode);
 if ((ops && !ops->of_xlate) ||
 !of_device_is_available(iommu_spec->np))
 return NO_IOMMU;
-err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
-if (err)
-return err;
+ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
+if (ret)
+return ret;
 /*
 * The otherwise-empty fwspec handily serves to indicate the specific
 * IOMMU device we're waiting for, which will be useful if we ever get
@@ -109,7 +110,12 @@ static int of_iommu_xlate(struct device *dev,
 if (!ops)
 return driver_deferred_probe_check_state(dev);
-return ops->of_xlate(dev, iommu_spec);
+if (!try_module_get(ops->owner))
+return -ENODEV;
+ret = ops->of_xlate(dev, iommu_spec);
+module_put(ops->owner);
+return ret;
 }
 struct of_pci_iommu_alias_info {
@@ -179,6 +185,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
 .np = master_np,
 };
+pci_request_acs();
 err = pci_for_each_dma_alias(to_pci_dev(dev),
 of_pci_iommu_init, &info);
 } else if (dev_is_fsl_mc(dev)) {
@@ -196,8 +203,12 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
 if (err)
 break;
 }
-}
+fwspec = dev_iommu_fwspec_get(dev);
+if (!err && fwspec)
+of_property_read_u32(master_np, "pasid-num-bits",
+&fwspec->num_pasid_bits);
+}
 /*
 * Two success conditions can be represented by non-negative err here:
...
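of_iommu_configure() now copies the optional "pasid-num-bits" DT property (documented in the binding hunk at the top of this merge) into the master's fwspec. A rough sketch of how an IOMMU driver could consume it when a device is added; foo_master and FOO_MAX_SSID_BITS are illustrative names, not from this series:

#include <linux/iommu.h>
#include <linux/slab.h>

#define FOO_MAX_SSID_BITS       20      /* assumed hardware limit */

struct foo_master {
        unsigned int ssid_bits;
};

static int foo_iommu_add_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct foo_master *master;

        if (!fwspec)
                return -ENODEV;

        master = kzalloc(sizeof(*master), GFP_KERNEL);
        if (!master)
                return -ENOMEM;

        /* Clamp the firmware-described PASID width to what the IOMMU supports */
        master->ssid_bits = min_t(u32, fwspec->num_pasid_bits,
                                  FOO_MAX_SSID_BITS);
        fwspec->iommu_priv = master;

        return 0;
}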
@@ -201,7 +201,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
 fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);
-if (!(fsr & FSR_FAULT))
+if (!(fsr & ARM_SMMU_FSR_FAULT))
 return IRQ_NONE;
 fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
@@ -215,7 +215,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
 }
 iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
-iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE);
+iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);
 return IRQ_HANDLED;
 }
@@ -269,18 +269,15 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 /* TTBRs */
 iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
-pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] |
-FIELD_PREP(TTBRn_ASID, ctx->asid));
-iommu_writeq(ctx, ARM_SMMU_CB_TTBR1,
-pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] |
-FIELD_PREP(TTBRn_ASID, ctx->asid));
+pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
+FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
+iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);
 /* TCR */
 iommu_writel(ctx, ARM_SMMU_CB_TCR2,
-(pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) |
-FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM));
+arm_smmu_lpae_tcr2(&pgtbl_cfg));
 iommu_writel(ctx, ARM_SMMU_CB_TCR,
-pgtbl_cfg.arm_lpae_s1_cfg.tcr);
+arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);
 /* MAIRs (stage-1 only) */
 iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
@@ -289,11 +286,13 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);
 /* SCTLR */
-reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
-SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG;
+reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
+ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
+ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
+ARM_SMMU_SCTLR_CFCFG;
 if (IS_ENABLED(CONFIG_BIG_ENDIAN))
-reg |= SCTLR_E;
+reg |= ARM_SMMU_SCTLR_E;
 iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
...
@@ -837,14 +837,6 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
 iommu_dma_get_resv_regions(dev, head);
 }
-static void viommu_put_resv_regions(struct device *dev, struct list_head *head)
-{
-struct iommu_resv_region *entry, *next;
-list_for_each_entry_safe(entry, next, head, list)
-kfree(entry);
-}
 static struct iommu_ops viommu_ops;
 static struct virtio_driver virtio_iommu_drv;
@@ -914,7 +906,7 @@ static int viommu_add_device(struct device *dev)
 err_unlink_dev:
 iommu_device_unlink(&viommu->iommu, dev);
 err_free_dev:
-viommu_put_resv_regions(dev, &vdev->resv_regions);
+generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
 kfree(vdev);
 return ret;
@@ -932,7 +924,7 @@ static void viommu_remove_device(struct device *dev)
 iommu_group_remove_device(dev);
 iommu_device_unlink(&vdev->viommu->iommu, dev);
-viommu_put_resv_regions(dev, &vdev->resv_regions);
+generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
 kfree(vdev);
 }
@@ -961,7 +953,7 @@ static struct iommu_ops viommu_ops = {
 .remove_device = viommu_remove_device,
 .device_group = viommu_device_group,
 .get_resv_regions = viommu_get_resv_regions,
-.put_resv_regions = viommu_put_resv_regions,
+.put_resv_regions = generic_iommu_put_resv_regions,
 .of_xlate = viommu_of_xlate,
 };
...
@@ -69,6 +69,7 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
 dev->ats_enabled = 1;
 return 0;
 }
+EXPORT_SYMBOL_GPL(pci_enable_ats);
 /**
 * pci_disable_ats - disable the ATS capability
@@ -87,6 +88,7 @@ void pci_disable_ats(struct pci_dev *dev)
 dev->ats_enabled = 0;
 }
+EXPORT_SYMBOL_GPL(pci_disable_ats);
 void pci_restore_ats_state(struct pci_dev *dev)
 {
...
@@ -131,6 +131,7 @@ bool pci_ats_disabled(void)
 {
 return pcie_ats_disabled;
 }
+EXPORT_SYMBOL_GPL(pci_ats_disabled);
 /* Disable bridge_d3 for all PCIe ports */
 static bool pci_bridge_d3_disable;
...
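These exports exist so that ATS can be driven from a modular IOMMU driver, now that the ARM SMMU drivers can be built as modules. A hedged sketch of the usual opportunistic enable path; the foo_* wrapper is illustrative, not code from this series:

#include <linux/pci.h>
#include <linux/pci-ats.h>

/*
 * Opportunistically enable ATS on a PCI master. Failure is not fatal:
 * the device simply keeps sending untranslated requests.
 */
static void foo_enable_ats(struct pci_dev *pdev)
{
        int ret;

        if (pci_ats_disabled())         /* "pci=noats" on the command line */
                return;

        /* Smallest invalidation granule the IOMMU will send, as a page shift */
        ret = pci_enable_ats(pdev, PAGE_SHIFT);
        if (ret)
                dev_dbg(&pdev->dev, "ATS not enabled (%d)\n", ret);
}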
@@ -34,10 +34,13 @@
 #define VTD_STRIDE_SHIFT (9)
 #define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
-#define DMA_PTE_READ (1)
-#define DMA_PTE_WRITE (2)
-#define DMA_PTE_LARGE_PAGE (1 << 7)
-#define DMA_PTE_SNP (1 << 11)
+#define DMA_PTE_READ BIT_ULL(0)
+#define DMA_PTE_WRITE BIT_ULL(1)
+#define DMA_PTE_LARGE_PAGE BIT_ULL(7)
+#define DMA_PTE_SNP BIT_ULL(11)
+#define DMA_FL_PTE_PRESENT BIT_ULL(0)
+#define DMA_FL_PTE_XD BIT_ULL(63)
 #define CONTEXT_TT_MULTI_LEVEL 0
 #define CONTEXT_TT_DEV_IOTLB 1
@@ -435,8 +438,10 @@ enum {
 #define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
 #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
+#define VTD_FLAG_SVM_CAPABLE (1 << 2)
 extern int intel_iommu_sm;
+extern spinlock_t device_domain_lock;
 #define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
 #define pasid_supported(iommu) (sm_supported(iommu) && \
@@ -609,10 +614,11 @@ static inline void dma_clear_pte(struct dma_pte *pte)
 static inline u64 dma_pte_addr(struct dma_pte *pte)
 {
 #ifdef CONFIG_64BIT
-return pte->val & VTD_PAGE_MASK;
+return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
 #else
 /* Must have a full atomic 64-bit read */
-return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
+return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
+VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
 #endif
 }
@@ -645,6 +651,8 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 unsigned int size_order, u64 type);
 extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 u16 qdep, u64 addr, unsigned mask);
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+unsigned long npages, bool ih);
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 extern int dmar_ir_support(void);
@@ -656,9 +664,10 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info,
 void *data), void *data);
 void iommu_flush_write_buffer(struct intel_iommu *iommu);
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
+struct dmar_domain *find_domain(struct device *dev);
 #ifdef CONFIG_INTEL_IOMMU_SVM
-int intel_svm_init(struct intel_iommu *iommu);
+extern void intel_svm_check(struct intel_iommu *iommu);
 extern int intel_svm_enable_prq(struct intel_iommu *iommu);
 extern int intel_svm_finish_prq(struct intel_iommu *iommu);
@@ -686,6 +695,8 @@ struct intel_svm {
 };
 extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
+#else
+static inline void intel_svm_check(struct intel_iommu *iommu) {}
 #endif
 #ifdef CONFIG_INTEL_IOMMU_DEBUGFS
...
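In first-level (scalable-mode) page tables, bit 63 of a PTE is the execute-disable flag rather than part of the pfn, which is why dma_pte_addr() now clears DMA_FL_PTE_XD as well as applying VTD_PAGE_MASK. A small self-contained illustration with invented values (the local defines mirror the header only for the example):

#include <linux/bits.h>

#define EX_VTD_PAGE_SHIFT       12
#define EX_VTD_PAGE_MASK        (((u64)-1) << EX_VTD_PAGE_SHIFT)
#define EX_DMA_FL_PTE_PRESENT   BIT_ULL(0)
#define EX_DMA_FL_PTE_XD        BIT_ULL(63)

/* A first-level PTE: present, non-executable, pointing at 0x12345000 */
static const u64 example_pte = 0x12345000ULL | EX_DMA_FL_PTE_PRESENT |
                               EX_DMA_FL_PTE_XD;

/*
 * Masking with the page mask alone keeps bit 63 set and corrupts the
 * address; clearing the XD bit as well recovers 0x12345000.
 */
static inline u64 example_pte_addr(u64 pte)
{
        return pte & EX_VTD_PAGE_MASK & ~EX_DMA_FL_PTE_XD;
}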
@@ -83,12 +83,16 @@ struct io_pgtable_cfg {
 * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
 * on unmap, for DMA domains using the flush queue mechanism for
 * delayed invalidation.
+ *
+ * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
+ * for use in the upper half of a split address space.
 */
 #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
 #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
 #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
 #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
 #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4)
+#define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
 unsigned long quirks;
 unsigned long pgsize_bitmap;
 unsigned int ias;
@@ -100,18 +104,33 @@ struct io_pgtable_cfg {
 /* Low-level data specific to the table format */
 union {
 struct {
-u64 ttbr[2];
-u64 tcr;
+u64 ttbr;
+struct {
+u32 ips:3;
+u32 tg:2;
+u32 sh:2;
+u32 orgn:2;
+u32 irgn:2;
+u32 tsz:6;
+} tcr;
 u64 mair;
 } arm_lpae_s1_cfg;
 struct {
 u64 vttbr;
-u64 vtcr;
+struct {
+u32 ps:3;
+u32 tg:2;
+u32 sh:2;
+u32 orgn:2;
+u32 irgn:2;
+u32 sl:2;
+u32 tsz:6;
+} vtcr;
 } arm_lpae_s2_cfg;
 struct {
-u32 ttbr[2];
+u32 ttbr;
 u32 tcr;
 u32 nmrr;
 u32 prrr;
...
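io_pgtable_cfg no longer hands back a raw TCR register value; io-pgtable fills in the individual translation-control fields and each driver packs them into its own register layout. A sketch of such a helper, where the FOO_TCR_* masks are hypothetical stand-ins for a driver's real register definitions:

#include <linux/bitfield.h>
#include <linux/io-pgtable.h>

/* Hypothetical stage-1 TCR layout for some SMMU-like device */
#define FOO_TCR_TG0     GENMASK(15, 14)
#define FOO_TCR_SH0     GENMASK(13, 12)
#define FOO_TCR_ORGN0   GENMASK(11, 10)
#define FOO_TCR_IRGN0   GENMASK(9, 8)
#define FOO_TCR_T0SZ    GENMASK(5, 0)

/* Pack the decomposed fields into the device's TCR register format */
static u32 foo_lpae_tcr(const struct io_pgtable_cfg *cfg)
{
        return FIELD_PREP(FOO_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) |
               FIELD_PREP(FOO_TCR_SH0, cfg->arm_lpae_s1_cfg.tcr.sh) |
               FIELD_PREP(FOO_TCR_ORGN0, cfg->arm_lpae_s1_cfg.tcr.orgn) |
               FIELD_PREP(FOO_TCR_IRGN0, cfg->arm_lpae_s1_cfg.tcr.irgn) |
               FIELD_PREP(FOO_TCR_T0SZ, cfg->arm_lpae_s1_cfg.tcr.tsz);
}

The qcom_iommu hunk above does the same packing through the shared arm_smmu_lpae_tcr()/arm_smmu_lpae_tcr2() helpers instead of open-coding it.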
@@ -246,9 +246,10 @@ struct iommu_iotlb_gather {
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @page_response: handle page request response
 * @cache_invalidate: invalidate translation caches
-* @pgsize_bitmap: bitmap of all possible supported page sizes
 * @sva_bind_gpasid: bind guest pasid and mm
 * @sva_unbind_gpasid: unbind guest pasid and mm
+* @pgsize_bitmap: bitmap of all possible supported page sizes
+* @owner: Driver module providing these ops
 */
 struct iommu_ops {
 bool (*capable)(enum iommu_cap);
@@ -318,6 +319,7 @@ struct iommu_ops {
 int (*sva_unbind_gpasid)(struct device *dev, int pasid);
 unsigned long pgsize_bitmap;
+struct module *owner;
 };
 /**
@@ -386,12 +388,19 @@ void iommu_device_sysfs_remove(struct iommu_device *iommu);
 int iommu_device_link(struct iommu_device *iommu, struct device *link);
 void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
-static inline void iommu_device_set_ops(struct iommu_device *iommu,
+static inline void __iommu_device_set_ops(struct iommu_device *iommu,
 const struct iommu_ops *ops)
 {
 iommu->ops = ops;
 }
+#define iommu_device_set_ops(iommu, ops) \
+do { \
+struct iommu_ops *__ops = (struct iommu_ops *)(ops); \
+__ops->owner = THIS_MODULE; \
+__iommu_device_set_ops(iommu, __ops); \
+} while (0)
 static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
 struct fwnode_handle *fwnode)
 {
@@ -456,6 +465,8 @@ extern void iommu_set_fault_handler(struct iommu_domain *domain,
 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
+extern void generic_iommu_put_resv_regions(struct device *dev,
+struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
 extern int iommu_request_dma_domain_for_dev(struct device *dev);
 extern void iommu_set_default_passthrough(bool cmd_line);
@@ -570,6 +581,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev);
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @iommu_priv: IOMMU driver private data for this device
+* @num_pasid_bits: number of PASID bits supported by this device
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
@@ -578,6 +590,7 @@ struct iommu_fwspec {
 struct fwnode_handle *iommu_fwnode;
 void *iommu_priv;
 u32 flags;
+u32 num_pasid_bits;
 unsigned int num_ids;
 u32 ids[1];
 };
...
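Because iommu_device_set_ops() is now a macro that records THIS_MODULE in ops->owner, a driver that can be built as a module registers its ops exactly as before, and of_iommu_xlate() pins the module with try_module_get() while the ops are in use. A sketch of the registration side under those assumptions; the foo_* names are placeholders:

#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct iommu_ops foo_iommu_ops;  /* .of_xlate, .add_device, ... */

struct foo_iommu {
        struct iommu_device iommu;
};

static int foo_iommu_probe(struct platform_device *pdev)
{
        struct foo_iommu *foo;
        int ret;

        foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        ret = iommu_device_sysfs_add(&foo->iommu, &pdev->dev, NULL,
                                     dev_name(&pdev->dev));
        if (ret)
                return ret;

        /* Expands to __iommu_device_set_ops() and sets ops->owner = THIS_MODULE */
        iommu_device_set_ops(&foo->iommu, &foo_iommu_ops);
        iommu_device_set_fwnode(&foo->iommu, pdev->dev.fwnode);

        return iommu_device_register(&foo->iommu);
}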
@@ -33,6 +33,9 @@ void pci_disable_pasid(struct pci_dev *pdev);
 int pci_pasid_features(struct pci_dev *pdev);
 int pci_max_pasids(struct pci_dev *pdev);
 #else /* CONFIG_PCI_PASID */
+static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
+{ return -EINVAL; }
+static inline void pci_disable_pasid(struct pci_dev *pdev) { }
 static inline int pci_pasid_features(struct pci_dev *pdev)
 { return -EINVAL; }
 static inline int pci_max_pasids(struct pci_dev *pdev)
...
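The new stubs let PASID-aware code compile when CONFIG_PCI_PASID is disabled; callers simply see -EINVAL and fall back. Roughly, a caller looks like this (foo_try_enable_pasid is an illustrative wrapper):

#include <linux/pci.h>
#include <linux/pci-ats.h>

/* Returns true if the function ended up with PASID enabled. */
static bool foo_try_enable_pasid(struct pci_dev *pdev)
{
        int features = pci_pasid_features(pdev);

        if (features < 0)
                return false;   /* no PASID capability, or stubbed out */

        /* With CONFIG_PCI_PASID=n this is the inline stub returning -EINVAL */
        return pci_enable_pasid(pdev, features) == 0;
}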
@@ -49,12 +49,6 @@ DEFINE_EVENT(dma_map, map_single,
 TP_ARGS(dev, dev_addr, phys_addr, size)
 );
-DEFINE_EVENT(dma_map, map_sg,
-TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
-size_t size),
-TP_ARGS(dev, dev_addr, phys_addr, size)
-);
 DEFINE_EVENT(dma_map, bounce_map_single,
 TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
 size_t size),
@@ -99,6 +93,48 @@ DEFINE_EVENT(dma_unmap, bounce_unmap_single,
 TP_ARGS(dev, dev_addr, size)
 );
+DECLARE_EVENT_CLASS(dma_map_sg,
+TP_PROTO(struct device *dev, int index, int total,
+struct scatterlist *sg),
+TP_ARGS(dev, index, total, sg),
+TP_STRUCT__entry(
+__string(dev_name, dev_name(dev))
+__field(dma_addr_t, dev_addr)
+__field(phys_addr_t, phys_addr)
+__field(size_t, size)
+__field(int, index)
+__field(int, total)
+),
+TP_fast_assign(
+__assign_str(dev_name, dev_name(dev));
+__entry->dev_addr = sg->dma_address;
+__entry->phys_addr = sg_phys(sg);
+__entry->size = sg->dma_length;
+__entry->index = index;
+__entry->total = total;
+),
+TP_printk("dev=%s [%d/%d] dev_addr=0x%llx phys_addr=0x%llx size=%zu",
+__get_str(dev_name), __entry->index, __entry->total,
+(unsigned long long)__entry->dev_addr,
+(unsigned long long)__entry->phys_addr,
+__entry->size)
+);
+DEFINE_EVENT(dma_map_sg, map_sg,
+TP_PROTO(struct device *dev, int index, int total,
+struct scatterlist *sg),
+TP_ARGS(dev, index, total, sg)
+);
+DEFINE_EVENT(dma_map_sg, bounce_map_sg,
+TP_PROTO(struct device *dev, int index, int total,
+struct scatterlist *sg),
+TP_ARGS(dev, index, total, sg)
+);
 #endif /* _TRACE_INTEL_IOMMU_H */
 /* This part must be outside protection */
...
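The reworked event class records one entry per scatterlist element rather than one per list, so a caller is expected to emit it from inside its mapping loop. A sketch of that usage (not the exact driver code):

#include <linux/scatterlist.h>
#include <trace/events/intel_iommu.h>

static void foo_trace_sg_mapping(struct device *dev,
                                 struct scatterlist *sglist, int nelems)
{
        struct scatterlist *sg;
        int i;

        /* Emit "[i/total]" for every element once its dma_address is set */
        for_each_sg(sglist, sg, nelems, i)
                trace_map_sg(dev, i + 1, nelems, sg);
}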