Commit e1cbc3b9 authored by Linus Torvalds

Merge tag 'iommu-updates-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel:

 - Intel VT-d driver updates:
     - Domain force snooping improvement
     - Cleanups, no intentional functional changes

 - ARM SMMU driver updates:
     - Add new Qualcomm device-tree compatible strings
     - Add new Nvidia device-tree compatible string for Tegra234
     - Fix UAF in SMMUv3 shared virtual addressing code
     - Force identity-mapped domains for users of ye olde SMMU legacy
       binding
     - Minor cleanups

 - Fix a BUG_ON in the vfio_iommu_group_notifier:
     - Groundwork for upcoming iommufd framework
     - Introduction of DMA ownership so that an entire IOMMU group is
       either controlled by the kernel or by user-space (see the sketch
       after this list)

 - MT8195 and MT8186 support in the Mediatek IOMMU driver

 - Make forcing of cache-coherent DMA more coherent between IOMMU
   drivers

 - Fixes for thunderbolt device DMA protection

 - Various smaller fixes and cleanups
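
A condensed, illustrative sketch of the ownership contract (this is not
code from this merge; it assumes the iommu_group_claim_dma_owner() and
iommu_group_release_dma_owner() interfaces this series introduces, with
error handling elided):

    #include <linux/iommu.h>

    /* A user-space driver framework (VFIO-like) claims the whole group. */
    static int userspace_take_group(struct iommu_group *group, void *owner)
    {
            int ret;

            /*
             * Fails with -EBUSY while any device in the group is still
             * bound to a kernel driver using the IOMMU default domain.
             */
            ret = iommu_group_claim_dma_owner(group, owner);
            if (ret)
                    return ret;

            /* ... attach a user-managed iommu_domain and run the device ... */

            iommu_group_release_dma_owner(group);
            return 0;
    }

Kernel drivers keep using the default domain: the bus ->dma_configure()
hooks in the hunks below pin it with iommu_device_use_default_domain()
unless the driver sets driver_managed_dma.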

* tag 'iommu-updates-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (88 commits)
  iommu/amd: Increase timeout waiting for GA log enablement
  iommu/s390: Tolerate repeat attach_dev calls
  iommu/vt-d: Remove hard coding PGSNP bit in PASID entries
  iommu/vt-d: Remove domain_update_iommu_snooping()
  iommu/vt-d: Check domain force_snooping against attached devices
  iommu/vt-d: Block force-snoop domain attaching if no SC support
  iommu/vt-d: Size Page Request Queue to avoid overflow condition
  iommu/vt-d: Fold dmar_insert_one_dev_info() into its caller
  iommu/vt-d: Change return type of dmar_insert_one_dev_info()
  iommu/vt-d: Remove unneeded validity check on dev
  iommu/dma: Explicitly sort PCI DMA windows
  iommu/dma: Fix iova map result check bug
  iommu/mediatek: Fix NULL pointer dereference when printing dev_name
  iommu: iommu_group_claim_dma_owner() must always assign a domain
  iommu/arm-smmu: Force identity domains for legacy binding
  iommu/arm-smmu: Support Tegra234 SMMU
  dt-bindings: arm-smmu: Add compatible for Tegra234 SOC
  dt-bindings: arm-smmu: Document nvidia,memory-controller property
  iommu/arm-smmu-qcom: Add SC8280XP support
  dt-bindings: arm-smmu: Add compatible for Qualcomm SC8280XP
  ...
parents 3335d555 b0dacee2
......@@ -37,8 +37,10 @@ properties:
- qcom,sc7180-smmu-500
- qcom,sc7280-smmu-500
- qcom,sc8180x-smmu-500
- qcom,sc8280xp-smmu-500
- qcom,sdm845-smmu-500
- qcom,sdx55-smmu-500
- qcom,sdx65-smmu-500
- qcom,sm6350-smmu-500
- qcom,sm8150-smmu-500
- qcom,sm8250-smmu-500
......@@ -62,8 +64,9 @@ properties:
for improved performance.
items:
- enum:
- nvidia,tegra194-smmu
- nvidia,tegra186-smmu
- nvidia,tegra194-smmu
- nvidia,tegra234-smmu
- const: nvidia,smmu-500
- items:
- const: arm,mmu-500
......@@ -157,6 +160,17 @@ properties:
power-domains:
maxItems: 1
nvidia,memory-controller:
description: |
A phandle to the memory controller on NVIDIA Tegra186 and later SoCs.
The memory controller needs to be programmed with a mapping of memory
client IDs to ARM SMMU stream IDs.
If this property is absent, the mapping programmed by early firmware
will be used and it is not guaranteed that IOMMU translations will be
enabled for any given device.
$ref: /schemas/types.yaml#/definitions/phandle
required:
- compatible
- reg
......@@ -172,13 +186,20 @@ allOf:
compatible:
contains:
enum:
- nvidia,tegra194-smmu
- nvidia,tegra186-smmu
- nvidia,tegra194-smmu
- nvidia,tegra234-smmu
then:
properties:
reg:
minItems: 1
maxItems: 2
# The reference to the memory controller is required to ensure that the
# memory client to stream ID mapping can be done synchronously with the
# IOMMU attachment.
required:
- nvidia,memory-controller
else:
properties:
reg:
......
......@@ -76,7 +76,11 @@ properties:
- mediatek,mt8167-m4u # generation two
- mediatek,mt8173-m4u # generation two
- mediatek,mt8183-m4u # generation two
- mediatek,mt8186-iommu-mm # generation two
- mediatek,mt8192-m4u # generation two
- mediatek,mt8195-iommu-vdo # generation two
- mediatek,mt8195-iommu-vpp # generation two
- mediatek,mt8195-iommu-infra # generation two
- description: mt7623 generation one
items:
......@@ -119,7 +123,9 @@ properties:
dt-binding/memory/mt8167-larb-port.h for mt8167,
dt-binding/memory/mt8173-larb-port.h for mt8173,
dt-binding/memory/mt8183-larb-port.h for mt8183,
dt-binding/memory/mt8186-memory-port.h for mt8186,
dt-binding/memory/mt8192-larb-port.h for mt8192,
dt-binding/memory/mt8195-memory-port.h for mt8195.
power-domains:
maxItems: 1
......@@ -128,7 +134,6 @@ required:
- compatible
- reg
- interrupts
- mediatek,larbs
- '#iommu-cells'
allOf:
......@@ -140,7 +145,10 @@ allOf:
- mediatek,mt2701-m4u
- mediatek,mt2712-m4u
- mediatek,mt8173-m4u
- mediatek,mt8186-iommu-mm
- mediatek,mt8192-m4u
- mediatek,mt8195-iommu-vdo
- mediatek,mt8195-iommu-vpp
then:
required:
......@@ -150,12 +158,26 @@ allOf:
properties:
compatible:
enum:
- mediatek,mt8186-iommu-mm
- mediatek,mt8192-m4u
- mediatek,mt8195-iommu-vdo
- mediatek,mt8195-iommu-vpp
then:
required:
- power-domains
- if: # The IOMMUs don't have larbs.
not:
properties:
compatible:
contains:
const: mediatek,mt8195-iommu-infra
then:
required:
- mediatek,larbs
additionalProperties: false
examples:
......@@ -173,13 +195,3 @@ examples:
<&larb3>, <&larb4>, <&larb5>;
#iommu-cells = <1>;
};
- |
#include <dt-bindings/memory/mt8173-larb-port.h>
/* Example for a client device */
display {
compatible = "mediatek,mt8173-disp";
iommus = <&iommu M4U_PORT_DISP_OVL0>,
<&iommu M4U_PORT_DISP_RDMA0>;
};
......@@ -86,16 +86,6 @@ examples:
- |
#include <dt-bindings/clock/exynos5250.h>
gsc_0: scaler@13e00000 {
compatible = "samsung,exynos5-gsc";
reg = <0x13e00000 0x1000>;
interrupts = <0 85 0>;
power-domains = <&pd_gsc>;
clocks = <&clock CLK_GSCL0>;
clock-names = "gscl";
iommus = <&sysmmu_gsc0>;
};
sysmmu_gsc0: iommu@13e80000 {
compatible = "samsung,exynos-sysmmu";
reg = <0x13E80000 0x1000>;
......
......@@ -1375,14 +1375,6 @@ L: linux-input@vger.kernel.org
S: Odd fixes
F: drivers/input/mouse/bcm5974.c
APPLE DART IOMMU DRIVER
M: Sven Peter <sven@svenpeter.dev>
R: Alyssa Rosenzweig <alyssa@rosenzweig.io>
L: iommu@lists.linux-foundation.org
S: Maintained
F: Documentation/devicetree/bindings/iommu/apple,dart.yaml
F: drivers/iommu/apple-dart.c
APPLE PCIE CONTROLLER DRIVER
M: Alyssa Rosenzweig <alyssa@rosenzweig.io>
M: Marc Zyngier <maz@kernel.org>
......@@ -1834,6 +1826,7 @@ F: Documentation/devicetree/bindings/arm/apple/*
F: Documentation/devicetree/bindings/clock/apple,nco.yaml
F: Documentation/devicetree/bindings/i2c/apple,i2c.yaml
F: Documentation/devicetree/bindings/interrupt-controller/apple,*
F: Documentation/devicetree/bindings/iommu/apple,dart.yaml
F: Documentation/devicetree/bindings/iommu/apple,sart.yaml
F: Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
F: Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml
......@@ -1845,6 +1838,7 @@ F: arch/arm64/boot/dts/apple/
F: drivers/clk/clk-apple-nco.c
F: drivers/i2c/busses/i2c-pasemi-core.c
F: drivers/i2c/busses/i2c-pasemi-platform.c
F: drivers/iommu/apple-dart.c
F: drivers/irqchip/irq-apple-aic.c
F: drivers/mailbox/apple-mailbox.c
F: drivers/nvme/host/apple.c
......
......@@ -20,6 +20,10 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/acpi.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
......@@ -273,6 +277,36 @@ static void amba_shutdown(struct device *dev)
drv->shutdown(to_amba_device(dev));
}
static int amba_dma_configure(struct device *dev)
{
struct amba_driver *drv = to_amba_driver(dev->driver);
enum dev_dma_attr attr;
int ret = 0;
if (dev->of_node) {
ret = of_dma_configure(dev, dev->of_node, true);
} else if (has_acpi_companion(dev)) {
attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
ret = acpi_dma_configure(dev, attr);
}
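/* Unless the driver manages DMA itself (drv->driver_managed_dma, e.g. a
 * VFIO bus driver), take a reference on the group's IOMMU default domain
 * so kernel DMA keeps working while the driver is bound. */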
if (!ret && !drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
}
return ret;
}
static void amba_dma_cleanup(struct device *dev)
{
struct amba_driver *drv = to_amba_driver(dev->driver);
if (!drv->driver_managed_dma)
iommu_device_unuse_default_domain(dev);
}
#ifdef CONFIG_PM
/*
* Hooks to provide runtime PM of the pclk (bus clock). It is safe to
......@@ -341,7 +375,8 @@ struct bus_type amba_bustype = {
.probe = amba_probe,
.remove = amba_remove,
.shutdown = amba_shutdown,
.dma_configure = platform_dma_configure,
.dma_configure = amba_dma_configure,
.dma_cleanup = amba_dma_cleanup,
.pm = &amba_pm,
};
EXPORT_SYMBOL_GPL(amba_bustype);
......
......@@ -671,6 +671,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
if (dev->bus && dev->bus->dma_cleanup)
dev->bus->dma_cleanup(dev);
pinctrl_bind_failed:
device_links_no_driver(dev);
device_unbind_cleanup(dev);
......@@ -1199,6 +1201,9 @@ static void __device_release_driver(struct device *dev, struct device *parent)
device_remove(dev);
if (dev->bus && dev->bus->dma_cleanup)
dev->bus->dma_cleanup(dev);
device_links_driver_cleanup(dev);
device_unbind_cleanup(dev);
......
......@@ -30,6 +30,8 @@
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
#include "base.h"
#include "power/power.h"
......@@ -1454,9 +1456,9 @@ static void platform_shutdown(struct device *_dev)
drv->shutdown(dev);
}
int platform_dma_configure(struct device *dev)
static int platform_dma_configure(struct device *dev)
{
struct platform_driver *drv = to_platform_driver(dev->driver);
enum dev_dma_attr attr;
int ret = 0;
......@@ -1467,9 +1469,23 @@ int platform_dma_configure(struct device *dev)
ret = acpi_dma_configure(dev, attr);
}
if (!ret && !drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
}
return ret;
}
static void platform_dma_cleanup(struct device *dev)
{
struct platform_driver *drv = to_platform_driver(dev->driver);
if (!drv->driver_managed_dma)
iommu_device_unuse_default_domain(dev);
}
static const struct dev_pm_ops platform_dev_pm_ops = {
SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
USE_PLATFORM_PM_SLEEP_OPS
......@@ -1484,6 +1500,7 @@ struct bus_type platform_bus_type = {
.remove = platform_remove,
.shutdown = platform_shutdown,
.dma_configure = platform_dma_configure,
.dma_cleanup = platform_dma_cleanup,
.pm = &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
......
......@@ -21,6 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/acpi.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
#include "fsl-mc-private.h"
......@@ -140,15 +141,33 @@ static int fsl_mc_dma_configure(struct device *dev)
{
struct device *dma_dev = dev;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
u32 input_id = mc_dev->icid;
int ret;
while (dev_is_fsl_mc(dma_dev))
dma_dev = dma_dev->parent;
if (dev_of_node(dma_dev))
return of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
ret = of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
else
ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
if (!ret && !mc_drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
}
return ret;
}
static void fsl_mc_dma_cleanup(struct device *dev)
{
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
return acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
if (!mc_drv->driver_managed_dma)
iommu_device_unuse_default_domain(dev);
}
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
......@@ -312,6 +331,7 @@ struct bus_type fsl_mc_bus_type = {
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
.dma_configure = fsl_mc_dma_configure,
.dma_cleanup = fsl_mc_dma_cleanup,
.dev_groups = fsl_mc_dev_groups,
.bus_groups = fsl_mc_bus_groups,
};
......
......@@ -407,6 +407,7 @@
/* IOMMU IVINFO */
#define IOMMU_IVINFO_OFFSET 36
#define IOMMU_IVINFO_EFRSUP BIT(0)
#define IOMMU_IVINFO_DMA_REMAP BIT(1)
/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT 6
......@@ -449,6 +450,9 @@ extern struct irq_remap_table **irq_lookup_table;
/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;
/* IVRS indicates that pre-boot remapping was enabled */
extern bool amdr_ivrs_remap_support;
/* kmem_cache to get tables with 128 byte alignment */
extern struct kmem_cache *amd_iommu_irq_cache;
......
......@@ -83,7 +83,7 @@
#define ACPI_DEVFLAG_LINT1 0x80
#define ACPI_DEVFLAG_ATSDIS 0x10000000
#define LOOP_TIMEOUT 100000
#define LOOP_TIMEOUT 2000000
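/* Poll bound raised 20x so slower hardware has enough time to enable the
 * GA log (see "iommu/amd: Increase timeout waiting for GA log enablement"
 * in the shortlog above). */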
/*
* ACPI table definitions
*
......@@ -181,6 +181,7 @@ u32 amd_iommu_max_pasid __read_mostly = ~0;
bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;
bool amdr_ivrs_remap_support __read_mostly;
bool amd_iommu_force_isolation __read_mostly;
......@@ -325,6 +326,8 @@ static void __init early_iommu_features_init(struct amd_iommu *iommu,
{
if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
iommu->features = h->efr_reg;
if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
amdr_ivrs_remap_support = true;
}
/* Access to l1 and l2 indexed register spaces */
......@@ -1985,8 +1988,7 @@ static int __init amd_iommu_init_pci(void)
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
if (!ret)
print_iommu_info();
print_iommu_info();
out:
return ret;
......
......@@ -1838,20 +1838,10 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_domain_flush_complete(domain);
}
static void __init amd_iommu_init_dma_ops(void)
{
if (iommu_default_passthrough() || sme_me_mask)
x86_swiotlb_enable = true;
else
x86_swiotlb_enable = false;
}
int __init amd_iommu_init_api(void)
{
int err;
amd_iommu_init_dma_ops();
err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
if (err)
return err;
......@@ -2165,6 +2155,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return (irq_remapping_enabled == 1);
case IOMMU_CAP_NOEXEC:
return false;
case IOMMU_CAP_PRE_BOOT_PROTECTION:
return amdr_ivrs_remap_support;
default:
break;
}
......@@ -2274,6 +2266,12 @@ static int amd_iommu_def_domain_type(struct device *dev)
return 0;
}
static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
/* IOMMU_PTE_FC is always set */
return true;
}
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
......@@ -2296,6 +2294,7 @@ const struct iommu_ops amd_iommu_ops = {
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
.iotlb_sync = amd_iommu_iotlb_sync,
.free = amd_iommu_domain_free,
.enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
}
};
......
......@@ -956,6 +956,7 @@ static void __exit amd_iommu_v2_exit(void)
{
struct device_state *dev_state, *next;
unsigned long flags;
LIST_HEAD(freelist);
if (!amd_iommu_v2_supported())
return;
......@@ -975,11 +976,20 @@ static void __exit amd_iommu_v2_exit(void)
put_device_state(dev_state);
list_del(&dev_state->list);
free_device_state(dev_state);
list_add_tail(&dev_state->list, &freelist);
}
spin_unlock_irqrestore(&state_lock, flags);
/*
* Since free_device_state waits on the count to be zero,
* we need to free dev_state outside the spinlock.
*/
list_for_each_entry_safe(dev_state, next, &freelist, list) {
list_del(&dev_state->list);
free_device_state(dev_state);
}
destroy_workqueue(iommu_wq);
}
......
......@@ -6,6 +6,7 @@
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include "arm-smmu-v3.h"
......@@ -96,9 +97,14 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_ctx_desc *ret = NULL;
/* Don't free the mm until we release the ASID */
mmgrab(mm);
asid = arm64_mm_context_get(mm);
if (!asid)
return ERR_PTR(-ESRCH);
if (!asid) {
err = -ESRCH;
goto out_drop_mm;
}
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
if (!cd) {
......@@ -165,6 +171,8 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
kfree(cd);
out_put_context:
arm64_mm_context_put(mm);
out_drop_mm:
mmdrop(mm);
return err < 0 ? ERR_PTR(err) : ret;
}
......@@ -173,6 +181,7 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
if (arm_smmu_free_asid(cd)) {
/* Unpin ASID */
arm64_mm_context_put(cd->mm);
mmdrop(cd->mm);
kfree(cd);
}
}
......
......@@ -3770,6 +3770,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
if (resource_size(res) < arm_smmu_resource_size(smmu)) {
dev_err(dev, "MMIO region too small (%pr)\n", res);
return -EINVAL;
......
......@@ -211,7 +211,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
smmu->impl = &calxeda_impl;
if (of_device_is_compatible(np, "nvidia,tegra194-smmu") ||
if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
of_device_is_compatible(np, "nvidia,tegra194-smmu") ||
of_device_is_compatible(np, "nvidia,tegra186-smmu"))
return nvidia_smmu_impl_init(smmu);
......
......@@ -408,6 +408,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sc7180-smmu-500" },
{ .compatible = "qcom,sc7280-smmu-500" },
{ .compatible = "qcom,sc8180x-smmu-500" },
{ .compatible = "qcom,sc8280xp-smmu-500" },
{ .compatible = "qcom,sdm630-smmu-v2" },
{ .compatible = "qcom,sdm845-smmu-500" },
{ .compatible = "qcom,sm6125-smmu-500" },
......
......@@ -1574,6 +1574,9 @@ static int arm_smmu_def_domain_type(struct device *dev)
struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
const struct arm_smmu_impl *impl = cfg->smmu->impl;
if (using_legacy_binding)
return IOMMU_DOMAIN_IDENTITY;
if (impl && impl->def_domain_type)
return impl->def_domain_type(dev);
......@@ -2092,11 +2095,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (err)
return err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
ioaddr = res->start;
/*
* The resource size should effectively match the value of SMMU_TOP;
* stash that temporarily until we know PAGESIZE to validate it with.
......
......@@ -20,6 +20,7 @@
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
......@@ -414,6 +415,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
return 0;
}
static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
const struct list_head *b)
{
struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);
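/* list_sort() places @a after @b whenever the comparator returns a
 * positive value, so this boolean test sorts the windows by ascending
 * start address. */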
return res_a->res->start > res_b->res->start;
}
static int iova_reserve_pci_windows(struct pci_dev *dev,
struct iova_domain *iovad)
{
......@@ -432,6 +442,7 @@ static int iova_reserve_pci_windows(struct pci_dev *dev,
}
/* Get reserved DMA windows from host bridge */
list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
resource_list_for_each_entry(window, &bridge->dma_ranges) {
end = window->res->start - window->offset;
resv_iova:
......@@ -440,7 +451,7 @@ static int iova_reserve_pci_windows(struct pci_dev *dev,
hi = iova_pfn(iovad, end);
reserve_iova(iovad, lo, hi);
} else if (end < start) {
/* dma_ranges list should be sorted */
/* DMA ranges should be non-overlapping */
dev_err(&dev->dev,
"Failed to reserve IOVA [%pa-%pa]\n",
&start, &end);
......@@ -776,6 +787,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
dma_addr_t iova;
ssize_t ret;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain))
......@@ -813,8 +825,8 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
< size)
ret = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot);
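/* iommu_map_sg_atomic() returns a negative errno on failure or the number
 * of bytes mapped, so check for both an error and a short mapping. */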
if (ret < 0 || ret < size)
goto out_free_sg;
sgt->sgl->dma_address = iova;
......@@ -971,6 +983,11 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
void *padding_start;
size_t padding_size, aligned_size;
if (!is_swiotlb_active(dev)) {
dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
return DMA_MAPPING_ERROR;
}
aligned_size = iova_align(iovad, size);
phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
iova_mask(iovad), dir, attrs);
......@@ -1209,7 +1226,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* implementation - it knows better than we do.
*/
ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
if (ret < iova_len)
if (ret < 0 || ret < iova_len)
goto out_free_iova;
return __finalise_sg(dev, sg, nents, iova);
......
......@@ -11,6 +11,9 @@
#include <linux/fsl/guts.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <asm/mpc85xx.h>
......
......@@ -9,6 +9,7 @@
#include "fsl_pamu_domain.h"
#include <linux/platform_device.h>
#include <sysdev/fsl_pci.h>
/*
......
......@@ -533,33 +533,6 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
rcu_read_unlock();
}
static bool domain_update_iommu_snooping(struct intel_iommu *skip)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
bool ret = true;
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
if (iommu != skip) {
/*
* If the hardware is operating in the scalable mode,
* the snooping control is always supported since we
* always set PASID-table-entry.PGSNP bit if the domain
* is managed outside (UNMANAGED).
*/
if (!sm_supported(iommu) &&
!ecap_sc_support(iommu->ecap)) {
ret = false;
break;
}
}
}
rcu_read_unlock();
return ret;
}
static int domain_update_iommu_superpage(struct dmar_domain *domain,
struct intel_iommu *skip)
{
......@@ -641,7 +614,6 @@ static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
domain_update_iommu_coherency(domain);
domain->iommu_snooping = domain_update_iommu_snooping(NULL);
domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
/*
......@@ -2460,7 +2432,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
if (level == 5)
flags |= PASID_FLAG_FL5LP;
if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
if (domain->force_snooping)
flags |= PASID_FLAG_PAGE_SNOOP;
return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
......@@ -2474,64 +2446,6 @@ static bool dev_is_real_dma_subdevice(struct device *dev)
pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
}
static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
int bus, int devfn,
struct device *dev,
struct dmar_domain *domain)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
unsigned long flags;
int ret;
spin_lock_irqsave(&device_domain_lock, flags);
info->domain = domain;
spin_lock(&iommu->lock);
ret = domain_attach_iommu(domain, iommu);
spin_unlock(&iommu->lock);
if (ret) {
spin_unlock_irqrestore(&device_domain_lock, flags);
return NULL;
}
list_add(&info->link, &domain->devices);
spin_unlock_irqrestore(&device_domain_lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
dmar_remove_one_dev_info(dev);
return NULL;
}
/* Setup the PASID entry for requests without PASID: */
spin_lock_irqsave(&iommu->lock, flags);
if (hw_pass_through && domain_type_is_si(domain))
ret = intel_pasid_setup_pass_through(iommu, domain,
dev, PASID_RID2PASID);
else if (domain_use_first_level(domain))
ret = domain_setup_first_level(iommu, domain, dev,
PASID_RID2PASID);
else
ret = intel_pasid_setup_second_level(iommu, domain,
dev, PASID_RID2PASID);
spin_unlock_irqrestore(&iommu->lock, flags);
if (ret) {
dev_err(dev, "Setup RID2PASID failed\n");
dmar_remove_one_dev_info(dev);
return NULL;
}
}
if (dev && domain_context_mapping(domain, dev)) {
dev_err(dev, "Domain context map failed\n");
dmar_remove_one_dev_info(dev);
return NULL;
}
return domain;
}
static int iommu_domain_identity_map(struct dmar_domain *domain,
unsigned long first_vpfn,
unsigned long last_vpfn)
......@@ -2607,17 +2521,62 @@ static int __init si_domain_init(int hw)
static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct dmar_domain *ndomain;
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu;
unsigned long flags;
u8 bus, devfn;
int ret;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
if (ndomain != domain)
return -EBUSY;
spin_lock_irqsave(&device_domain_lock, flags);
info->domain = domain;
spin_lock(&iommu->lock);
ret = domain_attach_iommu(domain, iommu);
spin_unlock(&iommu->lock);
if (ret) {
spin_unlock_irqrestore(&device_domain_lock, flags);
return ret;
}
list_add(&info->link, &domain->devices);
spin_unlock_irqrestore(&device_domain_lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
dmar_remove_one_dev_info(dev);
return ret;
}
/* Setup the PASID entry for requests without PASID: */
spin_lock_irqsave(&iommu->lock, flags);
if (hw_pass_through && domain_type_is_si(domain))
ret = intel_pasid_setup_pass_through(iommu, domain,
dev, PASID_RID2PASID);
else if (domain_use_first_level(domain))
ret = domain_setup_first_level(iommu, domain, dev,
PASID_RID2PASID);
else
ret = intel_pasid_setup_second_level(iommu, domain,
dev, PASID_RID2PASID);
spin_unlock_irqrestore(&iommu->lock, flags);
if (ret) {
dev_err(dev, "Setup RID2PASID failed\n");
dmar_remove_one_dev_info(dev);
return ret;
}
}
ret = domain_context_mapping(domain, dev);
if (ret) {
dev_err(dev, "Domain context map failed\n");
dmar_remove_one_dev_info(dev);
return ret;
}
return 0;
}
......@@ -3607,12 +3566,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
iommu->name);
return -ENXIO;
}
if (!ecap_sc_support(iommu->ecap) &&
domain_update_iommu_snooping(iommu)) {
pr_warn("%s: Doesn't support snooping.\n",
iommu->name);
return -ENXIO;
}
sp = domain_update_iommu_superpage(NULL, iommu) - 1;
if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
pr_warn("%s: Doesn't support large page.\n",
......@@ -4304,7 +4258,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
domain->agaw = width_to_agaw(adjust_width);
domain->iommu_coherency = false;
domain->iommu_snooping = false;
domain->iommu_superpage = 0;
domain->max_addr = 0;
......@@ -4369,6 +4322,9 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
if (!iommu)
return -ENODEV;
if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
return -EOPNOTSUPP;
/* check if this iommu agaw is sufficient for max mapped address */
addr_width = agaw_to_width(iommu->agaw);
if (addr_width > cap_mgaw(iommu->cap))
......@@ -4443,7 +4399,7 @@ static int intel_iommu_map(struct iommu_domain *domain,
prot |= DMA_PTE_READ;
if (iommu_prot & IOMMU_WRITE)
prot |= DMA_PTE_WRITE;
if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
if (dmar_domain->set_pte_snp)
prot |= DMA_PTE_SNP;
max_addr = iova + size;
......@@ -4566,12 +4522,71 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
return phys;
}
static bool domain_support_force_snooping(struct dmar_domain *domain)
{
struct device_domain_info *info;
bool support = true;
assert_spin_locked(&device_domain_lock);
list_for_each_entry(info, &domain->devices, link) {
if (!ecap_sc_support(info->iommu->ecap)) {
support = false;
break;
}
}
return support;
}
static void domain_set_force_snooping(struct dmar_domain *domain)
{
struct device_domain_info *info;
assert_spin_locked(&device_domain_lock);
/*
* Second level page table supports per-PTE snoop control. The
* iommu_map() interface will handle this by setting SNP bit.
*/
if (!domain_use_first_level(domain)) {
domain->set_pte_snp = true;
return;
}
list_for_each_entry(info, &domain->devices, link)
intel_pasid_setup_page_snoop_control(info->iommu, info->dev,
PASID_RID2PASID);
}
static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long flags;
if (dmar_domain->force_snooping)
return true;
spin_lock_irqsave(&device_domain_lock, flags);
if (!domain_support_force_snooping(dmar_domain)) {
spin_unlock_irqrestore(&device_domain_lock, flags);
return false;
}
domain_set_force_snooping(dmar_domain);
dmar_domain->force_snooping = true;
spin_unlock_irqrestore(&device_domain_lock, flags);
return true;
}
static bool intel_iommu_capable(enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
return domain_update_iommu_snooping(NULL);
return true;
if (cap == IOMMU_CAP_INTR_REMAP)
return irq_remapping_enabled == 1;
if (cap == IOMMU_CAP_PRE_BOOT_PROTECTION)
return dmar_platform_optin();
return false;
}
......@@ -4919,6 +4934,7 @@ const struct iommu_ops intel_iommu_ops = {
.iotlb_sync = intel_iommu_tlb_sync,
.iova_to_phys = intel_iommu_iova_to_phys,
.free = intel_iommu_domain_free,
.enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
}
};
......
......@@ -710,9 +710,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
pasid_set_fault_enable(pte);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
pasid_set_pgsnp(pte);
/*
* Since it is a second level only translation setup, we should
* set SRE bit as well (addresses are expected to be GPAs).
......@@ -762,3 +759,45 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
return 0;
}
/*
* Set the page snoop control for a pasid entry which has been set up.
*/
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
struct device *dev, u32 pasid)
{
struct pasid_entry *pte;
u16 did;
spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
if (WARN_ON(!pte || !pasid_pte_is_present(pte))) {
spin_unlock(&iommu->lock);
return;
}
pasid_set_pgsnp(pte);
did = pasid_get_domain_id(pte);
spin_unlock(&iommu->lock);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
/*
* VT-d spec 3.4, Table 23, gives guidance for cache invalidation:
*
* - PASID-selective-within-Domain PASID-cache invalidation
* - PASID-selective PASID-based IOTLB invalidation
* - If (pasid is RID_PASID)
* - Global Device-TLB invalidation to affected functions
* Else
* - PASID-based Device-TLB invalidation (with S=1 and
* Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
*/
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
/* Device IOTLB doesn't need to be flushed in caching mode. */
if (!cap_caching_mode(iommu->cap))
devtlb_invalidation_with_pasid(iommu, dev, pasid);
}
......@@ -123,4 +123,6 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
bool fault_ignore);
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid);
void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
struct device *dev, u32 pasid);
#endif /* __INTEL_PASID_H */
......@@ -583,7 +583,7 @@ static void print_ctx_regs(void __iomem *base, int ctx)
GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}
static void insert_iommu_master(struct device *dev,
static int insert_iommu_master(struct device *dev,
struct msm_iommu_dev **iommu,
struct of_phandle_args *spec)
{
......@@ -592,6 +592,10 @@ static void insert_iommu_master(struct device *dev,
if (list_empty(&(*iommu)->ctx_list)) {
master = kzalloc(sizeof(*master), GFP_ATOMIC);
if (!master) {
dev_err(dev, "Failed to allocate iommu_master\n");
return -ENOMEM;
}
master->of_node = dev->of_node;
list_add(&master->list, &(*iommu)->ctx_list);
dev_iommu_priv_set(dev, master);
......@@ -601,30 +605,34 @@ static void insert_iommu_master(struct device *dev,
if (master->mids[sid] == spec->args[0]) {
dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
sid);
return;
return 0;
}
master->mids[master->num_mids++] = spec->args[0];
return 0;
}
static int qcom_iommu_of_xlate(struct device *dev,
struct of_phandle_args *spec)
{
struct msm_iommu_dev *iommu;
struct msm_iommu_dev *iommu = NULL, *iter;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&msm_iommu_lock, flags);
list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
if (iommu->dev->of_node == spec->np)
list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
if (iter->dev->of_node == spec->np) {
iommu = iter;
break;
}
}
if (!iommu || iommu->dev->of_node != spec->np) {
if (!iommu) {
ret = -ENODEV;
goto fail;
}
insert_iommu_master(dev, &iommu, spec);
ret = insert_iommu_master(dev, &iommu, spec);
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2016 MediaTek Inc.
* Author: Honghui Zhang <honghui.zhang@mediatek.com>
*/
#ifndef _MTK_IOMMU_H_
#define _MTK_IOMMU_H_
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <soc/mediatek/smi.h>
#include <dt-bindings/memory/mtk-memory-port.h>
#define MTK_LARB_COM_MAX 8
#define MTK_LARB_SUBCOM_MAX 4
#define MTK_IOMMU_GROUP_MAX 8
struct mtk_iommu_suspend_reg {
union {
u32 standard_axi_mode;/* v1 */
u32 misc_ctrl;/* v2 */
};
u32 dcm_dis;
u32 ctrl_reg;
u32 int_control0;
u32 int_main_control;
u32 ivrp_paddr;
u32 vld_pa_rng;
u32 wr_len_ctrl;
};
enum mtk_iommu_plat {
M4U_MT2701,
M4U_MT2712,
M4U_MT6779,
M4U_MT8167,
M4U_MT8173,
M4U_MT8183,
M4U_MT8192,
};
struct mtk_iommu_iova_region;
struct mtk_iommu_plat_data {
enum mtk_iommu_plat m4u_plat;
u32 flags;
u32 inv_sel_reg;
unsigned int iova_region_nr;
const struct mtk_iommu_iova_region *iova_region;
unsigned char larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
};
struct mtk_iommu_domain;
struct mtk_iommu_data {
void __iomem *base;
int irq;
struct device *dev;
struct clk *bclk;
phys_addr_t protect_base; /* protect memory base */
struct mtk_iommu_suspend_reg reg;
struct mtk_iommu_domain *m4u_dom;
struct iommu_group *m4u_group[MTK_IOMMU_GROUP_MAX];
bool enable_4GB;
spinlock_t tlb_lock; /* lock for tlb range flush */
struct iommu_device iommu;
const struct mtk_iommu_plat_data *plat_data;
struct device *smicomm_dev;
struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */
struct list_head list;
struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
};
static inline int mtk_iommu_bind(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
return component_bind_all(dev, &data->larb_imu);
}
static inline void mtk_iommu_unbind(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
component_unbind_all(dev, &data->larb_imu);
}
#endif
......@@ -99,7 +99,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
if (!domain_device)
return -ENOMEM;
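/* A device that is already attached to an s390_domain has no DMA API
 * translation left to tear down; only its previous IOAT registration
 * needs to be replaced (below). */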
if (zdev->dma_table) {
if (zdev->dma_table && !zdev->s390_domain) {
cc = zpci_dma_exit_device(zdev);
if (cc) {
rc = -EIO;
......@@ -107,6 +107,9 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
}
}
if (zdev->s390_domain)
zpci_unregister_ioat(zdev, 0);
zdev->dma_table = s390_domain->dma_table;
cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
virt_to_phys(zdev->dma_table));
......@@ -136,7 +139,13 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
return 0;
out_restore:
zpci_dma_init_device(zdev);
if (!zdev->s390_domain) {
zpci_dma_init_device(zdev);
} else {
zdev->dma_table = zdev->s390_domain->dma_table;
zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
virt_to_phys(zdev->dma_table));
}
out_free:
kfree(domain_device);
......@@ -167,7 +176,7 @@ static void s390_iommu_detach_device(struct iommu_domain *domain,
}
spin_unlock_irqrestore(&s390_domain->list_lock, flags);
if (found) {
if (found && (zdev->s390_domain == s390_domain)) {
zdev->s390_domain = NULL;
zpci_unregister_ioat(zdev, 0);
zpci_dma_init_device(zdev);
......
......@@ -369,7 +369,6 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
dev_dbg(dev, "Parsing dma-ranges property...\n");
for_each_of_pci_range(&parser, &range) {
struct resource_entry *entry;
/*
* If we failed translation or got a zero-sized region
* then skip this range
......@@ -393,12 +392,7 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
goto failed;
}
/* Keep the resource list sorted */
resource_list_for_each_entry(entry, ib_resources)
if (entry->res->start > res->start)
break;
pci_add_resource_offset(&entry->node, res,
pci_add_resource_offset(ib_resources, res,
res->start - range.pci_addr);
}
......
......@@ -20,6 +20,7 @@
#include <linux/of_device.h>
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu.h>
#include "pci.h"
#include "pcie/portdrv.h"
......@@ -1620,6 +1621,7 @@ static int pci_bus_num_vf(struct device *dev)
*/
static int pci_dma_configure(struct device *dev)
{
struct pci_driver *driver = to_pci_driver(dev->driver);
struct device *bridge;
int ret = 0;
......@@ -1635,9 +1637,24 @@ static int pci_dma_configure(struct device *dev)
}
pci_put_host_bridge_device(bridge);
if (!ret && !driver->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
}
return ret;
}
static void pci_dma_cleanup(struct device *dev)
{
struct pci_driver *driver = to_pci_driver(dev->driver);
if (!driver->driver_managed_dma)
iommu_device_unuse_default_domain(dev);
}
struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
......@@ -1651,6 +1668,7 @@ struct bus_type pci_bus_type = {
.pm = PCI_PM_OPS_PTR,
.num_vf = pci_bus_num_vf,
.dma_configure = pci_dma_configure,
.dma_cleanup = pci_dma_cleanup,
};
EXPORT_SYMBOL(pci_bus_type);
......
......@@ -36,6 +36,7 @@ static struct pci_driver stub_driver = {
.name = "pci-stub",
.id_table = NULL, /* only dynamic id's */
.probe = pci_stub_probe,
.driver_managed_dma = true,
};
static int __init pci_stub_init(void)
......
......@@ -202,6 +202,8 @@ static struct pci_driver pcie_portdriver = {
.err_handler = &pcie_portdrv_err_handler,
.driver_managed_dma = true,
.driver.pm = PCIE_PORTDRV_PM_OPS,
};
......
......@@ -7,9 +7,7 @@
*/
#include <linux/device.h>
#include <linux/dmar.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
......@@ -257,13 +255,9 @@ static ssize_t iommu_dma_protection_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
/*
* Kernel DMA protection is a feature where Thunderbolt security is
* handled natively using IOMMU. It is enabled when IOMMU is
* enabled and ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
*/
return sprintf(buf, "%d\n",
iommu_present(&pci_bus_type) && dmar_platform_optin());
struct tb *tb = container_of(dev, struct tb, dev);
return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection);
}
static DEVICE_ATTR_RO(iommu_dma_protection);
......
......@@ -15,9 +15,11 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/string_helpers.h>
#include "nhi.h"
#include "nhi_regs.h"
......@@ -1103,6 +1105,47 @@ static void nhi_check_quirks(struct tb_nhi *nhi)
nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
}
static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data)
{
if (!pdev->external_facing ||
!device_iommu_capable(&pdev->dev, IOMMU_CAP_PRE_BOOT_PROTECTION))
return 0;
*(bool *)data = true;
return 1; /* Stop walking */
}
static void nhi_check_iommu(struct tb_nhi *nhi)
{
struct pci_bus *bus = nhi->pdev->bus;
bool port_ok = false;
/*
* Ideally what we'd do here is grab every PCI device that
* represents a tunnelling adapter for this NHI and check their
* status directly, but unfortunately USB4 seems to make it
* obnoxiously difficult to reliably make any correlation.
*
* So for now we'll have to bodge it... Hoping that the system
* is at least sane enough that an adapter is in the same PCI
* segment as its NHI, if we can find *something* on that segment
* which meets the requirements for Kernel DMA Protection, we'll
* take that to imply that firmware is aware and has (hopefully)
* done the right thing in general. We need to know that the PCI
* layer has seen the ExternalFacingPort property which will then
* inform the IOMMU layer to enforce the complete "untrusted DMA"
* flow, but also that the IOMMU driver itself can be trusted not
* to have been subverted by a pre-boot DMA attack.
*/
while (bus->parent)
bus = bus->parent;
pci_walk_bus(bus, nhi_check_iommu_pdev, &port_ok);
nhi->iommu_dma_protection = port_ok;
dev_dbg(&nhi->pdev->dev, "IOMMU DMA protection is %s\n",
str_enabled_disabled(port_ok));
}
static int nhi_init_msi(struct tb_nhi *nhi)
{
struct pci_dev *pdev = nhi->pdev;
......@@ -1220,6 +1263,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
nhi_check_quirks(nhi);
nhi_check_iommu(nhi);
res = nhi_init_msi(nhi);
if (res) {
......
......@@ -588,6 +588,7 @@ static struct fsl_mc_driver vfio_fsl_mc_driver = {
.name = "vfio-fsl-mc",
.owner = THIS_MODULE,
},
.driver_managed_dma = true,
};
static int __init vfio_fsl_mc_driver_init(void)
......
......@@ -194,6 +194,7 @@ static struct pci_driver vfio_pci_driver = {
.remove = vfio_pci_remove,
.sriov_configure = vfio_pci_sriov_configure,
.err_handler = &vfio_pci_core_err_handlers,
.driver_managed_dma = true,
};
static void __init vfio_pci_fill_ids(void)
......
......@@ -95,6 +95,7 @@ static struct amba_driver vfio_amba_driver = {
.name = "vfio-amba",
.owner = THIS_MODULE,
},
.driver_managed_dma = true,
};
module_amba_driver(vfio_amba_driver);
......
......@@ -76,6 +76,7 @@ static struct platform_driver vfio_platform_driver = {
.driver = {
.name = "vfio-platform",
},
.driver_managed_dma = true,
};
module_platform_driver(vfio_platform_driver);
......
......@@ -84,8 +84,8 @@ struct vfio_domain {
struct iommu_domain *domain;
struct list_head next;
struct list_head group_list;
int prot; /* IOMMU_CACHE */
bool fgsp; /* Fine-grained super pages */
bool fgsp : 1; /* Fine-grained super pages */
bool enforce_cache_coherency : 1;
};
struct vfio_dma {
......@@ -1461,7 +1461,7 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
list_for_each_entry(d, &iommu->domain_list, next) {
ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
npage << PAGE_SHIFT, prot | d->prot);
npage << PAGE_SHIFT, prot | IOMMU_CACHE);
if (ret)
goto unwind;
......@@ -1771,7 +1771,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
}
ret = iommu_map(domain->domain, iova, phys,
size, dma->prot | domain->prot);
size, dma->prot | IOMMU_CACHE);
if (ret) {
if (!dma->iommu_mapped) {
vfio_unpin_pages_remote(dma, iova,
......@@ -1859,7 +1859,7 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
return;
ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
IOMMU_READ | IOMMU_WRITE | domain->prot);
IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
if (!ret) {
size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
......@@ -2267,8 +2267,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
goto out_detach;
}
if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
domain->prot |= IOMMU_CACHE;
/*
* If the IOMMU can block non-coherent operations (ie PCIe TLPs with
* no-snoop set) then VFIO always turns this feature on because on Intel
* platforms it optimizes KVM to disable wbinvd emulation.
*/
if (domain->domain->ops->enforce_cache_coherency)
domain->enforce_cache_coherency =
domain->domain->ops->enforce_cache_coherency(
domain->domain);
/*
* Try to match an existing compatible domain. We don't want to
......@@ -2279,7 +2286,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
*/
list_for_each_entry(d, &iommu->domain_list, next) {
if (d->domain->ops == domain->domain->ops &&
d->prot == domain->prot) {
d->enforce_cache_coherency ==
domain->enforce_cache_coherency) {
iommu_detach_group(domain->domain, group->iommu_group);
if (!iommu_attach_group(d->domain,
group->iommu_group)) {
......@@ -2611,14 +2619,14 @@ static void vfio_iommu_type1_release(void *iommu_data)
kfree(iommu);
}
static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
static int vfio_domains_have_enforce_cache_coherency(struct vfio_iommu *iommu)
{
struct vfio_domain *domain;
int ret = 1;
mutex_lock(&iommu->lock);
list_for_each_entry(domain, &iommu->domain_list, next) {
if (!(domain->prot & IOMMU_CACHE)) {
if (!(domain->enforce_cache_coherency)) {
ret = 0;
break;
}
......@@ -2641,7 +2649,7 @@ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
case VFIO_DMA_CC_IOMMU:
if (!iommu)
return 0;
return vfio_domains_have_iommu_cache(iommu);
return vfio_domains_have_enforce_cache_coherency(iommu);
default:
return 0;
}
......
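With the old domain->prot (IOMMU_CACHE) bookkeeping gone, the
VFIO_DMA_CC_IOMMU extension now reports whether every container domain
enforces cache coherency. A hypothetical userspace probe using the
standard VFIO uAPI (not part of this diff) could check it like this:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    int main(void)
    {
            int container = open("/dev/vfio/vfio", O_RDWR);
            int cc;

            if (container < 0)
                    return 1;
            /* Non-zero only once an IOMMU is set and all domains enforce snooping. */
            cc = ioctl(container, VFIO_CHECK_EXTENSION, VFIO_DMA_CC_IOMMU);
            printf("VFIO_DMA_CC_IOMMU: %d\n", cc);
            return 0;
    }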
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 MediaTek Inc.
*
* Author: Anan Sun <anan.sun@mediatek.com>
* Author: Yong Wu <yong.wu@mediatek.com>
*/
#ifndef _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
#define _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
#include <dt-bindings/memory/mtk-memory-port.h>
/*
* The MM IOMMU supports a 16GB DMA address space, separated into four
* ranges: 0 ~ 4G; 4G ~ 8G; 8G ~ 12G; 12G ~ 16G. Masters can be placed
* in any one of these regions, BUT:
* a) Make sure all the ports inside a larb are in one range.
* b) The iova of any master can NOT cross the 4G/8G/12G boundary.
*
* This is the suggested mapping in this SoC:
*
* modules dma-address-region larbs-ports
* disp 0 ~ 4G larb0/1/2
* vcodec 4G ~ 8G larb4/7
* cam/mdp 8G ~ 12G the other larbs.
* N/A 12G ~ 16G
* CCU0 0x24000_0000 ~ 0x243ff_ffff larb13: port 9/10
* CCU1 0x24400_0000 ~ 0x247ff_ffff larb14: port 4/5
*/
/* MM IOMMU ports */
/* LARB 0 -- MMSYS */
#define IOMMU_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(0, 0)
#define IOMMU_PORT_L0_REVERSED MTK_M4U_ID(0, 1)
#define IOMMU_PORT_L0_OVL_RDMA0 MTK_M4U_ID(0, 2)
#define IOMMU_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 3)
/* LARB 1 -- MMSYS */
#define IOMMU_PORT_L1_DISP_RDMA1 MTK_M4U_ID(1, 0)
#define IOMMU_PORT_L1_OVL_2L_RDMA0 MTK_M4U_ID(1, 1)
#define IOMMU_PORT_L1_DISP_RDMA0 MTK_M4U_ID(1, 2)
#define IOMMU_PORT_L1_DISP_WDMA0 MTK_M4U_ID(1, 3)
#define IOMMU_PORT_L1_DISP_FAKE1 MTK_M4U_ID(1, 4)
/* LARB 2 -- MMSYS */
#define IOMMU_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0)
#define IOMMU_PORT_L2_MDP_RDMA1 MTK_M4U_ID(2, 1)
#define IOMMU_PORT_L2_MDP_WROT0 MTK_M4U_ID(2, 2)
#define IOMMU_PORT_L2_MDP_WROT1 MTK_M4U_ID(2, 3)
#define IOMMU_PORT_L2_DISP_FAKE0 MTK_M4U_ID(2, 4)
/* LARB 4 -- VDEC */
#define IOMMU_PORT_L4_HW_VDEC_MC_EXT MTK_M4U_ID(4, 0)
#define IOMMU_PORT_L4_HW_VDEC_UFO_EXT MTK_M4U_ID(4, 1)
#define IOMMU_PORT_L4_HW_VDEC_PP_EXT MTK_M4U_ID(4, 2)
#define IOMMU_PORT_L4_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(4, 3)
#define IOMMU_PORT_L4_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(4, 4)
#define IOMMU_PORT_L4_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(4, 5)
#define IOMMU_PORT_L4_HW_VDEC_TILE_EXT MTK_M4U_ID(4, 6)
#define IOMMU_PORT_L4_HW_VDEC_VLD_EXT MTK_M4U_ID(4, 7)
#define IOMMU_PORT_L4_HW_VDEC_VLD2_EXT MTK_M4U_ID(4, 8)
#define IOMMU_PORT_L4_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(4, 9)
#define IOMMU_PORT_L4_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(4, 10)
#define IOMMU_PORT_L4_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(4, 11)
#define IOMMU_PORT_L4_HW_MINI_MDP_R0_EXT MTK_M4U_ID(4, 12)
#define IOMMU_PORT_L4_HW_MINI_MDP_W0_EXT MTK_M4U_ID(4, 13)
/* LARB 7 -- VENC */
#define IOMMU_PORT_L7_VENC_RCPU MTK_M4U_ID(7, 0)
#define IOMMU_PORT_L7_VENC_REC MTK_M4U_ID(7, 1)
#define IOMMU_PORT_L7_VENC_BSDMA MTK_M4U_ID(7, 2)
#define IOMMU_PORT_L7_VENC_SV_COMV MTK_M4U_ID(7, 3)
#define IOMMU_PORT_L7_VENC_RD_COMV MTK_M4U_ID(7, 4)
#define IOMMU_PORT_L7_VENC_CUR_LUMA MTK_M4U_ID(7, 5)
#define IOMMU_PORT_L7_VENC_CUR_CHROMA MTK_M4U_ID(7, 6)
#define IOMMU_PORT_L7_VENC_REF_LUMA MTK_M4U_ID(7, 7)
#define IOMMU_PORT_L7_VENC_REF_CHROMA MTK_M4U_ID(7, 8)
#define IOMMU_PORT_L7_JPGENC_Y_RDMA MTK_M4U_ID(7, 9)
#define IOMMU_PORT_L7_JPGENC_C_RDMA MTK_M4U_ID(7, 10)
#define IOMMU_PORT_L7_JPGENC_Q_TABLE MTK_M4U_ID(7, 11)
#define IOMMU_PORT_L7_JPGENC_BSDMA MTK_M4U_ID(7, 12)
/* LARB 8 -- WPE */
#define IOMMU_PORT_L8_WPE_RDMA_0 MTK_M4U_ID(8, 0)
#define IOMMU_PORT_L8_WPE_RDMA_1 MTK_M4U_ID(8, 1)
#define IOMMU_PORT_L8_WPE_WDMA_0 MTK_M4U_ID(8, 2)
/* LARB 9 -- IMG-1 */
#define IOMMU_PORT_L9_IMG_IMGI_D1 MTK_M4U_ID(9, 0)
#define IOMMU_PORT_L9_IMG_IMGBI_D1 MTK_M4U_ID(9, 1)
#define IOMMU_PORT_L9_IMG_DMGI_D1 MTK_M4U_ID(9, 2)
#define IOMMU_PORT_L9_IMG_DEPI_D1 MTK_M4U_ID(9, 3)
#define IOMMU_PORT_L9_IMG_LCE_D1 MTK_M4U_ID(9, 4)
#define IOMMU_PORT_L9_IMG_SMTI_D1 MTK_M4U_ID(9, 5)
#define IOMMU_PORT_L9_IMG_SMTO_D2 MTK_M4U_ID(9, 6)
#define IOMMU_PORT_L9_IMG_SMTO_D1 MTK_M4U_ID(9, 7)
#define IOMMU_PORT_L9_IMG_CRZO_D1 MTK_M4U_ID(9, 8)
#define IOMMU_PORT_L9_IMG_IMG3O_D1 MTK_M4U_ID(9, 9)
#define IOMMU_PORT_L9_IMG_VIPI_D1 MTK_M4U_ID(9, 10)
#define IOMMU_PORT_L9_IMG_SMTI_D5 MTK_M4U_ID(9, 11)
#define IOMMU_PORT_L9_IMG_TIMGO_D1 MTK_M4U_ID(9, 12)
#define IOMMU_PORT_L9_IMG_UFBC_W0 MTK_M4U_ID(9, 13)
#define IOMMU_PORT_L9_IMG_UFBC_R0 MTK_M4U_ID(9, 14)
#define IOMMU_PORT_L9_IMG_WPE_RDMA1 MTK_M4U_ID(9, 15)
#define IOMMU_PORT_L9_IMG_WPE_RDMA0 MTK_M4U_ID(9, 16)
#define IOMMU_PORT_L9_IMG_WPE_WDMA MTK_M4U_ID(9, 17)
#define IOMMU_PORT_L9_IMG_MFB_RDMA0 MTK_M4U_ID(9, 18)
#define IOMMU_PORT_L9_IMG_MFB_RDMA1 MTK_M4U_ID(9, 19)
#define IOMMU_PORT_L9_IMG_MFB_RDMA2 MTK_M4U_ID(9, 20)
#define IOMMU_PORT_L9_IMG_MFB_RDMA3 MTK_M4U_ID(9, 21)
#define IOMMU_PORT_L9_IMG_MFB_RDMA4 MTK_M4U_ID(9, 22)
#define IOMMU_PORT_L9_IMG_MFB_RDMA5 MTK_M4U_ID(9, 23)
#define IOMMU_PORT_L9_IMG_MFB_WDMA0 MTK_M4U_ID(9, 24)
#define IOMMU_PORT_L9_IMG_MFB_WDMA1 MTK_M4U_ID(9, 25)
#define IOMMU_PORT_L9_IMG_RESERVE6 MTK_M4U_ID(9, 26)
#define IOMMU_PORT_L9_IMG_RESERVE7 MTK_M4U_ID(9, 27)
#define IOMMU_PORT_L9_IMG_RESERVE8 MTK_M4U_ID(9, 28)
/* LARB 11 -- IMG-2 */
#define IOMMU_PORT_L11_IMG_IMGI_D1 MTK_M4U_ID(11, 0)
#define IOMMU_PORT_L11_IMG_IMGBI_D1 MTK_M4U_ID(11, 1)
#define IOMMU_PORT_L11_IMG_DMGI_D1 MTK_M4U_ID(11, 2)
#define IOMMU_PORT_L11_IMG_DEPI_D1 MTK_M4U_ID(11, 3)
#define IOMMU_PORT_L11_IMG_LCE_D1 MTK_M4U_ID(11, 4)
#define IOMMU_PORT_L11_IMG_SMTI_D1 MTK_M4U_ID(11, 5)
#define IOMMU_PORT_L11_IMG_SMTO_D2 MTK_M4U_ID(11, 6)
#define IOMMU_PORT_L11_IMG_SMTO_D1 MTK_M4U_ID(11, 7)
#define IOMMU_PORT_L11_IMG_CRZO_D1 MTK_M4U_ID(11, 8)
#define IOMMU_PORT_L11_IMG_IMG3O_D1 MTK_M4U_ID(11, 9)
#define IOMMU_PORT_L11_IMG_VIPI_D1 MTK_M4U_ID(11, 10)
#define IOMMU_PORT_L11_IMG_SMTI_D5 MTK_M4U_ID(11, 11)
#define IOMMU_PORT_L11_IMG_TIMGO_D1 MTK_M4U_ID(11, 12)
#define IOMMU_PORT_L11_IMG_UFBC_W0 MTK_M4U_ID(11, 13)
#define IOMMU_PORT_L11_IMG_UFBC_R0 MTK_M4U_ID(11, 14)
#define IOMMU_PORT_L11_IMG_WPE_RDMA1 MTK_M4U_ID(11, 15)
#define IOMMU_PORT_L11_IMG_WPE_RDMA0 MTK_M4U_ID(11, 16)
#define IOMMU_PORT_L11_IMG_WPE_WDMA MTK_M4U_ID(11, 17)
#define IOMMU_PORT_L11_IMG_MFB_RDMA0 MTK_M4U_ID(11, 18)
#define IOMMU_PORT_L11_IMG_MFB_RDMA1 MTK_M4U_ID(11, 19)
#define IOMMU_PORT_L11_IMG_MFB_RDMA2 MTK_M4U_ID(11, 20)
#define IOMMU_PORT_L11_IMG_MFB_RDMA3 MTK_M4U_ID(11, 21)
#define IOMMU_PORT_L11_IMG_MFB_RDMA4 MTK_M4U_ID(11, 22)
#define IOMMU_PORT_L11_IMG_MFB_RDMA5 MTK_M4U_ID(11, 23)
#define IOMMU_PORT_L11_IMG_MFB_WDMA0 MTK_M4U_ID(11, 24)
#define IOMMU_PORT_L11_IMG_MFB_WDMA1 MTK_M4U_ID(11, 25)
#define IOMMU_PORT_L11_IMG_RESERVE6 MTK_M4U_ID(11, 26)
#define IOMMU_PORT_L11_IMG_RESERVE7 MTK_M4U_ID(11, 27)
#define IOMMU_PORT_L11_IMG_RESERVE8 MTK_M4U_ID(11, 28)
/* LARB 13 -- CAM */
#define IOMMU_PORT_L13_CAM_MRAWI MTK_M4U_ID(13, 0)
#define IOMMU_PORT_L13_CAM_MRAWO_0 MTK_M4U_ID(13, 1)
#define IOMMU_PORT_L13_CAM_MRAWO_1 MTK_M4U_ID(13, 2)
#define IOMMU_PORT_L13_CAM_CAMSV_4 MTK_M4U_ID(13, 6)
#define IOMMU_PORT_L13_CAM_CAMSV_5 MTK_M4U_ID(13, 7)
#define IOMMU_PORT_L13_CAM_CAMSV_6 MTK_M4U_ID(13, 8)
#define IOMMU_PORT_L13_CAM_CCUI MTK_M4U_ID(13, 9)
#define IOMMU_PORT_L13_CAM_CCUO MTK_M4U_ID(13, 10)
#define IOMMU_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 11)
/* LARB 14 -- CAM */
#define IOMMU_PORT_L14_CAM_CCUI MTK_M4U_ID(14, 4)
#define IOMMU_PORT_L14_CAM_CCUO MTK_M4U_ID(14, 5)
/* LARB 16 -- RAW-A */
#define IOMMU_PORT_L16_CAM_IMGO_R1_A MTK_M4U_ID(16, 0)
#define IOMMU_PORT_L16_CAM_RRZO_R1_A MTK_M4U_ID(16, 1)
#define IOMMU_PORT_L16_CAM_CQI_R1_A MTK_M4U_ID(16, 2)
#define IOMMU_PORT_L16_CAM_BPCI_R1_A MTK_M4U_ID(16, 3)
#define IOMMU_PORT_L16_CAM_YUVO_R1_A MTK_M4U_ID(16, 4)
#define IOMMU_PORT_L16_CAM_UFDI_R2_A MTK_M4U_ID(16, 5)
#define IOMMU_PORT_L16_CAM_RAWI_R2_A MTK_M4U_ID(16, 6)
#define IOMMU_PORT_L16_CAM_RAWI_R3_A MTK_M4U_ID(16, 7)
#define IOMMU_PORT_L16_CAM_AAO_R1_A MTK_M4U_ID(16, 8)
#define IOMMU_PORT_L16_CAM_AFO_R1_A MTK_M4U_ID(16, 9)
#define IOMMU_PORT_L16_CAM_FLKO_R1_A MTK_M4U_ID(16, 10)
#define IOMMU_PORT_L16_CAM_LCESO_R1_A MTK_M4U_ID(16, 11)
#define IOMMU_PORT_L16_CAM_CRZO_R1_A MTK_M4U_ID(16, 12)
#define IOMMU_PORT_L16_CAM_LTMSO_R1_A MTK_M4U_ID(16, 13)
#define IOMMU_PORT_L16_CAM_RSSO_R1_A MTK_M4U_ID(16, 14)
#define IOMMU_PORT_L16_CAM_AAHO_R1_A MTK_M4U_ID(16, 15)
#define IOMMU_PORT_L16_CAM_LSCI_R1_A MTK_M4U_ID(16, 16)
/* LARB 17 -- RAW-B */
#define IOMMU_PORT_L17_CAM_IMGO_R1_B MTK_M4U_ID(17, 0)
#define IOMMU_PORT_L17_CAM_RRZO_R1_B MTK_M4U_ID(17, 1)
#define IOMMU_PORT_L17_CAM_CQI_R1_B MTK_M4U_ID(17, 2)
#define IOMMU_PORT_L17_CAM_BPCI_R1_B MTK_M4U_ID(17, 3)
#define IOMMU_PORT_L17_CAM_YUVO_R1_B MTK_M4U_ID(17, 4)
#define IOMMU_PORT_L17_CAM_UFDI_R2_B MTK_M4U_ID(17, 5)
#define IOMMU_PORT_L17_CAM_RAWI_R2_B MTK_M4U_ID(17, 6)
#define IOMMU_PORT_L17_CAM_RAWI_R3_B MTK_M4U_ID(17, 7)
#define IOMMU_PORT_L17_CAM_AAO_R1_B MTK_M4U_ID(17, 8)
#define IOMMU_PORT_L17_CAM_AFO_R1_B MTK_M4U_ID(17, 9)
#define IOMMU_PORT_L17_CAM_FLKO_R1_B MTK_M4U_ID(17, 10)
#define IOMMU_PORT_L17_CAM_LCESO_R1_B MTK_M4U_ID(17, 11)
#define IOMMU_PORT_L17_CAM_CRZO_R1_B MTK_M4U_ID(17, 12)
#define IOMMU_PORT_L17_CAM_LTMSO_R1_B MTK_M4U_ID(17, 13)
#define IOMMU_PORT_L17_CAM_RSSO_R1_B MTK_M4U_ID(17, 14)
#define IOMMU_PORT_L17_CAM_AAHO_R1_B MTK_M4U_ID(17, 15)
#define IOMMU_PORT_L17_CAM_LSCI_R1_B MTK_M4U_ID(17, 16)
/* LARB 19 -- IPE */
#define IOMMU_PORT_L19_IPE_DVS_RDMA MTK_M4U_ID(19, 0)
#define IOMMU_PORT_L19_IPE_DVS_WDMA MTK_M4U_ID(19, 1)
#define IOMMU_PORT_L19_IPE_DVP_RDMA MTK_M4U_ID(19, 2)
#define IOMMU_PORT_L19_IPE_DVP_WDMA MTK_M4U_ID(19, 3)
/* LARB 20 -- IPE */
#define IOMMU_PORT_L20_IPE_FDVT_RDA MTK_M4U_ID(20, 0)
#define IOMMU_PORT_L20_IPE_FDVT_RDB MTK_M4U_ID(20, 1)
#define IOMMU_PORT_L20_IPE_FDVT_WRA MTK_M4U_ID(20, 2)
#define IOMMU_PORT_L20_IPE_FDVT_WRB MTK_M4U_ID(20, 3)
#define IOMMU_PORT_L20_IPE_RSC_RDMA0 MTK_M4U_ID(20, 4)
#define IOMMU_PORT_L20_IPE_RSC_WDMA MTK_M4U_ID(20, 5)
#endif
This diff is collapsed.
......@@ -12,4 +12,6 @@
#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0x1f)
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
#define MTK_IFAIOMMU_PERI_ID(port) MTK_M4U_ID(0, port)
#endif
......@@ -79,6 +79,14 @@ struct amba_driver {
void (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
const struct amba_id *id_table;
/*
* Most device drivers need not care about this flag as long as all
* DMA is handled through the kernel DMA API. Some special drivers,
* for example VFIO drivers, know how to manage DMA themselves and
* set this flag so that the IOMMU layer will allow them to set up
* and manage their own I/O address space.
*/
bool driver_managed_dma;
};
/*
......
......@@ -59,6 +59,8 @@ struct fwnode_handle;
* bus supports.
* @dma_configure: Called to setup DMA configuration on a device on
* this bus.
* @dma_cleanup: Called to cleanup DMA configuration on a device on
* this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
......@@ -103,6 +105,7 @@ struct bus_type {
int (*num_vf)(struct device *dev);
int (*dma_configure)(struct device *dev);
void (*dma_cleanup)(struct device *dev);
const struct dev_pm_ops *pm;
......
......@@ -539,7 +539,8 @@ struct dmar_domain {
u8 has_iotlb_device: 1;
u8 iommu_coherency: 1; /* indicate coherency of iommu access */
u8 iommu_snooping: 1; /* indicate snooping control feature */
u8 force_snooping : 1; /* Create IOPTEs with snoop control */
u8 set_pte_snp:1;
struct list_head devices; /* all devices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
......