Commit 7e3c3883 authored by Joerg Roedel


Merge branches 'arm/allwinner', 'arm/mediatek', 'arm/renesas', 'arm/tegra', 'arm/qcom', 'arm/smmu', 'ppc/pamu', 'x86/amd', 'x86/vt-d' and 'core' into next
......@@ -61,6 +61,7 @@ Required properties:
"mediatek,mt6779-m4u" for mt6779 which uses generation two m4u HW.
"mediatek,mt7623-m4u", "mediatek,mt2701-m4u" for mt7623 which uses
generation one m4u HW.
"mediatek,mt8167-m4u" for mt8167 which uses generation two m4u HW.
"mediatek,mt8173-m4u" for mt8173 which uses generation two m4u HW.
"mediatek,mt8183-m4u" for mt8183 which uses generation two m4u HW.
- reg : m4u register base and size.
......@@ -80,6 +81,7 @@ Required properties:
dt-binding/memory/mt2701-larb-port.h for mt2701, mt7623
dt-binding/memory/mt2712-larb-port.h for mt2712,
dt-binding/memory/mt6779-larb-port.h for mt6779,
dt-binding/memory/mt8167-larb-port.h for mt8167,
dt-binding/memory/mt8173-larb-port.h for mt8173, and
dt-binding/memory/mt8183-larb-port.h for mt8183.
......
......@@ -20,6 +20,7 @@ properties:
- items:
- enum:
- renesas,ipmmu-r8a73a4 # R-Mobile APE6
- renesas,ipmmu-r8a7742 # RZ/G1H
- renesas,ipmmu-r8a7743 # RZ/G1M
- renesas,ipmmu-r8a7744 # RZ/G1N
- renesas,ipmmu-r8a7745 # RZ/G1E
......@@ -32,8 +33,8 @@ properties:
- enum:
- renesas,ipmmu-r8a774a1 # RZ/G2M
- renesas,ipmmu-r8a774b1 # RZ/G2N
- renesas,ipmmu-r8a774e1 # RZ/G2H
- renesas,ipmmu-r8a774c0 # RZ/G2E
- renesas,ipmmu-r8a774e1 # RZ/G2H
- renesas,ipmmu-r8a7795 # R-Car H3
- renesas,ipmmu-r8a7796 # R-Car M3-W
- renesas,ipmmu-r8a77961 # R-Car M3-W+
......
.. SPDX-License-Identifier: GPL-2.0
.. iommu:
=====================================
IOMMU Userspace API
=====================================
IOMMU UAPI is used for virtualization cases where communication is
needed between physical and virtual IOMMU drivers. For bare-metal
usage, the IOMMU is a system device which does not need to communicate
with userspace directly.
The primary use cases are guest Shared Virtual Address (SVA) and
guest IO virtual address (IOVA), wherein the vIOMMU implementation
relies on the physical IOMMU and for this reason requires interactions
with the host driver.
.. contents:: :local:
Functionalities
===============
Communication between user and kernel flows in both directions. The
supported user-kernel APIs are as follows:
1. Bind/Unbind guest PASID (e.g. Intel VT-d)
2. Bind/Unbind guest PASID table (e.g. ARM SMMU)
3. Invalidate IOMMU caches upon guest requests
4. Report errors to the guest and serve page requests
Requirements
============
The IOMMU UAPIs are generic and extensible to meet the following
requirements:
1. Emulated and para-virtualised vIOMMUs
2. Multiple vendors (Intel VT-d, ARM SMMU, etc.)
3. Extensions to the UAPI shall not break existing userspace
Interfaces
==========
Although the data structures defined in the IOMMU UAPI are self-contained,
no new user API functions are introduced. Instead, the IOMMU UAPI is
designed to work with existing user driver frameworks such as VFIO.
Extension Rules & Precautions
-----------------------------
When the IOMMU UAPI gets extended, the data structures can *only* be
modified in two ways:
1. Adding new fields by re-purposing the padding[] field. No size change.
2. Adding new union members at the end. May increase the structure sizes.
No new fields can be added *after* the variable sized union, because that
would break backward compatibility once the offset moves. A new flag must
be introduced whenever a change affects the structure using either
method. The IOMMU driver processes the data based on these flags, which
ensures backward compatibility.
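For illustration, below is a minimal sketch of how a driver could consume
a field that was added by re-purposing padding[]. The struct, flag and
field names are hypothetical and not part of the current UAPI; they only
demonstrate the flag-gated pattern described above::

    struct iommu_example_info {
        __u32 argsz;
        __u32 flags;
    #define IOMMU_EXAMPLE_INFO_NEW_FIELD	(1 << 0)	/* hypothetical flag */
        __u32 new_field;	/* carved out of the former padding[8] */
        __u8  padding[4];
    };

    /* Old userspace never sets the flag, so the new field is simply ignored */
    static __u32 example_get_new_field(const struct iommu_example_info *info)
    {
        if (info->flags & IOMMU_EXAMPLE_INFO_NEW_FIELD)
            return info->new_field;
        return 0;
    }
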
The version field is reserved only for the unlikely event of upgrading
the UAPI in its entirety.
It is *always* the caller's responsibility to indicate the size of the
structure passed by setting argsz appropriately.
At the same time, argsz is user-provided data and is not trusted. The
argsz field allows the user application to indicate how much data it is
providing; it is still the kernel's responsibility to validate whether
the size is correct and sufficient for the requested operation.
Compatibility Checking
----------------------
When an IOMMU UAPI extension results in a structure size increase, the
IOMMU UAPI code shall handle the following cases (a simplified sketch of
the kernel-side handling follows the list):
1. User and kernel have an exact size match
2. An older user with an older kernel header (smaller UAPI size) running
   on a newer kernel (larger UAPI size)
3. A newer user with a newer kernel header (larger UAPI size) running
   on an older kernel (smaller UAPI size)
4. A malicious/misbehaving user passing an illegal/invalid size that is
   still within range. The data may contain garbage.
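The kernel-side pattern for these cases, taken in simplified form from
the iommu_uapi_cache_invalidate() code added later in this series, looks
roughly as follows (uinfo is the void __user pointer handed down by a
caller such as VFIO)::

    struct iommu_cache_invalidate_info inv_info = { 0 };
    u32 minsz = offsetof(struct iommu_cache_invalidate_info, granu);

    /* Copy the fixed part first to learn argsz and flags */
    if (copy_from_user(&inv_info, uinfo, minsz))
        return -EFAULT;

    /* Case 4: fields before the variable sized union are mandatory */
    if (inv_info.argsz < minsz)
        return -EINVAL;

    /*
     * Cases 2 and 3: copy the rest of the user data, but never more
     * than the size the running kernel knows about, and never more
     * than userspace claims to provide.
     */
    if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
                       min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
        return -EFAULT;
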
Feature Checking
----------------
While launching a guest with a vIOMMU, it is strongly advised to check
compatibility upfront, as some subsequent errors happening during
vIOMMU operation, such as cache invalidation failures, cannot be nicely
escalated to the guest due to IOMMU specifications. This can lead to
catastrophic failures for the users.
User applications such as QEMU are expected to import kernel UAPI
headers. Backward compatibility is supported per feature flag.
For example, an older QEMU (built with an older kernel header) can run
on a newer kernel. A newer QEMU (built with a new kernel header) may
refuse to initialize on an older kernel if the new feature flags are not
supported by that kernel. Simply recompiling existing code with a newer
kernel header should not be an issue, since only existing flags are used.
The IOMMU vendor driver should report the features below to IOMMU UAPI
consumers (e.g. via VFIO).
1. IOMMU_NESTING_FEAT_SYSWIDE_PASID
2. IOMMU_NESTING_FEAT_BIND_PGTBL
3. IOMMU_NESTING_FEAT_BIND_PASID_TABLE
4. IOMMU_NESTING_FEAT_CACHE_INVLD
5. IOMMU_NESTING_FEAT_PAGE_REQUEST
Taking VFIO as an example: upon request from VFIO userspace (e.g. QEMU),
the VFIO kernel code shall query the IOMMU vendor driver for support of
the above features. The query result can then be reported back to the
userspace caller. Details can be found in
Documentation/driver-api/vfio.rst.
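As a hedged illustration only: assuming the kernel reports these features
to userspace as a bitmask of the flags listed above (the exact VFIO query
interface is not defined in this document), a consumer could gate guest
SVA support roughly like this::

    /* Sketch: example set of features a guest SVA setup might require */
    static bool example_can_do_guest_sva(__u64 nesting_features)
    {
        const __u64 required = IOMMU_NESTING_FEAT_SYSWIDE_PASID |
                               IOMMU_NESTING_FEAT_BIND_PGTBL |
                               IOMMU_NESTING_FEAT_CACHE_INVLD;

        return (nesting_features & required) == required;
    }
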
Data Passing Example with VFIO
------------------------------
As the ubiquitous userspace driver framework, VFIO is already IOMMU
aware and shares many key concepts such as device model, group, and
protection domain. Other user driver frameworks can also be extended
to support IOMMU UAPI but it is outside the scope of this document.
In this tight-knit VFIO-IOMMU interface, the ultimate consumer of the
IOMMU UAPI data is the host IOMMU driver. VFIO facilitates user-kernel
transport, capability checking, security, and life cycle management of
process address space ID (PASID).
VFIO layer conveys the data structures down to the IOMMU driver. It
follows the pattern below::
    struct {
        __u32 argsz;
        __u32 flags;
        __u8  data[];
    };
Here data[] contains the IOMMU UAPI data structures. VFIO has the
freedom to bundle the data as well as parse data size based on its own flags.
In order to determine the size and feature set of the user data, argsz
and flags (or the equivalent) are also embedded in the IOMMU UAPI data
structures.
A "__u32 argsz" field is *always* at the beginning of each structure.
For example:
::
    struct iommu_cache_invalidate_info {
        __u32 argsz;
    #define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
        __u32 version;
    /* IOMMU paging structure cache */
    #define IOMMU_CACHE_INV_TYPE_IOTLB	(1 << 0) /* IOMMU IOTLB */
    #define IOMMU_CACHE_INV_TYPE_DEV_IOTLB	(1 << 1) /* Device IOTLB */
    #define IOMMU_CACHE_INV_TYPE_PASID	(1 << 2) /* PASID cache */
    #define IOMMU_CACHE_INV_TYPE_NR	(3)
        __u8  cache;
        __u8  granularity;
        __u8  padding[6];
        union {
            struct iommu_inv_pasid_info pasid_info;
            struct iommu_inv_addr_info addr_info;
        } granu;
    };
VFIO is responsible for checking its own argsz and flags. It then
invokes the appropriate IOMMU UAPI functions. The user pointers are
passed to the IOMMU layer for further processing. The responsibilities
are divided as follows:
- The generic IOMMU layer checks the argsz range based on the UAPI data
  in the current kernel version.
- The generic IOMMU layer checks the content of the UAPI data for
  non-zero reserved bits in flags, padding fields, and unsupported
  versions. This ensures that userspace is not broken in the future when
  these fields or flags are used.
- The vendor IOMMU driver checks argsz based on vendor flags. UAPI data
  is consumed based on flags. The vendor driver has access to the
  unadulterated argsz value in case of vendor-specific future
  extensions. Currently, it does not perform the copy_from_user()
  itself. A __user pointer can be provided in some future scenarios
  where there is vendor data outside of the structure definition.
IOMMU code treats UAPI data in two categories:
- structure contains vendor data
  (Example: iommu_uapi_sva_bind_gpasid())
- structure contains only generic data
  (Example: iommu_uapi_cache_invalidate())
Sharing UAPI with in-kernel users
---------------------------------
For UAPIs that are shared with in-kernel users, a wrapper function is
provided to distinguish the callers. For example,
Userspace caller ::
int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev,
void __user *udata)
In-kernel caller ::
int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev, ioasid_t ioasid);
......@@ -1506,8 +1506,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/iommu/arm,smmu*
F: drivers/iommu/arm/
F: drivers/iommu/io-pgtable-arm-v7s.c
F: drivers/iommu/io-pgtable-arm.c
F: drivers/iommu/io-pgtable-arm*
ARM SUB-ARCHITECTURES
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
......@@ -9120,6 +9119,7 @@ L: iommu@lists.linux-foundation.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
F: Documentation/devicetree/bindings/iommu/
F: Documentation/userspace-api/iommu.rst
F: drivers/iommu/
F: include/linux/iommu.h
F: include/linux/iova.h
......
......@@ -386,6 +386,54 @@ thermal: thermal@e61f0000 {
#thermal-sensor-cells = <0>;
};
ipmmu_sy0: iommu@e6280000 {
compatible = "renesas,ipmmu-r8a7742",
"renesas,ipmmu-vmsa";
reg = <0 0xe6280000 0 0x1000>;
interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
#iommu-cells = <1>;
status = "disabled";
};
ipmmu_sy1: iommu@e6290000 {
compatible = "renesas,ipmmu-r8a7742",
"renesas,ipmmu-vmsa";
reg = <0 0xe6290000 0 0x1000>;
interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
#iommu-cells = <1>;
status = "disabled";
};
ipmmu_ds: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a7742",
"renesas,ipmmu-vmsa";
reg = <0 0xe6740000 0 0x1000>;
interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
#iommu-cells = <1>;
status = "disabled";
};
ipmmu_mp: iommu@ec680000 {
compatible = "renesas,ipmmu-r8a7742",
"renesas,ipmmu-vmsa";
reg = <0 0xec680000 0 0x1000>;
interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
#iommu-cells = <1>;
status = "disabled";
};
ipmmu_mx: iommu@fe951000 {
compatible = "renesas,ipmmu-r8a7742",
"renesas,ipmmu-vmsa";
reg = <0 0xfe951000 0 0x1000>;
interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
#iommu-cells = <1>;
status = "disabled";
};
icram0: sram@e63a0000 {
compatible = "mmio-sram";
reg = <0 0xe63a0000 0 0x12000>;
......
......@@ -45,6 +45,7 @@
#define rmb() dsb(ld)
#define wmb() dsb(st)
#define dma_mb() dmb(osh)
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)
......
......@@ -110,6 +110,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#define __io_par(v) __iormb(v)
#define __iowmb() dma_wmb()
#define __iomb() dma_mb()
/*
* Relaxed I/O memory access primitives. These follow the Device memory
......
......@@ -17,11 +17,14 @@
#ifndef __ASSEMBLY__
#include <linux/refcount.h>
typedef struct {
atomic64_t id;
#ifdef CONFIG_COMPAT
void *sigpage;
#endif
refcount_t pinned;
void *vdso;
unsigned long flags;
} mm_context_t;
......
......@@ -177,7 +177,13 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
#define destroy_context(mm) do { } while(0)
void check_and_switch_context(struct mm_struct *mm);
#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
atomic64_set(&mm->context.id, 0);
refcount_set(&mm->context.pinned, 0);
return 0;
}
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
......@@ -248,6 +254,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);
unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_MMU_CONTEXT_H */
......@@ -1111,6 +1111,7 @@ u64 read_sanitised_ftr_reg(u32 id)
return 0;
return regp->sys_val;
}
EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);
#define read_sysreg_case(r) \
case r: return read_sysreg_s(r)
......
......@@ -27,6 +27,10 @@ static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
static unsigned long max_pinned_asids;
static unsigned long nr_pinned_asids;
static unsigned long *pinned_asid_map;
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION (1UL << asid_bits)
......@@ -72,7 +76,7 @@ void verify_cpu_asid_bits(void)
}
}
static void set_kpti_asid_bits(void)
static void set_kpti_asid_bits(unsigned long *map)
{
unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
/*
......@@ -81,13 +85,15 @@ static void set_kpti_asid_bits(void)
* is set, then the ASID will map only userspace. Thus
* mark even as reserved for kernel.
*/
memset(asid_map, 0xaa, len);
memset(map, 0xaa, len);
}
static void set_reserved_asid_bits(void)
{
if (arm64_kernel_unmapped_at_el0())
set_kpti_asid_bits();
if (pinned_asid_map)
bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
else if (arm64_kernel_unmapped_at_el0())
set_kpti_asid_bits(asid_map);
else
bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}
......@@ -165,6 +171,14 @@ static u64 new_context(struct mm_struct *mm)
if (check_update_reserved_asid(asid, newasid))
return newasid;
/*
* If it is pinned, we can keep using it. Note that reserved
* takes priority, because even if it is also pinned, we need to
* update the generation into the reserved_asids.
*/
if (refcount_read(&mm->context.pinned))
return newasid;
/*
* We had a valid ASID in a previous life, so try to re-use
* it if possible.
......@@ -256,6 +270,71 @@ void check_and_switch_context(struct mm_struct *mm)
cpu_switch_mm(mm->pgd, mm);
}
unsigned long arm64_mm_context_get(struct mm_struct *mm)
{
unsigned long flags;
u64 asid;
if (!pinned_asid_map)
return 0;
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
asid = atomic64_read(&mm->context.id);
if (refcount_inc_not_zero(&mm->context.pinned))
goto out_unlock;
if (nr_pinned_asids >= max_pinned_asids) {
asid = 0;
goto out_unlock;
}
if (!asid_gen_match(asid)) {
/*
* We went through one or more rollover since that ASID was
* used. Ensure that it is still valid, or generate a new one.
*/
asid = new_context(mm);
atomic64_set(&mm->context.id, asid);
}
nr_pinned_asids++;
__set_bit(asid2idx(asid), pinned_asid_map);
refcount_set(&mm->context.pinned, 1);
out_unlock:
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
asid &= ~ASID_MASK;
/* Set the equivalent of USER_ASID_BIT */
if (asid && arm64_kernel_unmapped_at_el0())
asid |= 1;
return asid;
}
EXPORT_SYMBOL_GPL(arm64_mm_context_get);
void arm64_mm_context_put(struct mm_struct *mm)
{
unsigned long flags;
u64 asid = atomic64_read(&mm->context.id);
if (!pinned_asid_map)
return;
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
if (refcount_dec_and_test(&mm->context.pinned)) {
__clear_bit(asid2idx(asid), pinned_asid_map);
nr_pinned_asids--;
}
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
EXPORT_SYMBOL_GPL(arm64_mm_context_put);
/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
......@@ -296,8 +375,11 @@ static int asids_update_limit(void)
{
unsigned long num_available_asids = NUM_USER_ASIDS;
if (arm64_kernel_unmapped_at_el0())
if (arm64_kernel_unmapped_at_el0()) {
num_available_asids /= 2;
if (pinned_asid_map)
set_kpti_asid_bits(pinned_asid_map);
}
/*
* Expect allocation after rollover to fail if we don't have at least
* one more ASID than CPUs. ASID #0 is reserved for init_mm.
......@@ -305,6 +387,13 @@ static int asids_update_limit(void)
WARN_ON(num_available_asids - 1 <= num_possible_cpus());
pr_info("ASID allocator initialised with %lu entries\n",
num_available_asids);
/*
* There must always be an ASID available after rollover. Ensure that,
* even if all CPUs have a reserved ASID and the maximum number of ASIDs
* are pinned, there still is at least one empty slot in the ASID map.
*/
max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
return 0;
}
arch_initcall(asids_update_limit);
......@@ -319,13 +408,17 @@ static int asids_init(void)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
sizeof(*pinned_asid_map), GFP_KERNEL);
nr_pinned_asids = 0;
/*
* We cannot call set_reserved_asid_bits() here because CPU
* caps are not finalized yet, so it is safer to assume KPTI
* and reserve kernel ASID's from beginning.
*/
if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
set_kpti_asid_bits();
set_kpti_asid_bits(asid_map);
return 0;
}
early_initcall(asids_init);
......@@ -232,7 +232,7 @@ config IPMMU_VMSA
select ARM_DMA_USE_IOMMU
help
Support for the Renesas VMSA-compatible IPMMU found in the R-Mobile
APE6, R-Car Gen2, and R-Car Gen3 SoCs.
APE6, R-Car Gen{2,3} and RZ/G{1,2} SoCs.
If unsure, say N.
......@@ -308,6 +308,16 @@ config ARM_SMMU_V3
Say Y here if your system includes an IOMMU device implementing
the ARM SMMUv3 architecture.
config ARM_SMMU_V3_SVA
bool "Shared Virtual Addressing support for the ARM SMMUv3"
depends on ARM_SMMU_V3
help
Support for sharing process address spaces with devices using the
SMMUv3.
Say Y here if your system supports SVA extensions such as PCIe PASID
and PRI.
config S390_IOMMU
def_bool y if S390 && PCI
depends on S390 && PCI
......
......@@ -41,6 +41,15 @@ extern int amd_iommu_guest_ir;
struct iommu_domain;
extern bool amd_iommu_v2_supported(void);
extern struct amd_iommu *get_amd_iommu(unsigned int idx);
extern u8 amd_iommu_pc_get_max_banks(unsigned int idx);
extern bool amd_iommu_pc_supported(void);
extern u8 amd_iommu_pc_get_max_counters(unsigned int idx);
extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
u8 fxn, u64 *value);
extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
u8 fxn, u64 *value);
extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
......
......@@ -93,6 +93,7 @@
#define FEATURE_PC (1ULL<<9)
#define FEATURE_GAM_VAPIC (1ULL<<21)
#define FEATURE_EPHSUP (1ULL<<50)
#define FEATURE_SNP (1ULL<<63)
#define FEATURE_PASID_SHIFT 32
#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT)
......@@ -128,6 +129,8 @@
#define EVENT_TYPE_IOTLB_INV_TO 0x7
#define EVENT_TYPE_INV_DEV_REQ 0x8
#define EVENT_TYPE_INV_PPR_REQ 0x9
#define EVENT_TYPE_RMP_FAULT 0xd
#define EVENT_TYPE_RMP_HW_ERR 0xe
#define EVENT_DEVID_MASK 0xffff
#define EVENT_DEVID_SHIFT 0
#define EVENT_DOMID_MASK_LO 0xffff
......@@ -595,7 +598,8 @@ struct amd_iommu {
#endif
u32 flags;
volatile u64 __aligned(8) cmd_sem;
volatile u64 *cmd_sem;
u64 cmd_sem_val;
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
/* DebugFS Info */
......
......@@ -359,6 +359,29 @@ static void iommu_set_exclusion_range(struct amd_iommu *iommu)
&entry, sizeof(entry));
}
static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{
u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
u64 entry = start & PM_ADDR_MASK;
if (!iommu_feature(iommu, FEATURE_SNP))
return;
/* Note:
* Re-purpose Exclusion base/limit registers for Completion wait
* write-back base/limit.
*/
memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
&entry, sizeof(entry));
/* Note:
* Default to 4 Kbytes, which can be specified by setting base
* address equal to the limit address.
*/
memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
&entry, sizeof(entry));
}
/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
......@@ -813,6 +836,19 @@ static int iommu_init_ga(struct amd_iommu *iommu)
return ret;
}
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);
return iommu->cmd_sem ? 0 : -ENOMEM;
}
static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
if (iommu->cmd_sem)
free_page((unsigned long)iommu->cmd_sem);
}
static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
......@@ -1376,6 +1412,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
static void __init free_iommu_one(struct amd_iommu *iommu)
{
free_cwwb_sem(iommu);
free_command_buffer(iommu);
free_event_buffer(iommu);
free_ppr_log(iommu);
......@@ -1462,6 +1499,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
int ret;
raw_spin_lock_init(&iommu->lock);
iommu->cmd_sem_val = 0;
/* Add IOMMU to internal data structures */
list_add_tail(&iommu->list, &amd_iommu_list);
......@@ -1539,6 +1577,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (!iommu->mmio_base)
return -ENOMEM;
if (alloc_cwwb_sem(iommu))
return -ENOMEM;
if (alloc_command_buffer(iommu))
return -ENOMEM;
......@@ -1576,7 +1617,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
/**
* get_highest_supported_ivhd_type - Look up the appropriate IVHD type
* @ivrs Pointer to the IVRS header
* @ivrs: Pointer to the IVRS header
*
* This function search through all IVDB of the maximum supported IVHD
*/
......@@ -1864,6 +1905,9 @@ static int __init amd_iommu_init_pci(void)
ret = iommu_init_pci(iommu);
if (ret)
break;
/* Need to setup range after PCI init */
iommu_set_cwwb_range(iommu);
}
/*
......@@ -1927,7 +1971,7 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
#define XT_INT_VEC(x) (((x) & 0xFFULL) << 32)
#define XT_INT_DEST_HI(x) ((((x) >> 24) & 0xFFULL) << 56)
/**
/*
* Setup the IntCapXT registers with interrupt routing information
* based on the PCI MSI capability block registers, accessed via
* MMIO MSI address low/hi and MSI data registers.
......
......@@ -486,6 +486,67 @@ static void dump_command(unsigned long phys_addr)
pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
}
static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
{
struct iommu_dev_data *dev_data = NULL;
int devid, vmg_tag, flags;
struct pci_dev *pdev;
u64 spa;
devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
vmg_tag = (event[1]) & 0xFFFF;
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
if (dev_data && __ratelimit(&dev_data->rs)) {
pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
vmg_tag, spa, flags);
} else {
pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
vmg_tag, spa, flags);
}
if (pdev)
pci_dev_put(pdev);
}
static void amd_iommu_report_rmp_fault(volatile u32 *event)
{
struct iommu_dev_data *dev_data = NULL;
int devid, flags_rmp, vmg_tag, flags;
struct pci_dev *pdev;
u64 gpa;
devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
vmg_tag = (event[1]) & 0xFFFF;
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
gpa = ((u64)event[3] << 32) | event[2];
pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
if (dev_data && __ratelimit(&dev_data->rs)) {
pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
vmg_tag, gpa, flags_rmp, flags);
} else {
pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
vmg_tag, gpa, flags_rmp, flags);
}
if (pdev)
pci_dev_put(pdev);
}
static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
u64 address, int flags)
{
......@@ -577,6 +638,12 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
case EVENT_TYPE_RMP_FAULT:
amd_iommu_report_rmp_fault(event);
break;
case EVENT_TYPE_RMP_HW_ERR:
amd_iommu_report_rmp_hw_error(event);
break;
case EVENT_TYPE_INV_PPR_REQ:
pasid = PPR_PASID(*((u64 *)__evt));
tag = event[1] & 0x03FF;
......@@ -792,11 +859,11 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
*
****************************************************************************/
static int wait_on_sem(volatile u64 *sem)
static int wait_on_sem(struct amd_iommu *iommu, u64 data)
{
int i = 0;
while (*sem == 0 && i < LOOP_TIMEOUT) {
while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
udelay(1);
i += 1;
}
......@@ -827,16 +894,16 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}
static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
static void build_completion_wait(struct iommu_cmd *cmd,
struct amd_iommu *iommu,
u64 data)
{
u64 paddr = iommu_virt_to_phys((void *)address);
WARN_ON(address & 0x7ULL);
u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
cmd->data[1] = upper_32_bits(paddr);
cmd->data[2] = 1;
cmd->data[2] = data;
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}
......@@ -1045,22 +1112,21 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
struct iommu_cmd cmd;
unsigned long flags;
int ret;
u64 data;
if (!iommu->need_sync)
return 0;
build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
raw_spin_lock_irqsave(&iommu->lock, flags);
iommu->cmd_sem = 0;
data = ++iommu->cmd_sem_val;
build_completion_wait(&cmd, iommu, data);
ret = __iommu_queue_command_sync(iommu, &cmd, false);
if (ret)
goto out_unlock;
ret = wait_on_sem(&iommu->cmd_sem);
ret = wait_on_sem(iommu, data);
out_unlock:
raw_spin_unlock_irqrestore(&iommu->lock, flags);
......
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o
arm_smmu_v3-objs-y += arm-smmu-v3.o
arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
arm_smmu_v3-objs := $(arm_smmu_v3-objs-y)
// SPDX-License-Identifier: GPL-2.0
/*
* Implementation of the IOMMU SVA API for the ARM SMMUv3
*/
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include "arm-smmu-v3.h"
#include "../../io-pgtable-arm.h"
static DEFINE_MUTEX(sva_lock);
/*
* Check if the CPU ASID is available on the SMMU side. If a private context
* descriptor is using it, try to replace it.
*/
static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
int ret;
u32 new_asid;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain;
cd = xa_load(&arm_smmu_asid_xa, asid);
if (!cd)
return NULL;
if (cd->mm) {
if (WARN_ON(cd->mm != mm))
return ERR_PTR(-EINVAL);
/* All devices bound to this mm use the same cd struct. */
refcount_inc(&cd->refs);
return cd;
}
smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
smmu = smmu_domain->smmu;
ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
if (ret)
return ERR_PTR(-ENOSPC);
/*
* Race with unmap: TLB invalidations will start targeting the new ASID,
* which isn't assigned yet. We'll do an invalidate-all on the old ASID
* later, so it doesn't matter.
*/
cd->asid = new_asid;
/*
* Update ASID and invalidate CD in all associated masters. There will
* be some overlap between use of both ASIDs, until we invalidate the
* TLB.
*/
arm_smmu_write_ctx_desc(smmu_domain, 0, cd);
/* Invalidate TLB entries previously associated with that context */
arm_smmu_tlb_inv_asid(smmu, asid);
xa_erase(&arm_smmu_asid_xa, asid);
return NULL;
}
__maybe_unused
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
u16 asid;
int err = 0;
u64 tcr, par, reg;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_ctx_desc *ret = NULL;
asid = arm64_mm_context_get(mm);
if (!asid)
return ERR_PTR(-ESRCH);
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
if (!cd) {
err = -ENOMEM;
goto out_put_context;
}
refcount_set(&cd->refs, 1);
mutex_lock(&arm_smmu_asid_lock);
ret = arm_smmu_share_asid(mm, asid);
if (ret) {
mutex_unlock(&arm_smmu_asid_lock);
goto out_free_cd;
}
err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
mutex_unlock(&arm_smmu_asid_lock);
if (err)
goto out_free_asid;
tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
switch (PAGE_SIZE) {
case SZ_4K:
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
break;
case SZ_16K:
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
break;
case SZ_64K:
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
break;
default:
WARN_ON(1);
err = -EINVAL;
goto out_free_asid;
}
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
cd->ttbr = virt_to_phys(mm->pgd);
cd->tcr = tcr;
/*
* MAIR value is pretty much constant and global, so we can just get it
* from the current CPU register
*/
cd->mair = read_sysreg(mair_el1);
cd->asid = asid;
cd->mm = mm;
return cd;
out_free_asid:
arm_smmu_free_asid(cd);
out_free_cd:
kfree(cd);
out_put_context:
arm64_mm_context_put(mm);
return err < 0 ? ERR_PTR(err) : ret;
}
__maybe_unused
static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
if (arm_smmu_free_asid(cd)) {
/* Unpin ASID */
arm64_mm_context_put(cd->mm);
kfree(cd);
}
}
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
unsigned long reg, fld;
unsigned long oas;
unsigned long asid_bits;
u32 feat_mask = ARM_SMMU_FEAT_BTM | ARM_SMMU_FEAT_COHERENCY;
if (vabits_actual == 52)
feat_mask |= ARM_SMMU_FEAT_VAX;
if ((smmu->features & feat_mask) != feat_mask)
return false;
if (!(smmu->pgsize_bitmap & PAGE_SIZE))
return false;
/*
* Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
* not even pretending to support AArch32 here. Abort if the MMU outputs
* addresses larger than what we support.
*/
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
oas = id_aa64mmfr0_parange_to_phys_shift(fld);
if (smmu->oas < oas)
return false;
/* We can support bigger ASIDs than the CPU, but not smaller */
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
asid_bits = fld ? 16 : 8;
if (smmu->asid_bits < asid_bits)
return false;
/*
* See max_pinned_asids in arch/arm64/mm/context.c. The following is
* generally the maximum number of bindable processes.
*/
if (arm64_kernel_unmapped_at_el0())
asid_bits--;
dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
num_possible_cpus() - 2);
return true;
}
static bool arm_smmu_iopf_supported(struct arm_smmu_master *master)
{
return false;
}
bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
return false;
/* SSID and IOPF support are mandatory for the moment */
return master->ssid_bits && arm_smmu_iopf_supported(master);
}
bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
bool enabled;
mutex_lock(&sva_lock);
enabled = master->sva_enabled;
mutex_unlock(&sva_lock);
return enabled;
}
int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
mutex_lock(&sva_lock);
master->sva_enabled = true;
mutex_unlock(&sva_lock);
return 0;
}
int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
mutex_lock(&sva_lock);
if (!list_empty(&master->bonds)) {
dev_err(master->dev, "cannot disable SVA, device is bound\n");
mutex_unlock(&sva_lock);
return -EBUSY;
}
master->sva_enabled = false;
mutex_unlock(&sva_lock);
return 0;
}
......@@ -68,7 +68,8 @@ static int cavium_cfg_probe(struct arm_smmu_device *smmu)
return 0;
}
static int cavium_init_context(struct arm_smmu_domain *smmu_domain)
static int cavium_init_context(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
struct cavium_smmu *cs = container_of(smmu_domain->smmu,
struct cavium_smmu, smmu);
......
......@@ -65,41 +65,10 @@ module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
struct arm_smmu_s2cr {
struct iommu_group *group;
int count;
enum arm_smmu_s2cr_type type;
enum arm_smmu_s2cr_privcfg privcfg;
u8 cbndx;
};
#define s2cr_init_val (struct arm_smmu_s2cr){ \
.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
}
struct arm_smmu_smr {
u16 mask;
u16 id;
bool valid;
};
struct arm_smmu_cb {
u64 ttbr[2];
u32 tcr[2];
u32 mair[2];
struct arm_smmu_cfg *cfg;
};
struct arm_smmu_master_cfg {
struct arm_smmu_device *smmu;
s16 smendx[];
};
#define INVALID_SMENDX -1
#define cfg_smendx(cfg, fw, i) \
(i >= fw->num_ids ? INVALID_SMENDX : cfg->smendx[i])
#define for_each_cfg_sme(cfg, fw, i, idx) \
for (i = 0; idx = cfg_smendx(cfg, fw, i), i < fw->num_ids; ++i)
static bool using_legacy_binding, using_generic_binding;
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
......@@ -234,19 +203,6 @@ static int arm_smmu_register_legacy_master(struct device *dev,
}
#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
int idx;
do {
idx = find_next_zero_bit(map, end, start);
if (idx == end)
return -ENOSPC;
} while (test_and_set_bit(idx, map));
return idx;
}
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
clear_bit(idx, map);
......@@ -552,11 +508,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
cb->ttbr[1] = 0;
} else {
cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID,
cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
cfg->asid);
cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
cfg->asid);
if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
else
cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
}
} else {
cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
......@@ -574,7 +534,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
}
}
static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
u32 reg;
bool stage1;
......@@ -660,8 +620,19 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}
static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu,
struct device *dev, unsigned int start)
{
if (smmu->impl && smmu->impl->alloc_context_bank)
return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct arm_smmu_device *smmu)
struct arm_smmu_device *smmu,
struct device *dev)
{
int irq, start, ret = 0;
unsigned long ias, oas;
......@@ -776,10 +747,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ret = -EINVAL;
goto out_unlock;
}
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
smmu->num_context_banks);
if (ret < 0)
ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
if (ret < 0) {
goto out_unlock;
}
smmu_domain->smmu = smmu;
cfg->cbndx = ret;
if (smmu->version < ARM_SMMU_V2) {
......@@ -794,13 +768,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
else
cfg->asid = cfg->cbndx;
smmu_domain->smmu = smmu;
if (smmu->impl && smmu->impl->init_context) {
ret = smmu->impl->init_context(smmu_domain);
if (ret)
goto out_unlock;
}
pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
......@@ -810,6 +777,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.iommu_dev = smmu->dev,
};
if (smmu->impl && smmu->impl->init_context) {
ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
if (ret)
goto out_clear_smmu;
}
if (smmu_domain->non_strict)
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
......@@ -821,7 +794,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
/* Update the domain's page sizes to reflect the page table format */
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
if (pgtbl_cfg.quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
domain->geometry.aperture_start = ~0UL << ias;
domain->geometry.aperture_end = ~0UL;
} else {
domain->geometry.aperture_end = (1UL << ias) - 1;
}
domain->geometry.force_aperture = true;
/* Initialise the context bank with our page table cfg */
......@@ -1182,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return ret;
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
ret = arm_smmu_init_domain_context(domain, smmu, dev);
if (ret < 0)
goto rpm_put;
......
......@@ -169,10 +169,12 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CB_TCR 0x30
#define ARM_SMMU_TCR_EAE BIT(31)
#define ARM_SMMU_TCR_EPD1 BIT(23)
#define ARM_SMMU_TCR_A1 BIT(22)
#define ARM_SMMU_TCR_TG0 GENMASK(15, 14)
#define ARM_SMMU_TCR_SH0 GENMASK(13, 12)
#define ARM_SMMU_TCR_ORGN0 GENMASK(11, 10)
#define ARM_SMMU_TCR_IRGN0 GENMASK(9, 8)
#define ARM_SMMU_TCR_EPD0 BIT(7)
#define ARM_SMMU_TCR_T0SZ GENMASK(5, 0)
#define ARM_SMMU_VTCR_RES1 BIT(31)
......@@ -254,6 +256,21 @@ enum arm_smmu_implementation {
QCOM_SMMUV2,
};
struct arm_smmu_s2cr {
struct iommu_group *group;
int count;
enum arm_smmu_s2cr_type type;
enum arm_smmu_s2cr_privcfg privcfg;
u8 cbndx;
};
struct arm_smmu_smr {
u16 mask;
u16 id;
bool valid;
bool pinned;
};
struct arm_smmu_device {
struct device *dev;
......@@ -329,6 +346,13 @@ struct arm_smmu_cfg {
};
#define ARM_SMMU_INVALID_IRPTNDX 0xff
struct arm_smmu_cb {
u64 ttbr[2];
u32 tcr[2];
u32 mair[2];
struct arm_smmu_cfg *cfg;
};
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
......@@ -348,23 +372,39 @@ struct arm_smmu_domain {
struct iommu_domain domain;
};
static inline u32 arm_smmu_lpae_tcr(struct io_pgtable_cfg *cfg)
struct arm_smmu_master_cfg {
struct arm_smmu_device *smmu;
s16 smendx[];
};
static inline u32 arm_smmu_lpae_tcr(const struct io_pgtable_cfg *cfg)
{
return ARM_SMMU_TCR_EPD1 |
FIELD_PREP(ARM_SMMU_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) |
u32 tcr = FIELD_PREP(ARM_SMMU_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) |
FIELD_PREP(ARM_SMMU_TCR_SH0, cfg->arm_lpae_s1_cfg.tcr.sh) |
FIELD_PREP(ARM_SMMU_TCR_ORGN0, cfg->arm_lpae_s1_cfg.tcr.orgn) |
FIELD_PREP(ARM_SMMU_TCR_IRGN0, cfg->arm_lpae_s1_cfg.tcr.irgn) |
FIELD_PREP(ARM_SMMU_TCR_T0SZ, cfg->arm_lpae_s1_cfg.tcr.tsz);
/*
* When TTBR1 is selected shift the TCR fields by 16 bits and disable
* translation in TTBR0
*/
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
tcr = (tcr << 16) & ~ARM_SMMU_TCR_A1;
tcr |= ARM_SMMU_TCR_EPD0;
} else
tcr |= ARM_SMMU_TCR_EPD1;
return tcr;
}
static inline u32 arm_smmu_lpae_tcr2(struct io_pgtable_cfg *cfg)
static inline u32 arm_smmu_lpae_tcr2(const struct io_pgtable_cfg *cfg)
{
return FIELD_PREP(ARM_SMMU_TCR2_PASIZE, cfg->arm_lpae_s1_cfg.tcr.ips) |
FIELD_PREP(ARM_SMMU_TCR2_SEP, ARM_SMMU_TCR2_SEP_UPSTREAM);
}
static inline u32 arm_smmu_lpae_vtcr(struct io_pgtable_cfg *cfg)
static inline u32 arm_smmu_lpae_vtcr(const struct io_pgtable_cfg *cfg)
{
return ARM_SMMU_VTCR_RES1 |
FIELD_PREP(ARM_SMMU_VTCR_PS, cfg->arm_lpae_s2_cfg.vtcr.ps) |
......@@ -386,14 +426,37 @@ struct arm_smmu_impl {
u64 val);
int (*cfg_probe)(struct arm_smmu_device *smmu);
int (*reset)(struct arm_smmu_device *smmu);
int (*init_context)(struct arm_smmu_domain *smmu_domain);
int (*init_context)(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *cfg, struct device *dev);
void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
int status);
int (*def_domain_type)(struct device *dev);
irqreturn_t (*global_fault)(int irq, void *dev);
irqreturn_t (*context_fault)(int irq, void *dev);
int (*alloc_context_bank)(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu,
struct device *dev, int start);
};
#define INVALID_SMENDX -1
#define cfg_smendx(cfg, fw, i) \
(i >= fw->num_ids ? INVALID_SMENDX : cfg->smendx[i])
#define for_each_cfg_sme(cfg, fw, i, idx) \
for (i = 0; idx = cfg_smendx(cfg, fw, i), i < fw->num_ids; ++i)
static inline int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
int idx;
do {
idx = find_next_zero_bit(map, end, start);
if (idx == end)
return -ENOSPC;
} while (test_and_set_bit(idx, map));
return idx;
}
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
{
return smmu->base + (n << smmu->pgshift);
......@@ -458,6 +521,7 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu);
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx);
int arm_mmu500_reset(struct arm_smmu_device *smmu);
#endif /* _ARM_SMMU_H */
......@@ -584,8 +584,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
* index into qcom_iommu->ctxs:
*/
if (WARN_ON(asid < 1) ||
WARN_ON(asid > qcom_iommu->num_ctxs))
WARN_ON(asid > qcom_iommu->num_ctxs)) {
put_device(&iommu_pdev->dev);
return -EINVAL;
}
if (!dev_iommu_priv_get(dev)) {
dev_iommu_priv_set(dev, qcom_iommu);
......@@ -594,9 +596,11 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
* multiple different iommu devices. Multiple context
* banks are ok, but multiple devices are not:
*/
if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) {
put_device(&iommu_pdev->dev);
return -EINVAL;
}
}
return iommu_fwspec_add_ids(dev, &asid, 1);
}
......@@ -752,7 +756,7 @@ static const struct of_device_id ctx_of_match[] = {
static struct platform_driver qcom_iommu_ctx_driver = {
.driver = {
.name = "qcom-iommu-ctx",
.of_match_table = of_match_ptr(ctx_of_match),
.of_match_table = ctx_of_match,
},
.probe = qcom_iommu_ctx_probe,
.remove = qcom_iommu_ctx_remove,
......@@ -915,7 +919,7 @@ static const struct of_device_id qcom_iommu_of_match[] = {
static struct platform_driver qcom_iommu_driver = {
.driver = {
.name = "qcom-iommu",
.of_match_table = of_match_ptr(qcom_iommu_of_match),
.of_match_table = qcom_iommu_of_match,
.pm = &qcom_iommu_pm_ops,
},
.probe = qcom_iommu_device_probe,
......
......@@ -343,8 +343,11 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
NULL))
pr_warn("iova flush queue initialization failed\n");
else
cookie->fq_domain = domain;
init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
}
if (!dev)
......@@ -471,7 +474,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
WARN_ON(unmapped != size);
if (!cookie->fq_domain)
iommu_tlb_sync(domain, &iotlb_gather);
iommu_iotlb_sync(domain, &iotlb_gather);
iommu_dma_free_iova(cookie, dma_addr, size);
}
......@@ -524,6 +527,9 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
/* IOMMU can map any pages, so himem can also be used here */
gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
/* It makes no sense to muck about with huge pages */
gfp &= ~__GFP_COMP;
while (count) {
struct page *page = NULL;
unsigned int order_size;
......@@ -544,15 +550,9 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
page = alloc_pages_node(nid, alloc_flags, order);
if (!page)
continue;
if (!order)
break;
if (!PageCompound(page)) {
if (order)
split_page(page, order);
break;
} else if (!split_huge_page(page)) {
break;
}
__free_pages(page, order);
}
if (!page) {
__iommu_dma_free_pages(pages, i);
......
......@@ -1174,7 +1174,7 @@ static int fsl_pamu_probe(struct platform_device *pdev)
if (irq != NO_IRQ)
free_irq(irq, data);
kzfree(data);
kfree_sensitive(data);
if (pamu_regs)
iounmap(pamu_regs);
......
......@@ -380,7 +380,7 @@ dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
return NULL;
}
/**
/*
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit
* present in the platform
......@@ -1024,8 +1024,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;
int agaw = 0;
int msagaw = 0;
int agaw = -1;
int msagaw = -1;
int err;
if (!drhd->reg_base_addr) {
......@@ -1050,17 +1050,28 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
}
err = -EINVAL;
if (cap_sagaw(iommu->cap) == 0) {
pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
iommu->name);
drhd->ignored = 1;
}
if (!drhd->ignored) {
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto err_unmap;
drhd->ignored = 1;
}
}
if (!drhd->ignored) {
msagaw = iommu_calculate_max_sagaw(iommu);
if (msagaw < 0) {
pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto err_unmap;
drhd->ignored = 1;
agaw = -1;
}
}
iommu->agaw = agaw;
iommu->msagaw = msagaw;
......@@ -1087,7 +1098,12 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
raw_spin_lock_init(&iommu->register_lock);
if (intel_iommu_enabled) {
/*
* This is only for hotplug; at boot time intel_iommu_enabled won't
* be set yet. When intel_iommu_init() runs, it registers the units
* present at boot time, then sets intel_iommu_enabled.
*/
if (intel_iommu_enabled && !drhd->ignored) {
err = iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups,
"%s", iommu->name);
......@@ -1117,7 +1133,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
static void free_iommu(struct intel_iommu *iommu)
{
if (intel_iommu_enabled) {
if (intel_iommu_enabled && iommu->iommu.ops) {
iommu_device_unregister(&iommu->iommu);
iommu_device_sysfs_remove(&iommu->iommu);
}
......
......@@ -698,12 +698,47 @@ static int domain_update_iommu_superpage(struct dmar_domain *domain,
return fls(mask);
}
static int domain_update_device_node(struct dmar_domain *domain)
{
struct device_domain_info *info;
int nid = NUMA_NO_NODE;
assert_spin_locked(&device_domain_lock);
if (list_empty(&domain->devices))
return NUMA_NO_NODE;
list_for_each_entry(info, &domain->devices, link) {
if (!info->dev)
continue;
/*
* There could possibly be multiple device numa nodes as devices
* within the same domain may sit behind different IOMMUs. There
* isn't perfect answer in such situation, so we select first
* come first served policy.
*/
nid = dev_to_node(info->dev);
if (nid != NUMA_NO_NODE)
break;
}
return nid;
}
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
domain_update_iommu_coherency(domain);
domain->iommu_snooping = domain_update_iommu_snooping(NULL);
domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
/*
* If RHSA is missing, we should default to the device numa domain
* as fall back.
*/
if (domain->nid == NUMA_NO_NODE)
domain->nid = domain_update_device_node(domain);
}
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
......@@ -5095,8 +5130,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
if (type == IOMMU_DOMAIN_DMA)
intel_init_iova_domain(dmar_domain);
domain_update_iommu_cap(dmar_domain);
domain = &dmar_domain->domain;
domain->geometry.aperture_start = 0;
domain->geometry.aperture_end =
......@@ -5408,8 +5441,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
int ret = 0;
u64 size = 0;
if (!inv_info || !dmar_domain ||
inv_info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
if (!inv_info || !dmar_domain)
return -EINVAL;
if (!dev || !dev_is_pci(dev))
......@@ -5434,8 +5466,8 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
/* Size is only valid in address selective invalidation */
if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
size = to_vtd_size(inv_info->addr_info.granule_size,
inv_info->addr_info.nb_granules);
size = to_vtd_size(inv_info->granu.addr_info.granule_size,
inv_info->granu.addr_info.nb_granules);
for_each_set_bit(cache_type,
(unsigned long *)&inv_info->cache,
......@@ -5456,20 +5488,20 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
* granularity.
*/
if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
(inv_info->pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
pasid = inv_info->pasid_info.pasid;
(inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
pasid = inv_info->granu.pasid_info.pasid;
else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
(inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
pasid = inv_info->addr_info.pasid;
(inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
pasid = inv_info->granu.addr_info.pasid;
switch (BIT(cache_type)) {
case IOMMU_CACHE_INV_TYPE_IOTLB:
/* HW will ignore LSB bits based on address mask */
if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
size &&
(inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
(inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
inv_info->addr_info.addr, size);
inv_info->granu.addr_info.addr, size);
}
/*
......@@ -5477,9 +5509,9 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
* We use npages = -1 to indicate that.
*/
qi_flush_piotlb(iommu, did, pasid,
mm_to_dma_pfn(inv_info->addr_info.addr),
mm_to_dma_pfn(inv_info->granu.addr_info.addr),
(granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
if (!info->ats_enabled)
break;
......@@ -5502,7 +5534,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
size = 64 - VTD_PAGE_SHIFT;
addr = 0;
} else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
addr = inv_info->addr_info.addr;
addr = inv_info->granu.addr_info.addr;
}
if (info->ats_enabled)
......
......@@ -284,8 +284,15 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
if (WARN_ON(!iommu) || !data)
return -EINVAL;
if (data->version != IOMMU_GPASID_BIND_VERSION_1 ||
data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
return -EINVAL;
/* IOMMU core ensures argsz is more than the start of the union */
if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd))
return -EINVAL;
/* Make sure no undefined flags are used in vendor data */
if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1))
return -EINVAL;
if (!dev_is_pci(dev))
......@@ -370,7 +377,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
spin_lock(&iommu->lock);
ret = intel_pasid_setup_nested(iommu, dev,
(pgd_t *)(uintptr_t)data->gpgd,
data->hpasid, &data->vtd, dmar_domain,
data->hpasid, &data->vendor.vtd, dmar_domain,
data->addr_width);
spin_unlock(&iommu->lock);
if (ret) {
......
......@@ -20,6 +20,8 @@
#include <asm/barrier.h>
#include "io-pgtable-arm.h"
#define ARM_LPAE_MAX_ADDR_BITS 52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
#define ARM_LPAE_MAX_LEVELS 4
......@@ -100,23 +102,6 @@
#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
/* Register bits */
#define ARM_LPAE_TCR_TG0_4K 0
#define ARM_LPAE_TCR_TG0_64K 1
#define ARM_LPAE_TCR_TG0_16K 2
#define ARM_LPAE_TCR_TG1_16K 1
#define ARM_LPAE_TCR_TG1_4K 2
#define ARM_LPAE_TCR_TG1_64K 3
#define ARM_LPAE_TCR_SH_NS 0
#define ARM_LPAE_TCR_SH_OS 2
#define ARM_LPAE_TCR_SH_IS 3
#define ARM_LPAE_TCR_RGN_NC 0
#define ARM_LPAE_TCR_RGN_WBWA 1
#define ARM_LPAE_TCR_RGN_WT 2
#define ARM_LPAE_TCR_RGN_WB 3
#define ARM_LPAE_VTCR_SL0_MASK 0x3
#define ARM_LPAE_TCR_T0SZ_SHIFT 0
......@@ -124,14 +109,6 @@
#define ARM_LPAE_VTCR_PS_SHIFT 16
#define ARM_LPAE_VTCR_PS_MASK 0x7
#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT 0x6ULL
#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK 0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
......
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef IO_PGTABLE_ARM_H_
#define IO_PGTABLE_ARM_H_
#define ARM_LPAE_TCR_TG0_4K 0
#define ARM_LPAE_TCR_TG0_64K 1
#define ARM_LPAE_TCR_TG0_16K 2
#define ARM_LPAE_TCR_TG1_16K 1
#define ARM_LPAE_TCR_TG1_4K 2
#define ARM_LPAE_TCR_TG1_64K 3
#define ARM_LPAE_TCR_SH_NS 0
#define ARM_LPAE_TCR_SH_OS 2
#define ARM_LPAE_TCR_SH_IS 3
#define ARM_LPAE_TCR_RGN_NC 0
#define ARM_LPAE_TCR_RGN_WBWA 1
#define ARM_LPAE_TCR_RGN_WT 2
#define ARM_LPAE_TCR_RGN_WB 3
#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT 0x6ULL
#endif /* IO_PGTABLE_ARM_H_ */
......@@ -762,7 +762,7 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
}
iommu_flush_tlb_all(domain);
iommu_flush_iotlb_all(domain);
out:
iommu_put_resv_regions(dev, &mappings);
......@@ -1961,25 +1961,188 @@ int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
struct iommu_cache_invalidate_info *inv_info)
/*
* Check flags and other user provided data for valid combinations. We also
* make sure no reserved fields or unused flags are set. This is to ensure
* not breaking userspace in the future when these fields or flags are used.
*/
static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
{
u32 mask;
int i;
if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
return -EINVAL;
mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
if (info->cache & ~mask)
return -EINVAL;
if (info->granularity >= IOMMU_INV_GRANU_NR)
return -EINVAL;
switch (info->granularity) {
case IOMMU_INV_GRANU_ADDR:
if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
return -EINVAL;
mask = IOMMU_INV_ADDR_FLAGS_PASID |
IOMMU_INV_ADDR_FLAGS_ARCHID |
IOMMU_INV_ADDR_FLAGS_LEAF;
if (info->granu.addr_info.flags & ~mask)
return -EINVAL;
break;
case IOMMU_INV_GRANU_PASID:
mask = IOMMU_INV_PASID_FLAGS_PASID |
IOMMU_INV_PASID_FLAGS_ARCHID;
if (info->granu.pasid_info.flags & ~mask)
return -EINVAL;
break;
case IOMMU_INV_GRANU_DOMAIN:
if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
return -EINVAL;
break;
default:
return -EINVAL;
}
/* Check reserved padding fields */
for (i = 0; i < sizeof(info->padding); i++) {
if (info->padding[i])
return -EINVAL;
}
return 0;
}
int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
void __user *uinfo)
{
struct iommu_cache_invalidate_info inv_info = { 0 };
u32 minsz;
int ret;
if (unlikely(!domain->ops->cache_invalidate))
return -ENODEV;
return domain->ops->cache_invalidate(domain, dev, inv_info);
/*
* No new spaces can be added before the variable sized union, the
* minimum size is the offset to the union.
*/
minsz = offsetof(struct iommu_cache_invalidate_info, granu);
/* Copy minsz from user to get flags and argsz */
if (copy_from_user(&inv_info, uinfo, minsz))
return -EFAULT;
/* Fields before the variable size union are mandatory */
if (inv_info.argsz < minsz)
return -EINVAL;
/* PASID and address granu require additional info beyond minsz */
if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
return -EINVAL;
if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
return -EINVAL;
/*
* The user might be using a newer UAPI header which has a larger data
* size; we shall support the existing flags within the current
* size. Copy the remaining user data _after_ minsz but not more
* than the current kernel-supported size.
*/
if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
return -EFAULT;
/* Now the argsz is validated, check the content */
ret = iommu_check_cache_invl_data(&inv_info);
if (ret)
return ret;
return domain->ops->cache_invalidate(domain, dev, &inv_info);
}
EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);
int iommu_sva_bind_gpasid(struct iommu_domain *domain,
struct device *dev, struct iommu_gpasid_bind_data *data)
static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
{
u32 mask;
int i;
if (data->version != IOMMU_GPASID_BIND_VERSION_1)
return -EINVAL;
/* Check the range of supported formats */
if (data->format >= IOMMU_PASID_FORMAT_LAST)
return -EINVAL;
/* Check all flags */
mask = IOMMU_SVA_GPASID_VAL;
if (data->flags & ~mask)
return -EINVAL;
/* Check reserved padding fields */
for (i = 0; i < sizeof(data->padding); i++) {
if (data->padding[i])
return -EINVAL;
}
return 0;
}
static int iommu_sva_prepare_bind_data(void __user *udata,
struct iommu_gpasid_bind_data *data)
{
u32 minsz;
/*
* No new fields can be added before the variable-sized union; the
* minimum size is the offset to the union.
*/
minsz = offsetof(struct iommu_gpasid_bind_data, vendor);
/* Copy minsz from user to get flags and argsz */
if (copy_from_user(data, udata, minsz))
return -EFAULT;
/* Fields before the variable size union are mandatory */
if (data->argsz < minsz)
return -EINVAL;
/*
* The user might be using a newer UAPI header; let the IOMMU vendor
* driver decide on the size it needs. Since the guest PASID bind data
* can be vendor specific, a larger argsz could be the result of an
* extension for one vendor without affecting another vendor.
* Copy the remaining user data _after_ minsz.
*/
if (copy_from_user((void *)data + minsz, udata + minsz,
min_t(u32, data->argsz, sizeof(*data)) - minsz))
return -EFAULT;
return iommu_check_bind_data(data);
}
int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
void __user *udata)
{
struct iommu_gpasid_bind_data data = { 0 };
int ret;
if (unlikely(!domain->ops->sva_bind_gpasid))
return -ENODEV;
return domain->ops->sva_bind_gpasid(domain, dev, data);
ret = iommu_sva_prepare_bind_data(udata, &data);
if (ret)
return ret;
return domain->ops->sva_bind_gpasid(domain, dev, &data);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);
EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);
int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
ioasid_t pasid)
......@@ -1991,6 +2154,23 @@ int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
void __user *udata)
{
struct iommu_gpasid_bind_data data = { 0 };
int ret;
if (unlikely(!domain->ops->sva_bind_gpasid))
return -ENODEV;
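/* Validate the user data as for bind; only the host PASID is used for unbind */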
ret = iommu_sva_prepare_bind_data(udata, &data);
if (ret)
return ret;
return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
}
EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);
static void __iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
......@@ -2316,7 +2496,7 @@ size_t iommu_unmap(struct iommu_domain *domain,
iommu_iotlb_gather_init(&iotlb_gather);
ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
iommu_tlb_sync(domain, &iotlb_gather);
iommu_iotlb_sync(domain, &iotlb_gather);
return ret;
}
......
......@@ -579,7 +579,7 @@ void queue_iova(struct iova_domain *iovad,
/* Avoid false sharing as much as possible. */
if (!atomic_read(&iovad->fq_timer_on) &&
!atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
!atomic_xchg(&iovad->fq_timer_on, 1))
mod_timer(&iovad->fq_timer,
jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
......
......@@ -3,7 +3,6 @@
* Copyright (c) 2015-2016 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
......@@ -15,13 +14,16 @@
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/soc/mediatek/infracfg.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
......@@ -116,6 +118,7 @@
#define OUT_ORDER_WR_EN BIT(4)
#define HAS_SUB_COMM BIT(5)
#define WR_THROT_EN BIT(6)
#define HAS_LEGACY_IVRP_PADDR BIT(7)
#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
((((pdata)->flags) & (_x)) == (_x))
......@@ -582,7 +585,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
if (data->plat_data->m4u_plat == M4U_MT8173)
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR))
regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
else
regval = lower_32_bits(data->protect_base) |
......@@ -640,8 +643,11 @@ static int mtk_iommu_probe(struct platform_device *pdev)
struct resource *res;
resource_size_t ioaddr;
struct component_match *match = NULL;
struct regmap *infracfg;
void *protect;
int i, larb_nr, ret;
u32 val;
char *p;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
......@@ -655,10 +661,28 @@ static int mtk_iommu_probe(struct platform_device *pdev)
return -ENOMEM;
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
/* Whether the current dram is over 4GB */
data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
if (!MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
data->enable_4GB = false;
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) {
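/* Query the SoC-specific infracfg syscon for the DDR 4GB-mode state */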
switch (data->plat_data->m4u_plat) {
case M4U_MT2712:
p = "mediatek,mt2712-infracfg";
break;
case M4U_MT8173:
p = "mediatek,mt8173-infracfg";
break;
default:
p = NULL;
}
infracfg = syscon_regmap_lookup_by_compatible(p);
if (IS_ERR(infracfg))
return PTR_ERR(infracfg);
ret = regmap_read(infracfg, REG_INFRA_MISC, &val);
if (ret)
return ret;
data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->base = devm_ioremap_resource(dev, res);
......@@ -816,9 +840,17 @@ static const struct mtk_iommu_plat_data mt6779_data = {
.larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
};
static const struct mtk_iommu_plat_data mt8167_data = {
.m4u_plat = M4U_MT8167,
.flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
.larbid_remap = {{0}, {1}, {2}}, /* Linear mapping. */
};
static const struct mtk_iommu_plat_data mt8173_data = {
.m4u_plat = M4U_MT8173,
.flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI,
.flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
HAS_LEGACY_IVRP_PADDR,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
};
......@@ -833,6 +865,7 @@ static const struct mtk_iommu_plat_data mt8183_data = {
static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
{}
......@@ -843,7 +876,7 @@ static struct platform_driver mtk_iommu_driver = {
.remove = mtk_iommu_remove,
.driver = {
.name = "mtk-iommu",
.of_match_table = of_match_ptr(mtk_iommu_of_ids),
.of_match_table = mtk_iommu_of_ids,
.pm = &mtk_iommu_pm_ops,
}
};
......
......@@ -39,6 +39,7 @@ enum mtk_iommu_plat {
M4U_MT2701,
M4U_MT2712,
M4U_MT6779,
M4U_MT8167,
M4U_MT8173,
M4U_MT8183,
};
......
......@@ -881,7 +881,6 @@ static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
{
struct sun50i_iommu *iommu = dev_id;
phys_addr_t iova;
u32 status;
spin_lock(&iommu->iommu_lock);
......@@ -893,15 +892,15 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
}
if (status & IOMMU_INT_INVALID_L2PG)
iova = sun50i_iommu_handle_pt_irq(iommu,
sun50i_iommu_handle_pt_irq(iommu,
IOMMU_INT_ERR_ADDR_L2_REG,
IOMMU_L2PG_INT_REG);
else if (status & IOMMU_INT_INVALID_L1PG)
iova = sun50i_iommu_handle_pt_irq(iommu,
sun50i_iommu_handle_pt_irq(iommu,
IOMMU_INT_ERR_ADDR_L1_REG,
IOMMU_L1PG_INT_REG);
else
iova = sun50i_iommu_handle_perm_irq(iommu);
sun50i_iommu_handle_perm_irq(iommu);
iommu_write(iommu, IOMMU_INT_CLR_REG, status);
......
......@@ -12,6 +12,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <soc/tegra/ahb.h>
......@@ -19,8 +20,10 @@
struct tegra_smmu_group {
struct list_head list;
struct tegra_smmu *smmu;
const struct tegra_smmu_group_soc *soc;
struct iommu_group *group;
unsigned int swgroup;
};
struct tegra_smmu {
......@@ -49,6 +52,7 @@ struct tegra_smmu_as {
struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
spinlock_t lock;
u32 *count;
struct page **pts;
struct page *pd;
......@@ -127,6 +131,11 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12
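/* The SMMU always uses 4 KiB pages and PTEs, regardless of the CPU's PAGE_SIZE */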
#define SMMU_PAGE_MASK (~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x) ((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x) ((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x) ((unsigned long)((x) >> SMMU_PTE_SHIFT))
#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)
......@@ -308,6 +317,8 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
return NULL;
}
spin_lock_init(&as->lock);
/* setup aperture */
as->domain.geometry.aperture_start = 0;
as->domain.geometry.aperture_end = 0xffffffff;
......@@ -569,19 +580,14 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
}
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
dma_addr_t *dmap)
dma_addr_t *dmap, struct page *page)
{
unsigned int pde = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
if (!as->pts[pde]) {
struct page *page;
dma_addr_t dma;
page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
if (!page)
return NULL;
dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, dma)) {
......@@ -644,7 +650,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
u32 *pte, dma_addr_t pte_dma, u32 val)
{
struct tegra_smmu *smmu = as->smmu;
unsigned long offset = offset_in_page(pte);
unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);
*pte = val;
......@@ -655,15 +661,61 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
static struct page *as_get_pde_page(struct tegra_smmu_as *as,
unsigned long iova, gfp_t gfp,
unsigned long *flags)
{
unsigned int pde = iova_pd_index(iova);
struct page *page = as->pts[pde];
/* First check whether an allocation needs to be done at all */
if (page)
return page;
/*
* In order to prevent exhaustion of the atomic memory pool, we
* allocate the page in a sleeping context if the GFP flags permit.
* Hence the spinlock needs to be unlocked and re-locked around the
* allocation.
*/
if (!(gfp & __GFP_ATOMIC))
spin_unlock_irqrestore(&as->lock, *flags);
page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);
if (!(gfp & __GFP_ATOMIC))
spin_lock_irqsave(&as->lock, *flags);
/*
* In the case of a blocking allocation, a concurrent mapping may have
* won the race for the PDE. If so, the page we just allocated is not
* needed, and a failed allocation is not fatal either.
*/
if (as->pts[pde]) {
if (page)
__free_page(page);
page = as->pts[pde];
}
return page;
}
static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
unsigned long *flags)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
struct page *page;
u32 pte_attrs;
u32 *pte;
pte = as_get_pte(as, iova, &pte_dma);
page = as_get_pde_page(as, iova, gfp, flags);
if (!page)
return -ENOMEM;
pte = as_get_pte(as, iova, &pte_dma, page);
if (!pte)
return -ENOMEM;
......@@ -680,12 +732,13 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
pte_attrs |= SMMU_PTE_WRITABLE;
tegra_smmu_set_pte(as, iova, pte, pte_dma,
__phys_to_pfn(paddr) | pte_attrs);
SMMU_PHYS_PFN(paddr) | pte_attrs);
return 0;
}
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size, struct iommu_iotlb_gather *gather)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
......@@ -702,6 +755,33 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return size;
}
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
unsigned long flags;
int ret;
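/* Serialize page-table updates; the lock may be dropped for a blocking PDE allocation */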
spin_lock_irqsave(&as->lock, flags);
ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
spin_unlock_irqrestore(&as->lock, flags);
return ret;
}
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size, struct iommu_iotlb_gather *gather)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
unsigned long flags;
spin_lock_irqsave(&as->lock, flags);
size = __tegra_smmu_unmap(domain, iova, size, gather);
spin_unlock_irqrestore(&as->lock, flags);
return size;
}
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
......@@ -716,7 +796,7 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
pfn = *pte & as->smmu->pfn_mask;
return PFN_PHYS(pfn);
return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
......@@ -813,22 +893,34 @@ tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
return NULL;
}
static void tegra_smmu_group_release(void *iommu_data)
{
struct tegra_smmu_group *group = iommu_data;
struct tegra_smmu *smmu = group->smmu;
mutex_lock(&smmu->lock);
list_del(&group->list);
mutex_unlock(&smmu->lock);
}
static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
unsigned int swgroup)
{
const struct tegra_smmu_group_soc *soc;
struct tegra_smmu_group *group;
struct iommu_group *grp;
/* Find the group_soc associated with this swgroup */
soc = tegra_smmu_find_group(smmu, swgroup);
if (!soc)
return NULL;
mutex_lock(&smmu->lock);
/* Find an existing iommu_group associated with this swgroup or group_soc */
list_for_each_entry(group, &smmu->groups, list)
if (group->soc == soc) {
if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
grp = iommu_group_ref_get(group->group);
mutex_unlock(&smmu->lock);
return group->group;
return grp;
}
group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
......@@ -838,6 +930,8 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
}
INIT_LIST_HEAD(&group->list);
group->swgroup = swgroup;
group->smmu = smmu;
group->soc = soc;
group->group = iommu_group_alloc();
......@@ -847,6 +941,9 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
return NULL;
}
iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
if (soc)
iommu_group_set_name(group->group, soc->name);
list_add_tail(&group->list, &smmu->groups);
mutex_unlock(&smmu->lock);
......@@ -1019,10 +1116,11 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu->dev = dev;
smmu->mc = mc;
smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
smmu->pfn_mask =
BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
mc->soc->num_address_bits, smmu->pfn_mask);
smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
smmu->tlb_mask);
......
......@@ -1073,7 +1073,7 @@ static const struct tegra_smmu_soc tegra210_smmu_soc = {
.num_groups = ARRAY_SIZE(tegra210_groups),
.supports_round_robin_arbitration = true,
.supports_request_limit = true,
.num_tlb_lines = 32,
.num_tlb_lines = 48,
.num_asids = 128,
};
......
......@@ -774,7 +774,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
long unlocked = 0;
struct vfio_regions *entry, *next;
iommu_tlb_sync(domain->domain, iotlb_gather);
iommu_iotlb_sync(domain->domain, iotlb_gather);
list_for_each_entry_safe(entry, next, regions, list) {
unlocked += vfio_unpin_pages_remote(dma,
......
......@@ -5,6 +5,7 @@
#define _DRM_INTEL_GTT_H
#include <linux/agp_backend.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
void intel_gtt_get(u64 *gtt_total,
......@@ -33,8 +34,4 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
/* flag for GFDT type */
#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
#ifdef CONFIG_INTEL_IOMMU
extern int intel_iommu_gfx_mapped;
#endif
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2020 MediaTek Inc.
* Copyright (c) 2020 BayLibre, SAS
* Author: Honghui Zhang <honghui.zhang@mediatek.com>
* Author: Fabien Parent <fparent@baylibre.com>
*/
#ifndef __DTS_IOMMU_PORT_MT8167_H
#define __DTS_IOMMU_PORT_MT8167_H
#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
#define M4U_LARB0_ID 0
#define M4U_LARB1_ID 1
#define M4U_LARB2_ID 2
/* larb0 */
#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1)
#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 2)
#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 3)
#define M4U_PORT_MDP_RDMA MTK_M4U_ID(M4U_LARB0_ID, 4)
#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 5)
#define M4U_PORT_MDP_WROT MTK_M4U_ID(M4U_LARB0_ID, 6)
#define M4U_PORT_DISP_FAKE MTK_M4U_ID(M4U_LARB0_ID, 7)
/* larb1*/
#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB1_ID, 0)
#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB1_ID, 1)
#define M4U_PORT_CAM_LSCI MTK_M4U_ID(M4U_LARB1_ID, 2)
#define M4U_PORT_CAM_ESFKO MTK_M4U_ID(M4U_LARB1_ID, 3)
#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB1_ID, 4)
#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB1_ID, 5)
#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB1_ID, 6)
#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB1_ID, 7)
#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB1_ID, 8)
#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB1_ID, 9)
#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB1_ID, 10)
#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB1_ID, 11)
#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB1_ID, 12)
/* larb2*/
#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB2_ID, 0)
#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB2_ID, 1)
#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB2_ID, 2)
#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB2_ID, 3)
#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB2_ID, 4)
#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB2_ID, 5)
#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB2_ID, 6)
#endif
......@@ -792,6 +792,7 @@ extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
extern int intel_iommu_tboot_noforce;
extern int intel_iommu_gfx_mapped;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
......
......@@ -31,7 +31,7 @@ enum io_pgtable_fmt {
* single page. IOMMUs that cannot batch TLB invalidation
* operations efficiently will typically issue them here, but
* others may decide to update the iommu_iotlb_gather structure
* and defer the invalidation until iommu_tlb_sync() instead.
* and defer the invalidation until iommu_iotlb_sync() instead.
*
* Note that these can all be called in atomic context and must therefore
* not block.
......
......@@ -424,11 +424,14 @@ extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
extern int iommu_cache_invalidate(struct iommu_domain *domain,
extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain,
struct device *dev,
struct iommu_cache_invalidate_info *inv_info);
extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
struct device *dev, struct iommu_gpasid_bind_data *data);
void __user *uinfo);
extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
struct device *dev, void __user *udata);
extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev, void __user *udata);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
......@@ -514,13 +517,13 @@ extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);
static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
if (domain->ops->flush_iotlb_all)
domain->ops->flush_iotlb_all(domain);
}
static inline void iommu_tlb_sync(struct iommu_domain *domain,
static inline void iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather)
{
if (domain->ops->iotlb_sync)
......@@ -543,7 +546,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
if (gather->pgsize != size ||
end < gather->start || start > gather->end) {
if (gather->pgsize)
iommu_tlb_sync(domain, gather);
iommu_iotlb_sync(domain, gather);
gather->pgsize = size;
}
......@@ -725,11 +728,11 @@ static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
return 0;
}
static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}
static inline void iommu_tlb_sync(struct iommu_domain *domain,
static inline void iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather)
{
}
......@@ -1033,20 +1036,28 @@ static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
}
static inline int
iommu_cache_invalidate(struct iommu_domain *domain,
iommu_uapi_cache_invalidate(struct iommu_domain *domain,
struct device *dev,
struct iommu_cache_invalidate_info *inv_info)
{
return -ENODEV;
}
static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain,
struct device *dev, struct iommu_gpasid_bind_data *data)
static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
struct device *dev, void __user *udata)
{
return -ENODEV;
}
static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev, void __user *udata)
{
return -ENODEV;
}
static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev, int pasid)
struct device *dev,
ioasid_t pasid)
{
return -ENODEV;
}
......
......@@ -32,6 +32,9 @@
#define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \
BIT(7) | BIT(8))
#define REG_INFRA_MISC 0xf00
#define F_DDR_4GB_SUPPORT_EN BIT(13)
int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask,
bool reg_update);
int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask,
......
......@@ -139,6 +139,7 @@ enum iommu_page_response_code {
/**
* struct iommu_page_response - Generic page response information
* @argsz: User filled size of this data
* @version: API version of this structure
* @flags: encodes whether the corresponding fields are valid
* (IOMMU_FAULT_PAGE_RESPONSE_* values)
......@@ -147,6 +148,7 @@ enum iommu_page_response_code {
* @code: response code from &enum iommu_page_response_code
*/
struct iommu_page_response {
__u32 argsz;
#define IOMMU_PAGE_RESP_VERSION_1 1
__u32 version;
#define IOMMU_PAGE_RESP_PASID_VALID (1 << 0)
......@@ -222,6 +224,7 @@ struct iommu_inv_pasid_info {
/**
* struct iommu_cache_invalidate_info - First level/stage invalidation
* information
* @argsz: User filled size of this data
* @version: API version of this structure
* @cache: bitfield that allows to select which caches to invalidate
* @granularity: defines the lowest granularity used for the invalidation:
......@@ -250,6 +253,7 @@ struct iommu_inv_pasid_info {
* must support the used granularity.
*/
struct iommu_cache_invalidate_info {
__u32 argsz;
#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
__u32 version;
/* IOMMU paging structure cache */
......@@ -259,11 +263,11 @@ struct iommu_cache_invalidate_info {
#define IOMMU_CACHE_INV_TYPE_NR (3)
__u8 cache;
__u8 granularity;
__u8 padding[2];
__u8 padding[6];
union {
struct iommu_inv_pasid_info pasid_info;
struct iommu_inv_addr_info addr_info;
};
} granu;
};
/**
......@@ -284,6 +288,7 @@ struct iommu_gpasid_bind_data_vtd {
#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */
#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */
#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */
#define IOMMU_SVA_VTD_GPASID_LAST (1 << 6)
__u64 flags;
__u32 pat;
__u32 emt;
......@@ -296,6 +301,7 @@ struct iommu_gpasid_bind_data_vtd {
/**
* struct iommu_gpasid_bind_data - Information about device and guest PASID binding
* @argsz: User filled size of this data
* @version: Version of this data structure
* @format: PASID table entry format
* @flags: Additional information on guest bind request
......@@ -313,21 +319,23 @@ struct iommu_gpasid_bind_data_vtd {
* PASID to host PASID based on this bind data.
*/
struct iommu_gpasid_bind_data {
__u32 argsz;
#define IOMMU_GPASID_BIND_VERSION_1 1
__u32 version;
#define IOMMU_PASID_FORMAT_INTEL_VTD 1
#define IOMMU_PASID_FORMAT_LAST 2
__u32 format;
__u32 addr_width;
#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */
__u64 flags;
__u64 gpgd;
__u64 hpasid;
__u64 gpasid;
__u32 addr_width;
__u8 padding[12];
__u8 padding[8];
/* Vendor specific data */
union {
struct iommu_gpasid_bind_data_vtd vtd;
};
} vendor;
};
#endif /* _UAPI_IOMMU_H */