Commit ac4e52c6 authored by Will Deacon

Merge branch 'for-joerg/arm-smmu/updates' into for-joerg/arm-smmu/next

* for-joerg/arm-smmu/updates: (29 commits)
  iommu/arm-smmu-qcom: Register the TBU driver in qcom_smmu_impl_init
  iommu/arm-smmu-v3: Enable HTTU for stage1 with io-pgtable mapping
  iommu/arm-smmu-v3: Add support for dirty tracking in domain alloc
  iommu/io-pgtable-arm: Add read_and_clear_dirty() support
  iommu/arm-smmu-v3: Add feature detection for HTTU
  iommu/arm-smmu-v3: Add support for domain_alloc_user fn
  iommu/arm-smmu-qcom: record reason for deferring probe
  iommu/arm-smmu: Pretty-print context fault related regs
  iommu/arm-smmu-qcom-debug: Do not print for handled faults
  iommu/arm-smmu: Add CB prefix to register bitfields
  iommu/arm-smmu-v3: add missing MODULE_DESCRIPTION() macro
  iommu/arm-smmu-v3: Shrink the strtab l1_desc array
  iommu/arm-smmu-v3: Do not zero the strtab twice
  iommu/arm-smmu-v3: Allow setting a S1 domain to a PASID
  iommu/arm-smmu-v3: Allow a PASID to be set when RID is IDENTITY/BLOCKED
  iommu/arm-smmu-v3: Test the STE S1DSS functionality
  iommu/arm-smmu-v3: Allow IDENTITY/BLOCKED to be set while PASID is used
  iommu/arm-smmu-v3: Put the SVA mmu notifier in the smmu_domain
  iommu/arm-smmu-v3: Keep track of arm_smmu_master_domain for SVA
  iommu/arm-smmu-v3: Make SVA allocate a normal arm_smmu_domain
  ...
parents a4ce3999 0b4eeee2
......@@ -394,6 +394,7 @@ config ARM_SMMU_V3
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select GENERIC_MSI_IRQ
select IOMMUFD_DRIVER if IOMMUFD
help
Support for implementations of the ARM System MMU architecture
version 3 providing translation support to a PCIe root complex.
......
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o
arm_smmu_v3-objs-y += arm-smmu-v3.o
arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
arm_smmu_v3-objs := $(arm_smmu_v3-objs-y)
arm_smmu_v3-y := arm-smmu-v3.o
arm_smmu_v3-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
obj-$(CONFIG_ARM_SMMU_V3_KUNIT_TEST) += arm-smmu-v3-test.o
......@@ -144,6 +144,14 @@ static void arm_smmu_v3_test_ste_expect_transition(
KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
}
static void arm_smmu_v3_test_ste_expect_non_hitless_transition(
struct kunit *test, const struct arm_smmu_ste *cur,
const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
{
arm_smmu_v3_test_ste_expect_transition(test, cur, target,
num_syncs_expected, false);
}
static void arm_smmu_v3_test_ste_expect_hitless_transition(
struct kunit *test, const struct arm_smmu_ste *cur,
const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
......@@ -155,6 +163,7 @@ static void arm_smmu_v3_test_ste_expect_hitless_transition(
static const dma_addr_t fake_cdtab_dma_addr = 0xF0F0F0F0F0F0;
static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste,
unsigned int s1dss,
const dma_addr_t dma_addr)
{
struct arm_smmu_master master = {
......@@ -164,7 +173,7 @@ static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste,
.smmu = &smmu,
};
arm_smmu_make_cdtable_ste(ste, &master);
arm_smmu_make_cdtable_ste(ste, &master, true, s1dss);
}
static void arm_smmu_v3_write_ste_test_bypass_to_abort(struct kunit *test)
......@@ -194,7 +203,8 @@ static void arm_smmu_v3_write_ste_test_cdtable_to_abort(struct kunit *test)
{
struct arm_smmu_ste ste;
arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
fake_cdtab_dma_addr);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
NUM_EXPECTED_SYNCS(2));
}
......@@ -203,7 +213,8 @@ static void arm_smmu_v3_write_ste_test_abort_to_cdtable(struct kunit *test)
{
struct arm_smmu_ste ste;
arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
fake_cdtab_dma_addr);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
NUM_EXPECTED_SYNCS(2));
}
......@@ -212,7 +223,8 @@ static void arm_smmu_v3_write_ste_test_cdtable_to_bypass(struct kunit *test)
{
struct arm_smmu_ste ste;
arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
fake_cdtab_dma_addr);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
NUM_EXPECTED_SYNCS(3));
}
......@@ -221,17 +233,59 @@ static void arm_smmu_v3_write_ste_test_bypass_to_cdtable(struct kunit *test)
{
struct arm_smmu_ste ste;
arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
fake_cdtab_dma_addr);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
NUM_EXPECTED_SYNCS(3));
}
static void arm_smmu_v3_write_ste_test_cdtable_s1dss_change(struct kunit *test)
{
struct arm_smmu_ste ste;
struct arm_smmu_ste s1dss_bypass;
arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
fake_cdtab_dma_addr);
arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
fake_cdtab_dma_addr);
/*
* Flipping s1dss on a CD table STE only involves changes to the second
* qword of an STE and can be done in a single write.
*/
arm_smmu_v3_test_ste_expect_hitless_transition(
test, &ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(1));
arm_smmu_v3_test_ste_expect_hitless_transition(
test, &s1dss_bypass, &ste, NUM_EXPECTED_SYNCS(1));
}
static void
arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass(struct kunit *test)
{
struct arm_smmu_ste s1dss_bypass;
arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
fake_cdtab_dma_addr);
arm_smmu_v3_test_ste_expect_hitless_transition(
test, &s1dss_bypass, &bypass_ste, NUM_EXPECTED_SYNCS(2));
}
static void
arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass(struct kunit *test)
{
struct arm_smmu_ste s1dss_bypass;
arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
fake_cdtab_dma_addr);
arm_smmu_v3_test_ste_expect_hitless_transition(
test, &bypass_ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(2));
}
static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste,
bool ats_enabled)
{
struct arm_smmu_master master = {
.smmu = &smmu,
.ats_enabled = ats_enabled,
};
struct io_pgtable io_pgtable = {};
struct arm_smmu_domain smmu_domain = {
......@@ -247,7 +301,7 @@ static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste,
io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sl = 3;
io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tsz = 4;
arm_smmu_make_s2_domain_ste(ste, &master, &smmu_domain);
arm_smmu_make_s2_domain_ste(ste, &master, &smmu_domain, ats_enabled);
}
static void arm_smmu_v3_write_ste_test_s2_to_abort(struct kunit *test)
......@@ -286,6 +340,48 @@ static void arm_smmu_v3_write_ste_test_bypass_to_s2(struct kunit *test)
NUM_EXPECTED_SYNCS(2));
}
static void arm_smmu_v3_write_ste_test_s1_to_s2(struct kunit *test)
{
struct arm_smmu_ste s1_ste;
struct arm_smmu_ste s2_ste;
arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
fake_cdtab_dma_addr);
arm_smmu_test_make_s2_ste(&s2_ste, true);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste,
NUM_EXPECTED_SYNCS(3));
}
static void arm_smmu_v3_write_ste_test_s2_to_s1(struct kunit *test)
{
struct arm_smmu_ste s1_ste;
struct arm_smmu_ste s2_ste;
arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
fake_cdtab_dma_addr);
arm_smmu_test_make_s2_ste(&s2_ste, true);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste,
NUM_EXPECTED_SYNCS(3));
}
static void arm_smmu_v3_write_ste_test_non_hitless(struct kunit *test)
{
struct arm_smmu_ste ste;
struct arm_smmu_ste ste_2;
/*
* Although no flow resembles this in practice, one way to force an STE
* update to be non-hitless is to change its CD table pointer as well as
its S1DSS field in the same update.
*/
arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
fake_cdtab_dma_addr);
arm_smmu_test_make_cdtable_ste(&ste_2, STRTAB_STE_1_S1DSS_BYPASS,
0x4B4B4b4B4B);
arm_smmu_v3_test_ste_expect_non_hitless_transition(
test, &ste, &ste_2, NUM_EXPECTED_SYNCS(3));
}
static void arm_smmu_v3_test_cd_expect_transition(
struct kunit *test, const struct arm_smmu_cd *cur,
const struct arm_smmu_cd *target, unsigned int num_syncs_expected,
......@@ -439,10 +535,16 @@ static struct kunit_case arm_smmu_v3_test_cases[] = {
KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_cdtable),
KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_bypass),
KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_cdtable),
KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_s1dss_change),
KUNIT_CASE(arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass),
KUNIT_CASE(arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass),
KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_abort),
KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_s2),
KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_bypass),
KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_s2),
KUNIT_CASE(arm_smmu_v3_write_ste_test_s1_to_s2),
KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_s1),
KUNIT_CASE(arm_smmu_v3_write_ste_test_non_hitless),
KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_clear),
KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_change_asid),
KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_clear),
......@@ -465,4 +567,5 @@ static struct kunit_suite arm_smmu_v3_test_module = {
kunit_test_suites(&arm_smmu_v3_test_module);
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
MODULE_DESCRIPTION("KUnit tests for arm-smmu-v3 driver");
MODULE_LICENSE("GPL v2");
......@@ -33,6 +33,9 @@
#define IDR0_ASID16 (1 << 12)
#define IDR0_ATS (1 << 10)
#define IDR0_HYP (1 << 9)
#define IDR0_HTTU GENMASK(7, 6)
#define IDR0_HTTU_ACCESS 1
#define IDR0_HTTU_ACCESS_DIRTY 2
#define IDR0_COHACC (1 << 4)
#define IDR0_TTF GENMASK(3, 2)
#define IDR0_TTF_AARCH64 2
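The new IDR0.HTTU field is what the series' HTTU feature detection decodes at probe time. A minimal sketch of that decode follows; it assumes a local reg holding IDR0 and ignores any firmware override of HTTU that the real probe path may also honour (ARM_SMMU_FEAT_HA/HD are defined further down in this header).

	/* Hedged sketch: map IDR0.HTTU onto the driver feature bits */
	switch (FIELD_GET(IDR0_HTTU, reg)) {
	case IDR0_HTTU_ACCESS_DIRTY:
		smmu->features |= ARM_SMMU_FEAT_HD;
		fallthrough;
	case IDR0_HTTU_ACCESS:
		smmu->features |= ARM_SMMU_FEAT_HA;
		break;
	default:
		break;
	}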
......@@ -301,6 +304,9 @@ struct arm_smmu_cd {
#define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32)
#define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38)
#define CTXDESC_CD_0_TCR_HA (1UL << 43)
#define CTXDESC_CD_0_TCR_HD (1UL << 42)
#define CTXDESC_CD_0_AA64 (1UL << 41)
#define CTXDESC_CD_0_S (1UL << 44)
#define CTXDESC_CD_0_R (1UL << 45)
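CTXDESC_CD_0_TCR_HD, next to the existing HA bit, is how a stage-1 CD opts into hardware-managed dirty state. A hedged sketch of a CD writer enabling them; the exact guard in arm_smmu_make_s1_cd() may differ (for example it may key off the io-pgtable IO_PGTABLE_QUIRK_ARM_HD quirk rather than the feature bits).

	/* Hedged sketch: let hardware update Access/Dirty flags for this CD */
	if (master->smmu->features & ARM_SMMU_FEAT_HA)
		target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_HA);
	if (master->smmu->features & ARM_SMMU_FEAT_HD)
		target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_HD);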
......@@ -579,17 +585,11 @@ struct arm_smmu_priq {
/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
u8 span;
struct arm_smmu_ste *l2ptr;
dma_addr_t l2ptr_dma;
};
struct arm_smmu_ctx_desc {
u16 asid;
refcount_t refs;
struct mm_struct *mm;
};
struct arm_smmu_l1_ctx_desc {
......@@ -602,11 +602,19 @@ struct arm_smmu_ctx_desc_cfg {
dma_addr_t cdtab_dma;
struct arm_smmu_l1_ctx_desc *l1_desc;
unsigned int num_l1_ents;
unsigned int used_ssids;
u8 in_ste;
u8 s1fmt;
/* log2 of the maximum number of CDs supported by this table */
u8 s1cdmax;
};
/* True if the cd table has SSIDS > 0 in use. */
static inline bool arm_smmu_ssids_in_use(struct arm_smmu_ctx_desc_cfg *cd_table)
{
return cd_table->used_ssids;
}
struct arm_smmu_s2_cfg {
u16 vmid;
};
......@@ -648,6 +656,8 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_E2H (1 << 18)
#define ARM_SMMU_FEAT_NESTING (1 << 19)
#define ARM_SMMU_FEAT_ATTR_TYPES_OVR (1 << 20)
#define ARM_SMMU_FEAT_HA (1 << 21)
#define ARM_SMMU_FEAT_HD (1 << 22)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
......@@ -696,16 +706,15 @@ struct arm_smmu_stream {
struct arm_smmu_master {
struct arm_smmu_device *smmu;
struct device *dev;
struct list_head domain_head;
struct arm_smmu_stream *streams;
/* Locked by the iommu core using the group mutex */
struct arm_smmu_ctx_desc_cfg cd_table;
unsigned int num_streams;
bool ats_enabled;
bool ats_enabled : 1;
bool ste_ats_enabled : 1;
bool stall_enabled;
bool sva_enabled;
bool iopf_enabled;
struct list_head bonds;
unsigned int ssid_bits;
};
......@@ -730,10 +739,11 @@ struct arm_smmu_domain {
struct iommu_domain domain;
/* List of struct arm_smmu_master_domain */
struct list_head devices;
spinlock_t devices_lock;
struct list_head mmu_notifiers;
struct mmu_notifier mmu_notifier;
};
/* The following are exposed for testing purposes. */
......@@ -757,15 +767,23 @@ void arm_smmu_make_abort_ste(struct arm_smmu_ste *target);
void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
struct arm_smmu_ste *target);
void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
struct arm_smmu_master *master);
struct arm_smmu_master *master, bool ats_enabled,
unsigned int s1dss);
void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
struct arm_smmu_master *master,
struct arm_smmu_domain *smmu_domain);
struct arm_smmu_domain *smmu_domain,
bool ats_enabled);
void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
struct arm_smmu_master *master, struct mm_struct *mm,
u16 asid);
#endif
struct arm_smmu_master_domain {
struct list_head devices_elm;
struct arm_smmu_master *master;
ioasid_t ssid;
};
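arm_smmu_master_domain is the new per-attachment record that replaces the old per-master domain_head entry: one entry exists per (master, ssid) attachment and lives on the domain's devices list under devices_lock. A rough sketch of how a domain-wide operation such as ATC invalidation is expected to walk it; the helper name and the loop body are illustrative, not taken from this diff.

	/* Hedged sketch: visit every (master, ssid) attached to a domain */
	static void smmu_domain_for_each_attachment(struct arm_smmu_domain *smmu_domain)
	{
		struct arm_smmu_master_domain *master_domain;
		unsigned long flags;

		spin_lock_irqsave(&smmu_domain->devices_lock, flags);
		list_for_each_entry(master_domain, &smmu_domain->devices,
				    devices_elm) {
			struct arm_smmu_master *master = master_domain->master;

			if (!master->ats_enabled)
				continue;
			/* e.g. queue an ATC invalidation for master_domain->ssid */
		}
		spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
	}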
static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
......@@ -774,11 +792,11 @@ static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
extern struct xarray arm_smmu_asid_xa;
extern struct mutex arm_smmu_asid_lock;
struct arm_smmu_domain *arm_smmu_domain_alloc(void);
void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid);
struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
u32 ssid);
struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
u32 ssid);
void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
struct arm_smmu_master *master,
struct arm_smmu_domain *smmu_domain);
......@@ -786,12 +804,15 @@ void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
struct arm_smmu_cd *cdptr,
const struct arm_smmu_cd *target);
int arm_smmu_set_pasid(struct arm_smmu_master *master,
struct arm_smmu_domain *smmu_domain, ioasid_t pasid,
struct arm_smmu_cd *cd);
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
size_t granule, bool leaf,
struct arm_smmu_domain *smmu_domain);
bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
unsigned long iova, size_t size);
#ifdef CONFIG_ARM_SMMU_V3_SVA
......@@ -802,9 +823,8 @@ int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
void arm_smmu_sva_notifier_synchronize(void);
struct iommu_domain *arm_smmu_sva_domain_alloc(void);
void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t id);
struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
struct mm_struct *mm);
#else /* CONFIG_ARM_SMMU_V3_SVA */
static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
......@@ -838,10 +858,7 @@ static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master
static inline void arm_smmu_sva_notifier_synchronize(void) {}
static inline struct iommu_domain *arm_smmu_sva_domain_alloc(void)
{
return NULL;
}
#define arm_smmu_sva_domain_alloc NULL
static inline void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
struct device *dev,
......
......@@ -200,7 +200,7 @@ static irqreturn_t nvidia_smmu_context_fault_bank(int irq,
void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx);
fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
if (!(fsr & ARM_SMMU_FSR_FAULT))
if (!(fsr & ARM_SMMU_CB_FSR_FAULT))
return IRQ_NONE;
fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
......
......@@ -141,7 +141,7 @@ static int qcom_tbu_halt(struct qcom_tbu *tbu, struct arm_smmu_domain *smmu_doma
writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
if ((fsr & ARM_SMMU_FSR_FAULT) && (fsr & ARM_SMMU_FSR_SS)) {
if ((fsr & ARM_SMMU_CB_FSR_FAULT) && (fsr & ARM_SMMU_CB_FSR_SS)) {
u32 sctlr_orig, sctlr;
/*
......@@ -298,7 +298,7 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain,
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr);
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
if (fsr & ARM_SMMU_FSR_FAULT) {
if (fsr & ARM_SMMU_CB_FSR_FAULT) {
/* Clear pending interrupts */
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
......@@ -306,7 +306,7 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain,
* TBU halt takes care of resuming any stalled transaction.
* Kept here for completeness' sake.
*/
if (fsr & ARM_SMMU_FSR_SS)
if (fsr & ARM_SMMU_CB_FSR_SS)
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
ARM_SMMU_RESUME_TERMINATE);
}
......@@ -320,11 +320,11 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain,
phys = qcom_tbu_trigger_atos(smmu_domain, tbu, iova, sid);
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
if (fsr & ARM_SMMU_FSR_FAULT) {
if (fsr & ARM_SMMU_CB_FSR_FAULT) {
/* Clear pending interrupts */
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
if (fsr & ARM_SMMU_FSR_SS)
if (fsr & ARM_SMMU_CB_FSR_SS)
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
ARM_SMMU_RESUME_TERMINATE);
}
......@@ -383,68 +383,44 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
struct arm_smmu_domain *smmu_domain = dev;
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
struct arm_smmu_device *smmu = smmu_domain->smmu;
u32 fsr, fsynr, cbfrsynra, resume = 0;
struct arm_smmu_context_fault_info cfi;
u32 resume = 0;
int idx = smmu_domain->cfg.cbndx;
phys_addr_t phys_soft;
unsigned long iova;
int ret, tmp;
static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
if (!(fsr & ARM_SMMU_FSR_FAULT))
return IRQ_NONE;
arm_smmu_read_context_fault_info(smmu, idx, &cfi);
fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
if (!(cfi.fsr & ARM_SMMU_CB_FSR_FAULT))
return IRQ_NONE;
if (list_empty(&tbu_list)) {
ret = report_iommu_fault(&smmu_domain->domain, NULL, iova,
fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
ret = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova,
cfi.fsynr & ARM_SMMU_CB_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
if (ret == -ENOSYS)
dev_err_ratelimited(smmu->dev,
"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
fsr, iova, fsynr, cbfrsynra, idx);
arm_smmu_print_context_fault_info(smmu, idx, &cfi);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
return IRQ_HANDLED;
}
phys_soft = ops->iova_to_phys(ops, iova);
phys_soft = ops->iova_to_phys(ops, cfi.iova);
tmp = report_iommu_fault(&smmu_domain->domain, NULL, iova,
fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
tmp = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova,
cfi.fsynr & ARM_SMMU_CB_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
if (!tmp || tmp == -EBUSY) {
dev_dbg(smmu->dev,
"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
iova, fsr, fsynr, idx);
dev_dbg(smmu->dev, "soft iova-to-phys=%pa\n", &phys_soft);
ret = IRQ_HANDLED;
resume = ARM_SMMU_RESUME_TERMINATE;
} else {
phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, iova, fsr);
phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, cfi.iova, cfi.fsr);
if (__ratelimit(&_rs)) {
dev_err(smmu->dev,
"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
fsr, iova, fsynr, cbfrsynra, idx);
dev_err(smmu->dev,
"FSR = %08x [%s%s%s%s%s%s%s%s%s], SID=0x%x\n",
fsr,
(fsr & 0x02) ? "TF " : "",
(fsr & 0x04) ? "AFF " : "",
(fsr & 0x08) ? "PF " : "",
(fsr & 0x10) ? "EF " : "",
(fsr & 0x20) ? "TLBMCF " : "",
(fsr & 0x40) ? "TLBLKF " : "",
(fsr & 0x80) ? "MHF " : "",
(fsr & 0x40000000) ? "SS " : "",
(fsr & 0x80000000) ? "MULTI " : "",
cbfrsynra);
arm_smmu_print_context_fault_info(smmu, idx, &cfi);
dev_err(smmu->dev,
"soft iova-to-phys=%pa\n", &phys_soft);
......@@ -478,17 +454,17 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
*/
if (tmp != -EBUSY) {
/* Clear the faulting FSR */
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
/* Retry or terminate any stalled transactions */
if (fsr & ARM_SMMU_FSR_SS)
if (cfi.fsr & ARM_SMMU_CB_FSR_SS)
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, resume);
}
return ret;
}
static int qcom_tbu_probe(struct platform_device *pdev)
int qcom_tbu_probe(struct platform_device *pdev)
{
struct of_phandle_args args = { .args_count = 2 };
struct device_node *np = pdev->dev.of_node;
......@@ -530,18 +506,3 @@ static int qcom_tbu_probe(struct platform_device *pdev)
return 0;
}
static const struct of_device_id qcom_tbu_of_match[] = {
{ .compatible = "qcom,sc7280-tbu" },
{ .compatible = "qcom,sdm845-tbu" },
{ }
};
static struct platform_driver qcom_tbu_driver = {
.driver = {
.name = "qcom_tbu",
.of_match_table = qcom_tbu_of_match,
},
.probe = qcom_tbu_probe,
};
builtin_platform_driver(qcom_tbu_driver);
......@@ -8,6 +8,8 @@
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "arm-smmu.h"
#include "arm-smmu-qcom.h"
......@@ -469,7 +471,8 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
/* Check to make sure qcom_scm has finished probing */
if (!qcom_scm_is_available())
return ERR_PTR(-EPROBE_DEFER);
return ERR_PTR(dev_err_probe(smmu->dev, -EPROBE_DEFER,
"qcom_scm not ready\n"));
qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
if (!qsmmu)
......@@ -561,10 +564,47 @@ static struct acpi_platform_list qcom_acpi_platlist[] = {
};
#endif
static int qcom_smmu_tbu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret;
if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM_DEBUG)) {
ret = qcom_tbu_probe(pdev);
if (ret)
return ret;
}
if (dev->pm_domain) {
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
return 0;
}
static const struct of_device_id qcom_smmu_tbu_of_match[] = {
{ .compatible = "qcom,sc7280-tbu" },
{ .compatible = "qcom,sdm845-tbu" },
{ }
};
static struct platform_driver qcom_smmu_tbu_driver = {
.driver = {
.name = "qcom_tbu",
.of_match_table = qcom_smmu_tbu_of_match,
},
.probe = qcom_smmu_tbu_probe,
};
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
const struct device_node *np = smmu->dev->of_node;
const struct of_device_id *match;
static u8 tbu_registered;
if (!tbu_registered++)
platform_driver_register(&qcom_smmu_tbu_driver);
#ifdef CONFIG_ACPI
if (np == NULL) {
......
......@@ -34,8 +34,10 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev);
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu);
int qcom_tbu_probe(struct platform_device *pdev);
#else
static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { }
static inline int qcom_tbu_probe(struct platform_device *pdev) { return -EINVAL; }
#endif
#endif /* _ARM_SMMU_QCOM_H */
......@@ -405,32 +405,72 @@ static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
.tlb_add_page = arm_smmu_tlb_add_page_s2_v1,
};
void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx,
struct arm_smmu_context_fault_info *cfi)
{
cfi->iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
cfi->fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
cfi->fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
cfi->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
}
void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx,
const struct arm_smmu_context_fault_info *cfi)
{
dev_dbg(smmu->dev,
"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
cfi->fsr, cfi->iova, cfi->fsynr, cfi->cbfrsynra, idx);
dev_err(smmu->dev, "FSR = %08x [%s%sFormat=%u%s%s%s%s%s%s%s%s], SID=0x%x\n",
cfi->fsr,
(cfi->fsr & ARM_SMMU_CB_FSR_MULTI) ? "MULTI " : "",
(cfi->fsr & ARM_SMMU_CB_FSR_SS) ? "SS " : "",
(u32)FIELD_GET(ARM_SMMU_CB_FSR_FORMAT, cfi->fsr),
(cfi->fsr & ARM_SMMU_CB_FSR_UUT) ? " UUT" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_ASF) ? " ASF" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_TLBLKF) ? " TLBLKF" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_TLBMCF) ? " TLBMCF" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_EF) ? " EF" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_PF) ? " PF" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_AFF) ? " AFF" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_TF) ? " TF" : "",
cfi->cbfrsynra);
dev_err(smmu->dev, "FSYNR0 = %08x [S1CBNDX=%u%s%s%s%s%s%s PLVL=%u]\n",
cfi->fsynr,
(u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_S1CBNDX, cfi->fsynr),
(cfi->fsynr & ARM_SMMU_CB_FSYNR0_AFR) ? " AFR" : "",
(cfi->fsynr & ARM_SMMU_CB_FSYNR0_PTWF) ? " PTWF" : "",
(cfi->fsynr & ARM_SMMU_CB_FSYNR0_NSATTR) ? " NSATTR" : "",
(cfi->fsynr & ARM_SMMU_CB_FSYNR0_IND) ? " IND" : "",
(cfi->fsynr & ARM_SMMU_CB_FSYNR0_PNU) ? " PNU" : "",
(cfi->fsynr & ARM_SMMU_CB_FSYNR0_WNR) ? " WNR" : "",
(u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_PLVL, cfi->fsynr));
}
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
u32 fsr, fsynr, cbfrsynra;
unsigned long iova;
struct arm_smmu_context_fault_info cfi;
struct arm_smmu_domain *smmu_domain = dev;
struct arm_smmu_device *smmu = smmu_domain->smmu;
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
int idx = smmu_domain->cfg.cbndx;
int ret;
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
if (!(fsr & ARM_SMMU_FSR_FAULT))
return IRQ_NONE;
arm_smmu_read_context_fault_info(smmu, idx, &cfi);
fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
if (!(cfi.fsr & ARM_SMMU_CB_FSR_FAULT))
return IRQ_NONE;
ret = report_iommu_fault(&smmu_domain->domain, NULL, iova,
fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
ret = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova,
cfi.fsynr & ARM_SMMU_CB_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
if (ret == -ENOSYS)
dev_err_ratelimited(smmu->dev,
"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
fsr, iova, fsynr, cbfrsynra, idx);
if (ret == -ENOSYS && __ratelimit(&rs))
arm_smmu_print_context_fault_info(smmu, idx, &cfi);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
return IRQ_HANDLED;
}
......@@ -1306,7 +1346,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_CB_ATSR_ACTIVE),
5, 50)) {
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
dev_err(dev,
......@@ -1642,7 +1682,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
/* Make sure all context banks are disabled and clear CB_FSR */
for (i = 0; i < smmu->num_context_banks; ++i) {
arm_smmu_write_context_bank(smmu, i);
arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_CB_FSR_FAULT);
}
/* Invalidate the TLB, just in case */
......
......@@ -196,34 +196,42 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CB_PAR_F BIT(0)
#define ARM_SMMU_CB_FSR 0x58
#define ARM_SMMU_FSR_MULTI BIT(31)
#define ARM_SMMU_FSR_SS BIT(30)
#define ARM_SMMU_FSR_UUT BIT(8)
#define ARM_SMMU_FSR_ASF BIT(7)
#define ARM_SMMU_FSR_TLBLKF BIT(6)
#define ARM_SMMU_FSR_TLBMCF BIT(5)
#define ARM_SMMU_FSR_EF BIT(4)
#define ARM_SMMU_FSR_PF BIT(3)
#define ARM_SMMU_FSR_AFF BIT(2)
#define ARM_SMMU_FSR_TF BIT(1)
#define ARM_SMMU_FSR_IGN (ARM_SMMU_FSR_AFF | \
ARM_SMMU_FSR_ASF | \
ARM_SMMU_FSR_TLBMCF | \
ARM_SMMU_FSR_TLBLKF)
#define ARM_SMMU_FSR_FAULT (ARM_SMMU_FSR_MULTI | \
ARM_SMMU_FSR_SS | \
ARM_SMMU_FSR_UUT | \
ARM_SMMU_FSR_EF | \
ARM_SMMU_FSR_PF | \
ARM_SMMU_FSR_TF | \
ARM_SMMU_FSR_IGN)
#define ARM_SMMU_CB_FSR_MULTI BIT(31)
#define ARM_SMMU_CB_FSR_SS BIT(30)
#define ARM_SMMU_CB_FSR_FORMAT GENMASK(10, 9)
#define ARM_SMMU_CB_FSR_UUT BIT(8)
#define ARM_SMMU_CB_FSR_ASF BIT(7)
#define ARM_SMMU_CB_FSR_TLBLKF BIT(6)
#define ARM_SMMU_CB_FSR_TLBMCF BIT(5)
#define ARM_SMMU_CB_FSR_EF BIT(4)
#define ARM_SMMU_CB_FSR_PF BIT(3)
#define ARM_SMMU_CB_FSR_AFF BIT(2)
#define ARM_SMMU_CB_FSR_TF BIT(1)
#define ARM_SMMU_CB_FSR_IGN (ARM_SMMU_CB_FSR_AFF | \
ARM_SMMU_CB_FSR_ASF | \
ARM_SMMU_CB_FSR_TLBMCF | \
ARM_SMMU_CB_FSR_TLBLKF)
#define ARM_SMMU_CB_FSR_FAULT (ARM_SMMU_CB_FSR_MULTI | \
ARM_SMMU_CB_FSR_SS | \
ARM_SMMU_CB_FSR_UUT | \
ARM_SMMU_CB_FSR_EF | \
ARM_SMMU_CB_FSR_PF | \
ARM_SMMU_CB_FSR_TF | \
ARM_SMMU_CB_FSR_IGN)
#define ARM_SMMU_CB_FAR 0x60
#define ARM_SMMU_CB_FSYNR0 0x68
#define ARM_SMMU_FSYNR0_WNR BIT(4)
#define ARM_SMMU_CB_FSYNR0_PLVL GENMASK(1, 0)
#define ARM_SMMU_CB_FSYNR0_WNR BIT(4)
#define ARM_SMMU_CB_FSYNR0_PNU BIT(5)
#define ARM_SMMU_CB_FSYNR0_IND BIT(6)
#define ARM_SMMU_CB_FSYNR0_NSATTR BIT(8)
#define ARM_SMMU_CB_FSYNR0_PTWF BIT(10)
#define ARM_SMMU_CB_FSYNR0_AFR BIT(11)
#define ARM_SMMU_CB_FSYNR0_S1CBNDX GENMASK(23, 16)
#define ARM_SMMU_CB_FSYNR1 0x6c
......@@ -237,7 +245,7 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CB_ATS1PR 0x800
#define ARM_SMMU_CB_ATSR 0x8f0
#define ARM_SMMU_ATSR_ACTIVE BIT(0)
#define ARM_SMMU_CB_ATSR_ACTIVE BIT(0)
#define ARM_SMMU_RESUME_TERMINATE BIT(0)
......@@ -533,4 +541,17 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx);
int arm_mmu500_reset(struct arm_smmu_device *smmu);
struct arm_smmu_context_fault_info {
unsigned long iova;
u32 fsr;
u32 fsynr;
u32 cbfrsynra;
};
void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx,
struct arm_smmu_context_fault_info *cfi);
void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx,
const struct arm_smmu_context_fault_info *cfi);
#endif /* _ARM_SMMU_H */
......@@ -194,7 +194,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);
if (!(fsr & ARM_SMMU_FSR_FAULT))
if (!(fsr & ARM_SMMU_CB_FSR_FAULT))
return IRQ_NONE;
fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
......@@ -274,7 +274,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
/* Clear context bank fault address fault status registers */
iommu_writel(ctx, ARM_SMMU_CB_FAR, 0);
iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_CB_FSR_FAULT);
/* TTBRs */
iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
......
......@@ -76,6 +76,7 @@
#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_DBM (((arm_lpae_iopte)1) << 51)
#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
......@@ -85,7 +86,7 @@
#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_HI_MASK (ARM_LPAE_PTE_XN | ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
......@@ -93,7 +94,11 @@
/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_AP_RDONLY_BIT 7
#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)1) << \
ARM_LPAE_PTE_AP_RDONLY_BIT)
#define ARM_LPAE_PTE_AP_WR_CLEAN_MASK (ARM_LPAE_PTE_AP_RDONLY | \
ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
......@@ -139,6 +144,12 @@
#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
#define iopte_writeable_dirty(pte) \
(((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM)
#define iopte_set_writeable_clean(ptep) \
set_bit(ARM_LPAE_PTE_AP_RDONLY_BIT, (unsigned long *)(ptep))
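These two helpers implement the HTTU stage-1 dirty encoding: with DBM set, hardware signals a write by clearing the AP[2] read-only bit. The comment below summarises the states the dirty walker distinguishes; it is illustrative and not part of the patch.

	/*
	 * Hedged summary of the encoding used by the helpers above:
	 *   DBM=0              dirty tracking not armed for this PTE
	 *   DBM=1, AP[2]=1     writeable-clean (no write since last scan)
	 *   DBM=1, AP[2]=0     writeable-dirty (hardware cleared AP[2] on write)
	 * iopte_set_writeable_clean() re-arms tracking by setting AP[2] again.
	 */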
struct arm_lpae_io_pgtable {
struct io_pgtable iop;
......@@ -160,6 +171,13 @@ static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}
static inline bool iopte_table(arm_lpae_iopte pte, int lvl)
{
if (lvl == (ARM_LPAE_MAX_LEVELS - 1))
return false;
return iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE;
}
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
struct arm_lpae_io_pgtable *data)
{
......@@ -422,6 +440,8 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
pte = ARM_LPAE_PTE_nG;
if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
pte |= ARM_LPAE_PTE_AP_RDONLY;
else if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_HD)
pte |= ARM_LPAE_PTE_DBM;
if (!(prot & IOMMU_PRIV))
pte |= ARM_LPAE_PTE_AP_UNPRIV;
} else {
......@@ -726,6 +746,97 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
return iopte_to_paddr(pte, data) | iova;
}
struct io_pgtable_walk_data {
struct iommu_dirty_bitmap *dirty;
unsigned long flags;
u64 addr;
const u64 end;
};
static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
struct io_pgtable_walk_data *walk_data,
arm_lpae_iopte *ptep,
int lvl);
static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data,
struct io_pgtable_walk_data *walk_data,
arm_lpae_iopte *ptep, int lvl)
{
struct io_pgtable *iop = &data->iop;
arm_lpae_iopte pte = READ_ONCE(*ptep);
if (iopte_leaf(pte, lvl, iop->fmt)) {
size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
if (iopte_writeable_dirty(pte)) {
iommu_dirty_bitmap_record(walk_data->dirty,
walk_data->addr, size);
if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
iopte_set_writeable_clean(ptep);
}
walk_data->addr += size;
return 0;
}
if (WARN_ON(!iopte_table(pte, lvl)))
return -EINVAL;
ptep = iopte_deref(pte, data);
return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1);
}
static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
struct io_pgtable_walk_data *walk_data,
arm_lpae_iopte *ptep,
int lvl)
{
u32 idx;
int max_entries, ret;
if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
return -EINVAL;
if (lvl == data->start_level)
max_entries = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
else
max_entries = ARM_LPAE_PTES_PER_TABLE(data);
for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
(idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl);
if (ret)
return ret;
}
return 0;
}
static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
unsigned long iova, size_t size,
unsigned long flags,
struct iommu_dirty_bitmap *dirty)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
struct io_pgtable_walk_data walk_data = {
.dirty = dirty,
.flags = flags,
.addr = iova,
.end = iova + size,
};
arm_lpae_iopte *ptep = data->pgd;
int lvl = data->start_level;
if (WARN_ON(!size))
return -EINVAL;
if (WARN_ON((iova + size - 1) & ~(BIT(cfg->ias) - 1)))
return -EINVAL;
if (data->iop.fmt != ARM_64_LPAE_S1)
return -EINVAL;
return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl);
}
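A caller-side sketch of how an IOMMU driver is expected to consume the new read_and_clear_dirty() op through the generic iommu_dirty_bitmap machinery; the wrapper name and error handling here are illustrative and not taken from this diff.

	/* Hedged sketch: forward dirty-bitmap collection to io-pgtable */
	static int smmu_domain_read_and_clear_dirty(struct iommu_domain *domain,
						    unsigned long iova, size_t size,
						    unsigned long flags,
						    struct iommu_dirty_bitmap *dirty)
	{
		struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
		struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

		if (!ops || !ops->read_and_clear_dirty)
			return -EOPNOTSUPP;

		return ops->read_and_clear_dirty(ops, iova, size, flags, dirty);
	}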
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
unsigned long granule, page_sizes;
......@@ -804,6 +915,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
.map_pages = arm_lpae_map_pages,
.unmap_pages = arm_lpae_unmap_pages,
.iova_to_phys = arm_lpae_iova_to_phys,
.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
};
return data;
......@@ -819,7 +931,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_ARM_TTBR1 |
IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
IO_PGTABLE_QUIRK_ARM_HD))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
......
......@@ -114,6 +114,9 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
return ERR_PTR(-EOPNOTSUPP);
if (flags & ~valid_flags)
return ERR_PTR(-EOPNOTSUPP);
if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
!device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
return ERR_PTR(-EOPNOTSUPP);
hwpt_paging = __iommufd_object_alloc(
ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
......
......@@ -85,6 +85,8 @@ struct io_pgtable_cfg {
*
* IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
* attributes set in the TCR for a non-coherent page-table walker.
*
* IO_PGTABLE_QUIRK_ARM_HD: Enables dirty tracking in stage 1 pagetable.
*/
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
......@@ -92,6 +94,7 @@ struct io_pgtable_cfg {
#define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4)
#define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
#define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
#define IO_PGTABLE_QUIRK_ARM_HD BIT(7)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
......
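Setting the quirk is not shown in this hunk; a plausible sketch of how a stage-1 pgtable config would opt in when the SMMU reported HTTU dirty support (the condition and its placement in domain finalisation are assumptions, not taken from this diff).

	/* Hedged sketch: request hardware dirty tracking for a stage-1 table */
	if ((smmu->features & ARM_SMMU_FEAT_HD) &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_ARM_HD;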