Commit 7c567eb1 authored by Jason Gunthorpe, committed by Will Deacon

iommu/arm-smmu-v3: Add types for each level of the CD table

As well as indexing helpers arm_smmu_cdtab_l1/2_idx().

Remove CTXDESC_L1_DESC_DWORDS and CTXDESC_CD_DWORDS replacing them all
with type specific calculations.
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/7-v4-6416877274e1+1af-smmuv3_tidy_jgg@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
parent c0a25a96
......@@ -1217,17 +1217,18 @@ static void arm_smmu_sync_cd(struct arm_smmu_master *master,
arm_smmu_cmdq_batch_submit(smmu, &cmds);
}
/*
 * Install a valid L1 descriptor pointing at the L2 CD span at @l2ptr_dma.
 * The diff rendering left both the pre- and post-commit versions of this
 * function interleaved; this is the post-commit (typed) version only.
 */
static void arm_smmu_write_cd_l1_desc(struct arm_smmu_cdtab_l1 *dst,
				      dma_addr_t l2ptr_dma)
{
	u64 val = (l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) | CTXDESC_L1_DESC_V;

	/* The HW has 64 bit atomicity with stores to the L2 CD table */
	WRITE_ONCE(dst->l2ptr, cpu_to_le64(val));
}
static dma_addr_t arm_smmu_cd_l1_get_desc(const __le64 *src)
static dma_addr_t arm_smmu_cd_l1_get_desc(const struct arm_smmu_cdtab_l1 *src)
{
return le64_to_cpu(*src) & CTXDESC_L1_DESC_L2PTR_MASK;
return le64_to_cpu(src->l2ptr) & CTXDESC_L1_DESC_L2PTR_MASK;
}
struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
......@@ -1240,13 +1241,12 @@ struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
return NULL;
if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
return (struct arm_smmu_cd *)(cd_table->cdtab +
ssid * CTXDESC_CD_DWORDS);
return &((struct arm_smmu_cd *)cd_table->cdtab)[ssid];
l1_desc = &cd_table->l1_desc[ssid / CTXDESC_L2_ENTRIES];
l1_desc = &cd_table->l1_desc[arm_smmu_cdtab_l1_idx(ssid)];
if (!l1_desc->l2ptr)
return NULL;
return &l1_desc->l2ptr[ssid % CTXDESC_L2_ENTRIES];
return &l1_desc->l2ptr->cds[arm_smmu_cdtab_l2_idx(ssid)];
}
static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
......@@ -1264,11 +1264,12 @@ static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
}
if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_64K_L2) {
unsigned int idx = ssid / CTXDESC_L2_ENTRIES;
unsigned int idx = arm_smmu_cdtab_l1_idx(ssid);
struct arm_smmu_l1_ctx_desc *l1_desc;
l1_desc = &cd_table->l1_desc[idx];
if (!l1_desc->l2ptr) {
struct arm_smmu_cdtab_l1 *dst;
dma_addr_t l2ptr_dma;
size_t size;
......@@ -1279,8 +1280,8 @@ static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
if (!l1_desc->l2ptr)
return NULL;
arm_smmu_write_cd_l1_desc(&cd_table->cdtab[idx],
l2ptr_dma);
dst = &((struct arm_smmu_cdtab_l1 *)cd_table->cdtab)[idx];
arm_smmu_write_cd_l1_desc(dst, l2ptr_dma);
/* An invalid L1CD can be cached */
arm_smmu_sync_cd(master, ssid, false);
}
......@@ -1424,7 +1425,7 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
cd_table->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
cd_table->num_l1_ents = max_contexts;
l1size = max_contexts * (CTXDESC_CD_DWORDS << 3);
l1size = max_contexts * sizeof(struct arm_smmu_cd);
} else {
cd_table->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
cd_table->num_l1_ents = DIV_ROUND_UP(max_contexts,
......@@ -1436,7 +1437,7 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
if (!cd_table->l1_desc)
return -ENOMEM;
l1size = cd_table->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
l1size = cd_table->num_l1_ents * sizeof(struct arm_smmu_cdtab_l1);
}
cd_table->cdtab = dma_alloc_coherent(smmu->dev, l1size,
......@@ -1460,27 +1461,29 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
static void arm_smmu_free_cd_tables(struct arm_smmu_master *master)
{
int i;
size_t size, l1size;
size_t l1size;
struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
if (cd_table->l1_desc) {
size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
for (i = 0; i < cd_table->num_l1_ents; i++) {
dma_addr_t dma_handle;
if (!cd_table->l1_desc[i].l2ptr)
continue;
dma_free_coherent(smmu->dev, size,
dma_handle = arm_smmu_cd_l1_get_desc(&(
(struct arm_smmu_cdtab_l1 *)cd_table->cdtab)[i]);
dma_free_coherent(smmu->dev,
sizeof(*cd_table->l1_desc[i].l2ptr),
cd_table->l1_desc[i].l2ptr,
arm_smmu_cd_l1_get_desc(
&cd_table->cdtab[i]));
dma_handle);
}
kfree(cd_table->l1_desc);
l1size = cd_table->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
l1size = cd_table->num_l1_ents * sizeof(struct arm_smmu_cdtab_l1);
} else {
l1size = cd_table->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
l1size = cd_table->num_l1_ents * sizeof(struct arm_smmu_cd);
}
dma_free_coherent(smmu->dev, l1size, cd_table->cdtab, cd_table->cdtab_dma);
......
......@@ -301,7 +301,6 @@ static inline u32 arm_smmu_strtab_l2_idx(u32 sid)
*/
#define CTXDESC_L2_ENTRIES		1024

/*
 * CTXDESC_L1_DESC_DWORDS is removed by this commit (sizes are now derived
 * from struct arm_smmu_cdtab_l1 directly); only the field encodings of an
 * L1 descriptor remain.
 */
#define CTXDESC_L1_DESC_V		(1UL << 0)
#define CTXDESC_L1_DESC_L2PTR_MASK	GENMASK_ULL(51, 12)
......@@ -311,6 +310,24 @@ struct arm_smmu_cd {
__le64 data[CTXDESC_CD_DWORDS];
};
/*
 * Second level of the two-level CD table: a fixed span of
 * CTXDESC_L2_ENTRIES context descriptors, addressed by
 * arm_smmu_cdtab_l2_idx(ssid).
 */
struct arm_smmu_cdtab_l2 {
struct arm_smmu_cd cds[CTXDESC_L2_ENTRIES];
};
/*
 * First-level descriptor of the two-level CD table: a single 64-bit word
 * holding the DMA address of an arm_smmu_cdtab_l2 plus the V bit
 * (see CTXDESC_L1_DESC_L2PTR_MASK / CTXDESC_L1_DESC_V).
 */
struct arm_smmu_cdtab_l1 {
__le64 l2ptr;
};
/* Index of the L1 descriptor whose L2 span covers @ssid */
static inline unsigned int arm_smmu_cdtab_l1_idx(unsigned int ssid)
{
	unsigned int span_size = CTXDESC_L2_ENTRIES;

	return ssid / span_size;
}
/* Index of @ssid's CD within its L2 span */
static inline unsigned int arm_smmu_cdtab_l2_idx(unsigned int ssid)
{
	unsigned int span_size = CTXDESC_L2_ENTRIES;

	return ssid % span_size;
}
#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
......@@ -341,7 +358,7 @@ struct arm_smmu_cd {
* When the SMMU only supports linear context descriptor tables, pick a
* reasonable size limit (64kB).
*/
/*
 * The rendered diff left both the old (DWORDS-based) and new definitions;
 * only the post-commit, sizeof-based one is kept.
 */
#define CTXDESC_LINEAR_CDMAX	ilog2(SZ_64K / sizeof(struct arm_smmu_cd))
/* Command queue */
#define CMDQ_ENT_SZ_SHIFT 4
......@@ -618,7 +635,7 @@ struct arm_smmu_ctx_desc {
};
/*
 * CPU-side bookkeeping for one L1 entry: the kernel pointer to the L2
 * span (the hardware-visible DMA address lives in arm_smmu_cdtab_l1).
 * The rendered diff left both the old (struct arm_smmu_cd *) and new
 * members; only the post-commit typed member is kept, matching the
 * l2ptr->cds[] and sizeof(*l2ptr) usage elsewhere in the commit.
 */
struct arm_smmu_l1_ctx_desc {
	struct arm_smmu_cdtab_l2 *l2ptr;
};
struct arm_smmu_ctx_desc_cfg {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment