Commit 3e630336 authored by Jean-Philippe Brucker, committed by Will Deacon

iommu/arm-smmu-v3: Seize private ASID

The SMMU has a single ASID space, the union of shared and private ASID
sets. This means that the SMMU driver competes with the arch allocator
for ASIDs. Shared ASIDs are those of Linux processes, allocated by the
arch, and participate in broadcast TLB maintenance. Private ASIDs are
allocated by the SMMU driver and used for "classic" map/unmap DMA. They
require command-queue TLB invalidations.
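
As a rough illustration of that single ASID space (a sketch only; the example_* helpers below are invented for this note, not part of the driver), both kinds of ASID live in the same arm_smmu_asid_xa xarray: a private context can take any free value with xa_alloc(), while a shared context must claim the exact CPU ASID with xa_insert():

/* Sketch: private allocation picks any free slot in the shared xarray. */
static int example_alloc_private_asid(struct arm_smmu_device *smmu,
				      struct arm_smmu_ctx_desc *cd)
{
	u32 asid;
	int ret;

	ret = xa_alloc(&arm_smmu_asid_xa, &asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (!ret)
		cd->asid = asid;
	return ret;
}

/* Sketch: a shared (SVA) context must use the mm's CPU ASID, nothing else. */
static int example_claim_shared_asid(struct arm_smmu_ctx_desc *cd, u16 asid)
{
	/* Returns -EBUSY if a private descriptor already owns this slot. */
	return xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
}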

When we pin down an mm_context and get an ASID that is already in use by
the SMMU, it belongs to a private context. We used to simply abort the
bind, but this is unfair to users, who would find that a few seemingly
random processes cannot be bound. Instead, try to allocate a new private ASID for the
context, and make the old ASID shared.
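
For context, here is a hedged sketch of the bind-side flow this enables (example_claim_cpu_asid() and its error handling are illustrative assumptions, not the driver's actual helper): the lookup and the seize both run under arm_smmu_asid_lock, and a NULL return from arm_smmu_share_asid() means the CPU ASID is now free for the shared descriptor.

/* Sketch: claim the CPU ASID for an SVA bind, seizing a private one if needed. */
static int example_claim_cpu_asid(struct mm_struct *mm,
				  struct arm_smmu_ctx_desc *cd, u16 asid)
{
	struct arm_smmu_ctx_desc *old;
	int ret;

	mutex_lock(&arm_smmu_asid_lock);
	old = arm_smmu_share_asid(mm, asid);	/* may relocate a private context */
	if (IS_ERR(old)) {
		ret = PTR_ERR(old);	/* e.g. -ENOSPC: no spare ASID to seize */
	} else if (old) {
		/* A shared descriptor already owns the ASID (this sketch bails;
		   the real driver would reuse that descriptor). */
		ret = -EEXIST;
	} else {
		cd->asid = asid;
		/* The slot is now free, so recording the shared descriptor should succeed. */
		ret = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
	}
	mutex_unlock(&arm_smmu_asid_lock);
	return ret;
}

Holding the mutex across both steps keeps the lookup inside arm_smmu_share_asid() and the final xa_insert() atomic with respect to concurrent private ASID allocations.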
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/20200918101852.582559-10-jean-philippe@linaro.org
Signed-off-by: Will Deacon <will@kernel.org>
parent 3f1ce8e8
@@ -10,10 +10,18 @@
 #include "arm-smmu-v3.h"
 #include "../../io-pgtable-arm.h"
 
+/*
+ * Check if the CPU ASID is available on the SMMU side. If a private context
+ * descriptor is using it, try to replace it.
+ */
 static struct arm_smmu_ctx_desc *
 arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 {
+	int ret;
+	u32 new_asid;
 	struct arm_smmu_ctx_desc *cd;
+	struct arm_smmu_device *smmu;
+	struct arm_smmu_domain *smmu_domain;
 
 	cd = xa_load(&arm_smmu_asid_xa, asid);
 	if (!cd)
@@ -27,8 +35,31 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 		return cd;
 	}
 
-	/* Ouch, ASID is already in use for a private cd. */
-	return ERR_PTR(-EBUSY);
+	smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
+	smmu = smmu_domain->smmu;
+
+	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
+		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
+	if (ret)
+		return ERR_PTR(-ENOSPC);
+	/*
+	 * Race with unmap: TLB invalidations will start targeting the new ASID,
+	 * which isn't assigned yet. We'll do an invalidate-all on the old ASID
+	 * later, so it doesn't matter.
+	 */
+	cd->asid = new_asid;
+	/*
+	 * Update ASID and invalidate CD in all associated masters. There will
+	 * be some overlap between use of both ASIDs, until we invalidate the
+	 * TLB.
+	 */
+	arm_smmu_write_ctx_desc(smmu_domain, 0, cd);
+
+	/* Invalidate TLB entries previously associated with that context */
+	arm_smmu_tlb_inv_asid(smmu, asid);
+
+	xa_erase(&arm_smmu_asid_xa, asid);
+	return NULL;
 }
 
 __maybe_unused
@@ -882,6 +882,17 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
 }
 
 /* Context descriptor manipulation functions */
+void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
+{
+	struct arm_smmu_cmdq_ent cmd = {
+		.opcode = CMDQ_OP_TLBI_NH_ASID,
+		.tlbi.asid = asid,
+	};
+
+	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	arm_smmu_cmdq_issue_sync(smmu);
+}
+
 static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
 			     int ssid, bool leaf)
 {
@@ -962,8 +973,8 @@ static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
 	return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
 }
 
-static int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain,
-				   int ssid, struct arm_smmu_ctx_desc *cd)
+int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
+			    struct arm_smmu_ctx_desc *cd)
 {
 	/*
 	 * This function handles the following cases:
@@ -1619,15 +1630,6 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cmdq_ent cmd;
 
-	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-		cmd.opcode = CMDQ_OP_TLBI_NH_ASID;
-		cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
-		cmd.tlbi.vmid = 0;
-	} else {
-		cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
-		cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
-	}
-
 	/*
 	 * NOTE: when io-pgtable is in non-strict mode, we may get here with
 	 * PTEs previously cleared by unmaps on the current CPU not yet visible
@@ -1635,8 +1637,14 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 	 * insertion to guarantee those are observed before the TLBI. Do be
 	 * careful, 007.
 	 */
-	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-	arm_smmu_cmdq_issue_sync(smmu);
+	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+		arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
+	} else {
+		cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
+		cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+		arm_smmu_cmdq_issue_sync(smmu);
+	}
 
 	arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
 }
@@ -679,6 +679,9 @@ struct arm_smmu_domain {
 extern struct xarray arm_smmu_asid_xa;
 extern struct mutex arm_smmu_asid_lock;
 
+int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
+			    struct arm_smmu_ctx_desc *cd);
+void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
 bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
 
 #endif /* _ARM_SMMU_V3_H */
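
arm_smmu_free_asid(), declared above, is the release side of the same scheme: a descriptor must only give back the ASID it currently holds (which may have changed if it was seized), and a shared descriptor must stay in the xarray while other binds still use it. A rough sketch of what such a helper has to do, assuming a reference count on the context descriptor (the refs field, the example_ name, and this exact logic are assumptions for illustration, not a copy of the driver):

/* Sketch only: release an ASID; caller is assumed to hold arm_smmu_asid_lock. */
static bool example_free_asid(struct arm_smmu_ctx_desc *cd)
{
	if (!cd->asid)
		return false;

	/* Hypothetical refcount: shared descriptors can have several users. */
	if (!refcount_dec_and_test(&cd->refs))
		return false;

	/*
	 * cd->asid is whatever the descriptor holds now: its original value,
	 * or the replacement it received when a bind seized the original.
	 */
	WARN_ON(xa_erase(&arm_smmu_asid_xa, cd->asid) != cd);
	return true;
}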