Commit a9d40285 authored by Nicolin Chen, committed by Will Deacon

iommu/tegra241-cmdqv: Limit CMDs for VCMDQs of a guest owned VINTF

When VCMDQs are assigned to a VINTF owned by a guest (HYP_OWN bit unset),
the VCMDQ HW supports only TLB and ATC invalidation commands. So, implement
the new cmdq->supports_cmd op to scan each input command and make sure it
is supported by the selected queue.

Note that a guest VM should never see the HYP_OWN bit set, regardless of
whether its kernel driver writes it: the hypervisor running in the host OS
should wire this bit to zero when trapping a write access to this
VINTF_CONFIG register from a guest kernel.
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Link: https://lore.kernel.org/r/8160292337059b91271045800e5c62f7295e2c24.1724970714.git.nicolinc@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
parent f59e8549
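For reference, the cmdq->supports_cmd op consumed here was introduced by the
parent commit (f59e8549). Judging from the context lines below, the core-side
check presumably reduces to a small helper along these lines (a sketch for
orientation only; it is not part of this diff):

static bool arm_smmu_cmdq_supports_cmd(struct arm_smmu_cmdq *cmdq,
				       struct arm_smmu_cmdq_ent *ent)
{
	/* A queue that registers no filter op accepts every command */
	return cmdq->supports_cmd ? cmdq->supports_cmd(ent) : true;
}

With that in place, this patch only has to plumb the command entry down to the
queue-selection points (arm_smmu_get_cmdq() and arm_smmu_cmdq_batch_init())
and register a Tegra241-specific filter for guest-owned VCMDQs.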
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -346,12 +346,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
 	return 0;
 }
 
-static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
+static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu,
+					       struct arm_smmu_cmdq_ent *ent)
 {
 	struct arm_smmu_cmdq *cmdq = NULL;
 
 	if (smmu->impl_ops && smmu->impl_ops->get_secondary_cmdq)
-		cmdq = smmu->impl_ops->get_secondary_cmdq(smmu);
+		cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent);
 
 	return cmdq ?: &smmu->cmdq;
 }
@@ -897,7 +898,7 @@ static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
 	}
 
 	return arm_smmu_cmdq_issue_cmdlist(
-		smmu, arm_smmu_get_cmdq(smmu), cmd, 1, sync);
+		smmu, arm_smmu_get_cmdq(smmu, ent), cmd, 1, sync);
 }
 
 static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -913,10 +914,11 @@ static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
 }
 
 static void arm_smmu_cmdq_batch_init(struct arm_smmu_device *smmu,
-				     struct arm_smmu_cmdq_batch *cmds)
+				     struct arm_smmu_cmdq_batch *cmds,
+				     struct arm_smmu_cmdq_ent *ent)
 {
 	cmds->num = 0;
-	cmds->cmdq = arm_smmu_get_cmdq(smmu);
+	cmds->cmdq = arm_smmu_get_cmdq(smmu, ent);
 }
 
 static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
@@ -931,13 +933,13 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
 	if (force_sync || unsupported_cmd) {
 		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
 					    cmds->num, true);
-		arm_smmu_cmdq_batch_init(smmu, cmds);
+		arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
 	}
 
 	if (cmds->num == CMDQ_BATCH_ENTRIES) {
 		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
 					    cmds->num, false);
-		arm_smmu_cmdq_batch_init(smmu, cmds);
+		arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
 	}
 
 	index = cmds->num * CMDQ_ENT_DWORDS;
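The unsupported_cmd flag tested in this hunk also predates this patch; per the
context above, each command appended to a batch is checked against the current
queue's filter, roughly like this sketch:

	/* Likely shape of the check in arm_smmu_cmdq_batch_add(): if the
	 * current queue cannot take this command, flush the batch with a
	 * sync, then re-init so arm_smmu_get_cmdq() can pick a queue that
	 * does support it.
	 */
	bool unsupported_cmd = !arm_smmu_cmdq_supports_cmd(cmds->cmdq, cmd);

Passing cmd into arm_smmu_cmdq_batch_init() is what lets the follow-up batch
land on a queue matched to the offending command.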
@@ -1205,7 +1207,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_master *master,
 		},
 	};
 
-	arm_smmu_cmdq_batch_init(smmu, &cmds);
+	arm_smmu_cmdq_batch_init(smmu, &cmds, &cmd);
 	for (i = 0; i < master->num_streams; i++) {
 		cmd.cfgi.sid = master->streams[i].id;
 		arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
@@ -2056,7 +2058,7 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
 
 	arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd);
 
-	arm_smmu_cmdq_batch_init(master->smmu, &cmds);
+	arm_smmu_cmdq_batch_init(master->smmu, &cmds, &cmd);
 	for (i = 0; i < master->num_streams; i++) {
 		cmd.atc.sid = master->streams[i].id;
 		arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
@@ -2071,7 +2073,9 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
 	struct arm_smmu_master_domain *master_domain;
 	int i;
 	unsigned long flags;
-	struct arm_smmu_cmdq_ent cmd;
+	struct arm_smmu_cmdq_ent cmd = {
+		.opcode = CMDQ_OP_ATC_INV,
+	};
 	struct arm_smmu_cmdq_batch cmds;
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
@@ -2094,7 +2098,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
 	if (!atomic_read(&smmu_domain->nr_ats_masters))
 		return 0;
 
-	arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds);
+	arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds, &cmd);
 
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
 	list_for_each_entry(master_domain, &smmu_domain->devices,
@@ -2176,7 +2180,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 		num_pages++;
 	}
 
-	arm_smmu_cmdq_batch_init(smmu, &cmds);
+	arm_smmu_cmdq_batch_init(smmu, &cmds, cmd);
 
 	while (iova < end) {
 		if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -642,7 +642,8 @@ struct arm_smmu_strtab_cfg {
 struct arm_smmu_impl_ops {
 	int (*device_reset)(struct arm_smmu_device *smmu);
 	void (*device_remove)(struct arm_smmu_device *smmu);
-	struct arm_smmu_cmdq *(*get_secondary_cmdq)(struct arm_smmu_device *smmu);
+	struct arm_smmu_cmdq *(*get_secondary_cmdq)(
+		struct arm_smmu_device *smmu, struct arm_smmu_cmdq_ent *ent);
 };
 
 /* An SMMUv3 instance */
--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -142,6 +142,7 @@ struct tegra241_vcmdq {
  * struct tegra241_vintf - Virtual Interface
  * @idx: Global index in the CMDQV
  * @enabled: Enable status
+ * @hyp_own: Owned by hypervisor (in-kernel)
  * @cmdqv: Parent CMDQV pointer
  * @lvcmdqs: List of logical VCMDQ pointers
  * @base: MMIO base address
@@ -150,6 +151,7 @@ struct tegra241_vintf {
 	u16 idx;
 
 	bool enabled;
+	bool hyp_own;
 
 	struct tegra241_cmdqv *cmdqv;
 	struct tegra241_vcmdq **lvcmdqs;
@@ -301,8 +303,21 @@ static irqreturn_t tegra241_cmdqv_isr(int irq, void *devid)
 
 /* Command Queue Function */
 
+static bool tegra241_guest_vcmdq_supports_cmd(struct arm_smmu_cmdq_ent *ent)
+{
+	switch (ent->opcode) {
+	case CMDQ_OP_TLBI_NH_ASID:
+	case CMDQ_OP_TLBI_NH_VA:
+	case CMDQ_OP_ATC_INV:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static struct arm_smmu_cmdq *
-tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu)
+tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
+			struct arm_smmu_cmdq_ent *ent)
 {
 	struct tegra241_cmdqv *cmdqv =
 		container_of(smmu, struct tegra241_cmdqv, smmu);
@@ -328,6 +343,10 @@ tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu)
 	vcmdq = vintf->lvcmdqs[lidx];
 	if (!vcmdq || !READ_ONCE(vcmdq->enabled))
 		return NULL;
+
+	/* Unsupported CMD goes for smmu->cmdq pathway */
+	if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent))
+		return NULL;
 	return &vcmdq->cmdq;
 }
 
@@ -406,12 +425,22 @@ static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
 	tegra241_vintf_hw_deinit(vintf);
 
 	/* Configure and enable VINTF */
+	/*
+	 * Note that HYP_OWN bit is wired to zero when running in guest kernel,
+	 * whether enabling it here or not, as !HYP_OWN cmdq HWs only support a
+	 * restricted set of supported commands.
+	 */
 	regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own);
 	writel(regval, REG_VINTF(vintf, CONFIG));
 
 	ret = vintf_write_config(vintf, regval | VINTF_EN);
 	if (ret)
 		return ret;
+	/*
+	 * As being mentioned above, HYP_OWN bit is wired to zero for a guest
+	 * kernel, so read it back from HW to ensure that reflects in hyp_own
+	 */
+	vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));
 
 	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) {
 		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
@@ -493,6 +522,9 @@ static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
 	q->q_base = q->base_dma & VCMDQ_ADDR;
 	q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift);
 
+	if (!vcmdq->vintf->hyp_own)
+		cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd;
+
 	return arm_smmu_cmdq_init(smmu, cmdq);
 }
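On the commit-message point that a hypervisor should wire HYP_OWN to zero:
this patch only covers the guest side (reading the bit back after writing it).
A VMM emulating the VINTF_CONFIG register would mask the bit on the guest's
write path, along these lines (a hypothetical sketch; the function name and
register-pointer parameter are invented for illustration):

/* Hypothetical VMM-side handler for a trapped guest write to
 * VINTF_CONFIG: unconditionally clear VINTF_HYP_OWN before forwarding
 * the value to hardware, so the guest can never claim ownership.
 */
static void vmm_emulate_vintf_config_write(void __iomem *vintf_config_reg,
					   u32 guest_val)
{
	writel(guest_val & ~VINTF_HYP_OWN, vintf_config_reg);
}

The guest-side readback added in tegra241_vintf_hw_init() then observes
hyp_own == false, and tegra241_vcmdq_alloc_smmu_cmdq() installs
tegra241_guest_vcmdq_supports_cmd on each VCMDQ accordingly.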