Commit d5f583bf authored by Nicolin Chen's avatar Nicolin Chen Committed by Will Deacon

iommu/tegra-smmu: Expand mutex protection range

This is used to protect against a potential race condition at use_count,
since probes of client drivers, which call attach_dev(), may run
concurrently.
Signed-off-by: Nicolin Chen <nicoleotsuka@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Link: https://lore.kernel.org/r/20201125101013.14953-3-nicoleotsuka@gmail.com
Signed-off-by: Will Deacon <will@kernel.org>
parent cf910f61
...@@ -256,26 +256,19 @@ static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp) ...@@ -256,26 +256,19 @@ static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{ {
unsigned long id; unsigned long id;
mutex_lock(&smmu->lock);
id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids); id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
if (id >= smmu->soc->num_asids) { if (id >= smmu->soc->num_asids)
mutex_unlock(&smmu->lock);
return -ENOSPC; return -ENOSPC;
}
set_bit(id, smmu->asids); set_bit(id, smmu->asids);
*idp = id; *idp = id;
mutex_unlock(&smmu->lock);
return 0; return 0;
} }
static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id) static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{ {
mutex_lock(&smmu->lock);
clear_bit(id, smmu->asids); clear_bit(id, smmu->asids);
mutex_unlock(&smmu->lock);
} }
static bool tegra_smmu_capable(enum iommu_cap cap) static bool tegra_smmu_capable(enum iommu_cap cap)
...@@ -420,17 +413,21 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu, ...@@ -420,17 +413,21 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
struct tegra_smmu_as *as) struct tegra_smmu_as *as)
{ {
u32 value; u32 value;
int err; int err = 0;
mutex_lock(&smmu->lock);
if (as->use_count > 0) { if (as->use_count > 0) {
as->use_count++; as->use_count++;
return 0; goto unlock;
} }
as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD, as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, as->pd_dma)) if (dma_mapping_error(smmu->dev, as->pd_dma)) {
return -ENOMEM; err = -ENOMEM;
goto unlock;
}
/* We can't handle 64-bit DMA addresses */ /* We can't handle 64-bit DMA addresses */
if (!smmu_dma_addr_valid(smmu, as->pd_dma)) { if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
...@@ -453,24 +450,35 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu, ...@@ -453,24 +450,35 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
as->smmu = smmu; as->smmu = smmu;
as->use_count++; as->use_count++;
mutex_unlock(&smmu->lock);
return 0; return 0;
err_unmap: err_unmap:
dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
unlock:
mutex_unlock(&smmu->lock);
return err; return err;
} }
static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu, static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
struct tegra_smmu_as *as) struct tegra_smmu_as *as)
{ {
if (--as->use_count > 0) mutex_lock(&smmu->lock);
if (--as->use_count > 0) {
mutex_unlock(&smmu->lock);
return; return;
}
tegra_smmu_free_asid(smmu, as->id); tegra_smmu_free_asid(smmu, as->id);
dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
as->smmu = NULL; as->smmu = NULL;
mutex_unlock(&smmu->lock);
} }
static int tegra_smmu_attach_dev(struct iommu_domain *domain, static int tegra_smmu_attach_dev(struct iommu_domain *domain,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment