Commit 90105ae1 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'iommu-fixes-v5.4-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - Fixes for page-table issues on Mali GPUs

 - Missing free in an error path for ARM-SMMU

 - PASID decoding in the AMD IOMMU Event log code

 - Another update for the locking fixes in the AMD IOMMU driver

 - Reduce the calls to platform_get_irq() in the IPMMU-VMSA and Rockchip
   IOMMUs to get rid of the warning message added to this function
   recently

* tag 'iommu-fixes-v5.4-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/amd: Check PM_LEVEL_SIZE() condition in locked section
  iommu/amd: Fix incorrect PASID decoding from event log
  iommu/ipmmu-vmsa: Only call platform_get_irq() when interrupt is mandatory
  iommu/rockchip: Don't use platform_get_irq to implicitly count irqs
  iommu/io-pgtable-arm: Support all Mali configurations
  iommu/io-pgtable-arm: Correct Mali attributes
  iommu/arm-smmu: Free context bitmap in the err path of arm_smmu_init_domain_context
parents 8eb4b3b0 46ac18c3
...@@ -583,7 +583,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) ...@@ -583,7 +583,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
retry: retry:
type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
pasid = PPR_PASID(*(u64 *)&event[0]); pasid = (event[0] & EVENT_DOMID_MASK_HI) |
(event[1] & EVENT_DOMID_MASK_LO);
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
address = (u64)(((u64)event[3]) << 32) | event[2]; address = (u64)(((u64)event[3]) << 32) | event[2];
...@@ -616,7 +617,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) ...@@ -616,7 +617,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
address, flags); address, flags);
break; break;
case EVENT_TYPE_PAGE_TAB_ERR: case EVENT_TYPE_PAGE_TAB_ERR:
dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n", dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags); pasid, address, flags);
break; break;
...@@ -1463,6 +1464,7 @@ static void free_pagetable(struct protection_domain *domain) ...@@ -1463,6 +1464,7 @@ static void free_pagetable(struct protection_domain *domain)
* to 64 bits. * to 64 bits.
*/ */
static bool increase_address_space(struct protection_domain *domain, static bool increase_address_space(struct protection_domain *domain,
unsigned long address,
gfp_t gfp) gfp_t gfp)
{ {
unsigned long flags; unsigned long flags;
...@@ -1471,8 +1473,8 @@ static bool increase_address_space(struct protection_domain *domain, ...@@ -1471,8 +1473,8 @@ static bool increase_address_space(struct protection_domain *domain,
spin_lock_irqsave(&domain->lock, flags); spin_lock_irqsave(&domain->lock, flags);
if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) if (address <= PM_LEVEL_SIZE(domain->mode) ||
/* address space already 64 bit large */ WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
goto out; goto out;
pte = (void *)get_zeroed_page(gfp); pte = (void *)get_zeroed_page(gfp);
...@@ -1505,7 +1507,7 @@ static u64 *alloc_pte(struct protection_domain *domain, ...@@ -1505,7 +1507,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
BUG_ON(!is_power_of_2(page_size)); BUG_ON(!is_power_of_2(page_size));
while (address > PM_LEVEL_SIZE(domain->mode)) while (address > PM_LEVEL_SIZE(domain->mode))
*updated = increase_address_space(domain, gfp) || *updated; *updated = increase_address_space(domain, address, gfp) || *updated;
level = domain->mode - 1; level = domain->mode - 1;
pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
......
...@@ -130,8 +130,8 @@ ...@@ -130,8 +130,8 @@
#define EVENT_TYPE_INV_PPR_REQ 0x9 #define EVENT_TYPE_INV_PPR_REQ 0x9
#define EVENT_DEVID_MASK 0xffff #define EVENT_DEVID_MASK 0xffff
#define EVENT_DEVID_SHIFT 0 #define EVENT_DEVID_SHIFT 0
#define EVENT_DOMID_MASK 0xffff #define EVENT_DOMID_MASK_LO 0xffff
#define EVENT_DOMID_SHIFT 0 #define EVENT_DOMID_MASK_HI 0xf0000
#define EVENT_FLAGS_MASK 0xfff #define EVENT_FLAGS_MASK 0xfff
#define EVENT_FLAGS_SHIFT 0x10 #define EVENT_FLAGS_SHIFT 0x10
......
...@@ -812,6 +812,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, ...@@ -812,6 +812,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
return 0; return 0;
out_clear_smmu: out_clear_smmu:
__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
smmu_domain->smmu = NULL; smmu_domain->smmu = NULL;
out_unlock: out_unlock:
mutex_unlock(&smmu_domain->init_mutex); mutex_unlock(&smmu_domain->init_mutex);
......
...@@ -166,6 +166,9 @@ ...@@ -166,6 +166,9 @@
#define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2) #define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER BIT(4) #define ARM_MALI_LPAE_TTBR_SHARE_OUTER BIT(4)
#define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
/* IOPTE accessors */ /* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d)) #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
...@@ -1015,27 +1018,56 @@ arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) ...@@ -1015,27 +1018,56 @@ arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
static struct io_pgtable * static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{ {
struct io_pgtable *iop; struct arm_lpae_io_pgtable *data;
if (cfg->ias != 48 || cfg->oas > 40) /* No quirks for Mali (hopefully) */
if (cfg->quirks)
return NULL;
if (cfg->ias > 48 || cfg->oas > 40)
return NULL; return NULL;
cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
if (iop) {
u64 mair, ttbr;
/* Copy values as union fields overlap */ data = arm_lpae_alloc_pgtable(cfg);
mair = cfg->arm_lpae_s1_cfg.mair[0]; if (!data)
ttbr = cfg->arm_lpae_s1_cfg.ttbr[0]; return NULL;
cfg->arm_mali_lpae_cfg.memattr = mair; /* Mali seems to need a full 4-level table regardless of IAS */
cfg->arm_mali_lpae_cfg.transtab = ttbr | if (data->levels < ARM_LPAE_MAX_LEVELS) {
ARM_MALI_LPAE_TTBR_READ_INNER | data->levels = ARM_LPAE_MAX_LEVELS;
ARM_MALI_LPAE_TTBR_ADRMODE_TABLE; data->pgd_size = sizeof(arm_lpae_iopte);
} }
/*
* MEMATTR: Mali has no actual notion of a non-cacheable type, so the
* best we can do is mimic the out-of-tree driver and hope that the
* "implementation-defined caching policy" is good enough. Similarly,
* we'll use it for the sake of a valid attribute for our 'device'
* index, although callers should never request that in practice.
*/
cfg->arm_mali_lpae_cfg.memattr =
(ARM_MALI_LPAE_MEMATTR_IMP_DEF
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
(ARM_MALI_LPAE_MEMATTR_IMP_DEF
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
return iop; data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
/* Ensure the empty pgd is visible before TRANSTAB can be written */
wmb();
cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
ARM_MALI_LPAE_TTBR_READ_INNER |
ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
return &data->iop;
out_free_data:
kfree(data);
return NULL;
} }
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
......
...@@ -1086,8 +1086,6 @@ static int ipmmu_probe(struct platform_device *pdev) ...@@ -1086,8 +1086,6 @@ static int ipmmu_probe(struct platform_device *pdev)
mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts); mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
irq = platform_get_irq(pdev, 0);
/* /*
* Determine if this IPMMU instance is a root device by checking for * Determine if this IPMMU instance is a root device by checking for
* the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property. * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
...@@ -1106,6 +1104,7 @@ static int ipmmu_probe(struct platform_device *pdev) ...@@ -1106,6 +1104,7 @@ static int ipmmu_probe(struct platform_device *pdev)
/* Root devices have mandatory IRQs */ /* Root devices have mandatory IRQs */
if (ipmmu_is_root(mmu)) { if (ipmmu_is_root(mmu)) {
irq = platform_get_irq(pdev, 0);
if (irq < 0) { if (irq < 0) {
dev_err(&pdev->dev, "no IRQ found\n"); dev_err(&pdev->dev, "no IRQ found\n");
return irq; return irq;
......
...@@ -100,6 +100,7 @@ struct rk_iommu { ...@@ -100,6 +100,7 @@ struct rk_iommu {
struct device *dev; struct device *dev;
void __iomem **bases; void __iomem **bases;
int num_mmu; int num_mmu;
int num_irq;
struct clk_bulk_data *clocks; struct clk_bulk_data *clocks;
int num_clocks; int num_clocks;
bool reset_disabled; bool reset_disabled;
...@@ -1136,7 +1137,7 @@ static int rk_iommu_probe(struct platform_device *pdev) ...@@ -1136,7 +1137,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
struct rk_iommu *iommu; struct rk_iommu *iommu;
struct resource *res; struct resource *res;
int num_res = pdev->num_resources; int num_res = pdev->num_resources;
int err, i, irq; int err, i;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu) if (!iommu)
...@@ -1163,6 +1164,10 @@ static int rk_iommu_probe(struct platform_device *pdev) ...@@ -1163,6 +1164,10 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (iommu->num_mmu == 0) if (iommu->num_mmu == 0)
return PTR_ERR(iommu->bases[0]); return PTR_ERR(iommu->bases[0]);
iommu->num_irq = platform_irq_count(pdev);
if (iommu->num_irq < 0)
return iommu->num_irq;
iommu->reset_disabled = device_property_read_bool(dev, iommu->reset_disabled = device_property_read_bool(dev,
"rockchip,disable-mmu-reset"); "rockchip,disable-mmu-reset");
...@@ -1219,8 +1224,9 @@ static int rk_iommu_probe(struct platform_device *pdev) ...@@ -1219,8 +1224,9 @@ static int rk_iommu_probe(struct platform_device *pdev)
pm_runtime_enable(dev); pm_runtime_enable(dev);
i = 0; for (i = 0; i < iommu->num_irq; i++) {
while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) { int irq = platform_get_irq(pdev, i);
if (irq < 0) if (irq < 0)
return irq; return irq;
...@@ -1245,10 +1251,13 @@ static int rk_iommu_probe(struct platform_device *pdev) ...@@ -1245,10 +1251,13 @@ static int rk_iommu_probe(struct platform_device *pdev)
static void rk_iommu_shutdown(struct platform_device *pdev) static void rk_iommu_shutdown(struct platform_device *pdev)
{ {
struct rk_iommu *iommu = platform_get_drvdata(pdev); struct rk_iommu *iommu = platform_get_drvdata(pdev);
int i = 0, irq; int i;
for (i = 0; i < iommu->num_irq; i++) {
int irq = platform_get_irq(pdev, i);
while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
devm_free_irq(iommu->dev, irq, iommu); devm_free_irq(iommu->dev, irq, iommu);
}
pm_runtime_force_suspend(&pdev->dev); pm_runtime_force_suspend(&pdev->dev);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment