Commit 7bdecc26 authored by Linus Torvalds

Merge tag 'iommu-fixes-v6.4-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:
 "AMD IOMMU fixes:
   - Fix domain type and size checks
   - IOTLB flush fix for invalidating ranges
   - Guest IRQ handling fixes and GALOG overflow fix

  Rockchip IOMMU:
   - Error handling fix

  Mediatek IOMMU:
   - IOTLB flushing fix

  Renesas IOMMU:
   - Fix Kconfig dependencies to avoid build errors on RISC-V"

* tag 'iommu-fixes-v6.4-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/mediatek: Flush IOTLB completely only if domain has been attached
  iommu/amd/pgtbl_v2: Fix domain max address
  iommu/amd: Fix domain flush size when syncing iotlb
  iommu/amd: Add missing domain type checks
  iommu/amd: Fix up merge conflict resolution
  iommu/amd: Handle GALog overflows
  iommu/amd: Don't block updates to GATag if guest mode is on
  iommu/rockchip: Fix unwind goto issue
  iommu: Make IPMMU_VMSA dependencies more strict
Parents: e99a7467 b3fc9570
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -282,6 +282,7 @@ config EXYNOS_IOMMU_DEBUG
 config IPMMU_VMSA
 	bool "Renesas VMSA-compatible IPMMU"
 	depends on ARCH_RENESAS || COMPILE_TEST
+	depends on ARM || ARM64 || COMPILE_TEST
 	depends on !GENERIC_ATOMIC64	# for IOMMU_IO_PGTABLE_LPAE
 	select IOMMU_API
 	select IOMMU_IO_PGTABLE_LPAE
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -15,9 +15,7 @@ extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
 extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
-extern int amd_iommu_init_devices(void);
-extern void amd_iommu_uninit_devices(void);
-extern void amd_iommu_init_notifier(void);
+extern void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
 extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
 
 #ifdef CONFIG_AMD_IOMMU_DEBUGFS
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -758,6 +758,30 @@ void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
 	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
 }
 
+/*
+ * This function restarts GA logging in case the IOMMU experienced
+ * a GA log overflow.
+ */
+void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
+{
+	u32 status;
+
+	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+	if (status & MMIO_STATUS_GALOG_RUN_MASK)
+		return;
+
+	pr_info_ratelimited("IOMMU GA Log restarting\n");
+
+	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
+	iommu_feature_disable(iommu, CONTROL_GAINT_EN);
+
+	writel(MMIO_STATUS_GALOG_OVERFLOW_MASK,
+	       iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
+	iommu_feature_enable(iommu, CONTROL_GALOG_EN);
+}
+
 /*
  * This function resets the command buffer if the IOMMU stopped fetching
  * commands from it.
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -845,6 +845,7 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
 	(MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
 	 MMIO_STATUS_EVT_INT_MASK | \
 	 MMIO_STATUS_PPR_INT_MASK | \
+	 MMIO_STATUS_GALOG_OVERFLOW_MASK | \
 	 MMIO_STATUS_GALOG_INT_MASK)
 
 irqreturn_t amd_iommu_int_thread(int irq, void *data)
@@ -868,10 +869,16 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
 	}
 
 #ifdef CONFIG_IRQ_REMAP
-	if (status & MMIO_STATUS_GALOG_INT_MASK) {
+	if (status & (MMIO_STATUS_GALOG_INT_MASK |
+		      MMIO_STATUS_GALOG_OVERFLOW_MASK)) {
 		pr_devel("Processing IOMMU GA Log\n");
 		iommu_poll_ga_log(iommu);
 	}
+
+	if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) {
+		pr_info_ratelimited("IOMMU GA Log overflow\n");
+		amd_iommu_restart_ga_log(iommu);
+	}
 #endif
 
 	if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
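Note: adding MMIO_STATUS_GALOG_OVERFLOW_MASK to AMD_IOMMU_INT_MASK matters because amd_iommu_int_thread() re-reads the status register and keeps dispatching while any masked bit is set, so an overflow raised mid-loop gets serviced instead of dropped. A simplified, runnable model of that dispatch pattern (the bit values and the fake status register are illustrative stand-ins, not the kernel's real MMIO_STATUS_* constants):

#include <stdint.h>
#include <stdio.h>

#define GALOG_INT (1u << 0)	/* illustrative bit positions, not the  */
#define GALOG_OVF (1u << 1)	/* kernel's real MMIO_STATUS_* values   */
#define INT_MASK  (GALOG_INT | GALOG_OVF)

static uint32_t mmio_status = GALOG_INT | GALOG_OVF;	/* fake status reg */

int main(void)
{
	uint32_t status;

	/* Re-read and keep dispatching while any masked bit is set;
	 * because GALOG_OVF is part of INT_MASK, an overflow cannot
	 * terminate the loop unhandled. */
	while ((status = mmio_status) & INT_MASK) {
		if (status & (GALOG_INT | GALOG_OVF))
			puts("poll GA log");
		if (status & GALOG_OVF)
			puts("restart GA log");
		mmio_status &= ~status;	/* ack the bits we handled */
	}
	return 0;
}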
@@ -2067,7 +2074,7 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
 {
 	struct io_pgtable_ops *pgtbl_ops;
 	struct protection_domain *domain;
-	int pgtable = amd_iommu_pgtable;
+	int pgtable;
 	int mode = DEFAULT_PGTABLE_LEVEL;
 	int ret;
 
@@ -2084,6 +2091,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
 		mode = PAGE_MODE_NONE;
 	} else if (type == IOMMU_DOMAIN_UNMANAGED) {
 		pgtable = AMD_IOMMU_V1;
+	} else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) {
+		pgtable = amd_iommu_pgtable;
+	} else {
+		return NULL;
 	}
 
 	switch (pgtable) {
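Note: with the explicit type checks above, protection_domain_alloc() picks a page table only for the domain types it actually supports and rejects everything else up front. A small userspace model of the selection logic (the enum names are stand-ins for the kernel's IOMMU_DOMAIN_* and AMD_IOMMU_V1/V2 constants):

#include <stdio.h>

/* Stand-ins for the kernel's domain types and page-table modes. */
enum domain_type { IDENTITY, UNMANAGED, DMA, DMA_FQ, OTHER };
enum pgtable { PGT_NONE, PGT_V1, PGT_V2, PGT_INVALID };

static enum pgtable default_pgtable = PGT_V2;	/* amd_iommu_pgtable stand-in */

static enum pgtable pick_pgtable(enum domain_type type)
{
	if (type == IDENTITY)
		return PGT_NONE;	/* identity map: no page table      */
	if (type == UNMANAGED)
		return PGT_V1;		/* unmanaged domains are V1-only    */
	if (type == DMA || type == DMA_FQ)
		return default_pgtable;	/* DMA honours the global default   */
	return PGT_INVALID;		/* anything else: allocation fails  */
}

int main(void)
{
	printf("unexpected type -> %s\n",
	       pick_pgtable(OTHER) == PGT_INVALID ? "rejected" : "accepted");
	return 0;
}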
@@ -2118,6 +2129,15 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
 	return NULL;
 }
 
+static inline u64 dma_max_address(void)
+{
+	if (amd_iommu_pgtable == AMD_IOMMU_V1)
+		return ~0ULL;
+
+	/* V2 with 4/5 level page table */
+	return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
+}
+
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
 	struct protection_domain *domain;
@@ -2134,7 +2154,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 		return NULL;
 
 	domain->domain.geometry.aperture_start = 0;
-	domain->domain.geometry.aperture_end   = ~0ULL;
+	domain->domain.geometry.aperture_end   = dma_max_address();
 	domain->domain.geometry.force_aperture = true;
 
 	return &domain->domain;
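Note: with V2 page tables the usable IOVA space is bounded by the page-table depth, so advertising an aperture end of ~0ULL let callers allocate addresses that could never be mapped. Assuming PM_LEVEL_SHIFT(x) expands to 12 + 9 * x as in amd_iommu_types.h (12 page-offset bits plus 9 index bits per level), the new aperture end works out to 2^48 - 1 for a 4-level table and 2^57 - 1 for a 5-level one, while V1 keeps the full 64-bit range. A quick check:

#include <stdio.h>

/* Assumed to match PM_LEVEL_SHIFT() in amd_iommu_types.h: 12 page-offset
 * bits plus 9 index bits per page-table level. */
#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9))

int main(void)
{
	printf("4-level aperture end: %#llx\n", (1ULL << PM_LEVEL_SHIFT(4)) - 1);
	printf("5-level aperture end: %#llx\n", (1ULL << PM_LEVEL_SHIFT(5)) - 1);
	return 0;
}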
@@ -2387,7 +2407,7 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
 	unsigned long flags;
 
 	spin_lock_irqsave(&dom->lock, flags);
-	domain_flush_pages(dom, gather->start, gather->end - gather->start, 1);
+	domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1);
 	amd_iommu_domain_flush_complete(dom);
 	spin_unlock_irqrestore(&dom->lock, flags);
 }
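Note: struct iommu_iotlb_gather stores end as the inclusive address of the last byte in the range (start + size - 1), so the flush size needs the + 1; the old expression under-flushed by one byte and could leave the final page stale in the IOTLB. Worked example for a single 4 KiB page:

#include <stdio.h>

int main(void)
{
	/* One 4 KiB page at 0x1000: gather->start = 0x1000 and
	 * gather->end = 0x1fff, the inclusive last byte. */
	unsigned long start = 0x1000, end = 0x1fff;

	printf("old flush size: %#lx\n", end - start);		/* 0xfff  */
	printf("new flush size: %#lx\n", end - start + 1);	/* 0x1000 */
	return 0;
}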
@@ -3493,8 +3513,7 @@ int amd_iommu_activate_guest_mode(void *data)
 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
 	u64 valid;
 
-	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
-	    !entry || entry->lo.fields_vapic.guest_mode)
+	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry)
 		return 0;
 
 	valid = entry->lo.fields_vapic.valid;
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -781,6 +781,7 @@ static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 
-	mtk_iommu_tlb_flush_all(dom->bank->parent_data);
+	if (dom->bank)
+		mtk_iommu_tlb_flush_all(dom->bank->parent_data);
 }
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1335,20 +1335,22 @@ static int rk_iommu_probe(struct platform_device *pdev)
 	for (i = 0; i < iommu->num_irq; i++) {
 		int irq = platform_get_irq(pdev, i);
 
-		if (irq < 0)
-			return irq;
+		if (irq < 0) {
+			err = irq;
+			goto err_pm_disable;
+		}
 
 		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
 				       IRQF_SHARED, dev_name(dev), iommu);
-		if (err) {
-			pm_runtime_disable(dev);
-			goto err_remove_sysfs;
-		}
+		if (err)
+			goto err_pm_disable;
 	}
 
 	dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
 
 	return 0;
+err_pm_disable:
+	pm_runtime_disable(dev);
 err_remove_sysfs:
 	iommu_device_sysfs_remove(&iommu->iommu);
 err_put_group:
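Note: the Rockchip fix restores the kernel's ordered-unwind idiom: once pm_runtime_enable() has run, every failure path funnels through err_pm_disable, which falls through to the earlier cleanup labels so each completed step is undone exactly once, in reverse order. A generic, self-contained sketch of the pattern (helper names are illustrative, not from the driver):

#include <stdio.h>

/* Illustrative resources; this shows the pattern, not the driver code. */
static int  setup_a(void) { puts("a up");    return 0;  }
static int  setup_b(void) { puts("b up");    return 0;  }
static int  setup_c(void) { puts("c fails"); return -1; }
static void undo_b(void)  { puts("b down"); }
static void undo_a(void)  { puts("a down"); }

static int example_probe(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;		/* nothing to unwind yet */

	err = setup_b();
	if (err)
		goto err_undo_a;

	err = setup_c();
	if (err)
		goto err_undo_b;	/* undo in reverse order of setup */

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
	return err;
}

int main(void)
{
	return example_probe() ? 1 : 0;
}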