Commit e58e501a authored by Arnd Bergmann

Merge tag 'tegra-for-3.19-iommu' of git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux into next/drivers

Pull "ARM: tegra: IOMMU support for v3.19" from Thierry Reding:

This adds the driver pieces required for IOMMU support on Tegra30,
Tegra114 and Tegra124.

* tag 'tegra-for-3.19-iommu' of git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux:
  memory: Add NVIDIA Tegra memory controller support
  of: Add NVIDIA Tegra memory controller binding
  ARM: tegra: Move AHB Kconfig to drivers/amba
  amba: Add Kconfig file
  clk: tegra: Implement memory-controller clock
  powerpc/iommu: Rename iommu_[un]map_sg functions
  iommu: Improve error handling when setting bus iommu
  iommu: Do more input validation in iommu_map_sg()
  iommu: Add iommu_map_sg() function
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
parents a8afa264 89184651
NVIDIA Tegra Memory Controller device tree bindings
===================================================
Required properties:
- compatible: Should be "nvidia,tegra<chip>-mc"
- reg: Physical base address and length of the controller's registers.
- clocks: Must contain an entry for each entry in clock-names.
See ../clocks/clock-bindings.txt for details.
- clock-names: Must include the following entries:
- mc: the module's clock input
- interrupts: The interrupt outputs from the controller.
- #iommu-cells: Should be 1. The single cell of the IOMMU specifier defines
the SWGROUP of the master.
This device implements an IOMMU that complies with the generic IOMMU binding.
See ../iommu/iommu.txt for details.
Example:
--------
mc: memory-controller@0,70019000 {
compatible = "nvidia,tegra124-mc";
reg = <0x0 0x70019000 0x0 0x1000>;
clocks = <&tegra_car TEGRA124_CLK_MC>;
clock-names = "mc";
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
#iommu-cells = <1>;
};
sdhci@0,700b0000 {
compatible = "nvidia,tegra124-sdhci";
...
iommus = <&mc TEGRA_SWGROUP_SDMMC1A>;
};
......@@ -1259,9 +1259,6 @@ source "arch/arm/common/Kconfig"
menu "Bus support"
config ARM_AMBA
bool
config ISA
bool
help
......
......@@ -2,6 +2,7 @@ menuconfig ARCH_TEGRA
bool "NVIDIA Tegra" if ARCH_MULTI_V7
select ARCH_REQUIRE_GPIOLIB
select ARCH_SUPPORTS_TRUSTED_FOUNDATIONS
select ARM_AMBA
select ARM_GIC
select CLKSRC_MMIO
select HAVE_ARM_SCU if SMP
......@@ -59,12 +60,4 @@ config ARCH_TEGRA_124_SOC
Support for NVIDIA Tegra T124 processor family, based on the
ARM CortexA15MP CPU
config TEGRA_AHB
bool "Enable AHB driver for NVIDIA Tegra SoCs"
default y
help
Adds AHB configuration functionality for NVIDIA Tegra SoCs,
which controls AHB bus master arbitration and some
performance parameters (priority, prefetch size).
endif
......@@ -166,9 +166,6 @@ endmenu
menu "Bus support"
config ARM_AMBA
bool
config PCI
bool "PCI support"
help
......
......@@ -137,13 +137,16 @@ static inline void set_iommu_table_base_and_group(struct device *dev,
iommu_add_device(dev);
}
extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
unsigned long mask, enum dma_data_direction direction,
struct dma_attrs *attrs);
extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
struct dma_attrs *attrs);
extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
unsigned long mask,
enum dma_data_direction direction,
struct dma_attrs *attrs);
extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
struct scatterlist *sglist,
int nelems,
enum dma_data_direction direction,
struct dma_attrs *attrs);
extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
size_t size, dma_addr_t *dma_handle,
......
......@@ -60,16 +60,16 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
device_to_mask(dev), direction, attrs);
return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
device_to_mask(dev), direction, attrs);
}
static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction,
attrs);
ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
direction, attrs);
}
/* We support DMA to/from any memory page via the iommu */
......
......@@ -428,10 +428,10 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
ppc_md.tce_flush(tbl);
}
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
unsigned long mask, enum dma_data_direction direction,
struct dma_attrs *attrs)
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
unsigned long mask, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
dma_addr_t dma_next = 0, dma_addr;
struct scatterlist *s, *outs, *segstart;
......@@ -539,7 +539,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
DBG("mapped %d elements:\n", outcount);
/* For the sake of iommu_unmap_sg, we clear out the length in the
/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
* next entry of the sglist if we didn't fill the list completely
*/
if (outcount < incount) {
......@@ -572,9 +572,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
}
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
struct dma_attrs *attrs)
void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct scatterlist *sg;
......
......@@ -621,8 +621,9 @@ static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
else
return iommu_map_sg(dev, cell_get_iommu_table(dev), sg, nents,
device_to_mask(dev), direction, attrs);
return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg,
nents, device_to_mask(dev),
direction, attrs);
}
static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
......@@ -632,8 +633,8 @@ static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
else
iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, direction,
attrs);
ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents,
direction, attrs);
}
static int dma_fixed_dma_supported(struct device *dev, u64 mask)
......
menu "Device Drivers"
source "drivers/amba/Kconfig"
source "drivers/base/Kconfig"
source "drivers/bus/Kconfig"
......
config ARM_AMBA
bool
if ARM_AMBA
config TEGRA_AHB
bool "Enable AHB driver for NVIDIA Tegra SoCs"
default y if ARCH_TEGRA
help
Adds AHB configuration functionality for NVIDIA Tegra SoCs,
which controls AHB bus master arbitration and some performance
parameters (priority, prefetch size).
endif
......@@ -185,3 +185,16 @@ struct clk *tegra_clk_register_divider(const char *name,
return clk;
}
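/*
 * The MC clock divides down its EMC parent: the one-bit divider field at
 * bit 16 of CLK_SOURCE_EMC selects half the parent rate (field value 0)
 * or the full parent rate (field value 1), per mc_div_table below.
 */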
static const struct clk_div_table mc_div_table[] = {
{ .val = 0, .div = 2 },
{ .val = 1, .div = 1 },
{ .val = 0, .div = 0 },
};
struct clk *tegra_clk_register_mc(const char *name, const char *parent_name,
void __iomem *reg, spinlock_t *lock)
{
return clk_register_divider_table(NULL, name, parent_name, 0, reg,
16, 1, 0, mc_div_table, lock);
}
......@@ -173,6 +173,7 @@ static DEFINE_SPINLOCK(pll_d_lock);
static DEFINE_SPINLOCK(pll_d2_lock);
static DEFINE_SPINLOCK(pll_u_lock);
static DEFINE_SPINLOCK(pll_re_lock);
static DEFINE_SPINLOCK(emc_lock);
static struct div_nmp pllxc_nmp = {
.divm_shift = 0,
......@@ -1228,7 +1229,11 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base,
ARRAY_SIZE(mux_pllmcp_clkm),
CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
29, 3, 0, NULL);
29, 3, 0, &emc_lock);
clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
&emc_lock);
clks[TEGRA114_CLK_MC] = clk;
for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
data = &tegra_periph_clk_list[i];
......
......@@ -132,6 +132,7 @@ static DEFINE_SPINLOCK(pll_d2_lock);
static DEFINE_SPINLOCK(pll_e_lock);
static DEFINE_SPINLOCK(pll_re_lock);
static DEFINE_SPINLOCK(pll_u_lock);
static DEFINE_SPINLOCK(emc_lock);
/* possible OSC frequencies in Hz */
static unsigned long tegra124_input_freq[] = {
......@@ -1127,7 +1128,11 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
ARRAY_SIZE(mux_pllmcp_clkm), 0,
clk_base + CLK_SOURCE_EMC,
29, 3, 0, NULL);
29, 3, 0, &emc_lock);
clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
&emc_lock);
clks[TEGRA124_CLK_MC] = clk;
/* cml0 */
clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
......
......@@ -140,6 +140,8 @@ static struct cpu_clk_suspend_context {
static void __iomem *clk_base;
static void __iomem *pmc_base;
static DEFINE_SPINLOCK(emc_lock);
#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \
_clk_num, _gate_flags, _clk_id) \
TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
......@@ -819,11 +821,15 @@ static void __init tegra20_periph_clk_init(void)
ARRAY_SIZE(mux_pllmcp_clkm),
CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
30, 2, 0, NULL);
30, 2, 0, &emc_lock);
clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
57, periph_clk_enb_refcnt);
clks[TEGRA20_CLK_EMC] = clk;
clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
&emc_lock);
clks[TEGRA20_CLK_MC] = clk;
/* dsi */
clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0,
48, periph_clk_enb_refcnt);
......
......@@ -177,6 +177,7 @@ static unsigned long input_freq;
static DEFINE_SPINLOCK(cml_lock);
static DEFINE_SPINLOCK(pll_d_lock);
static DEFINE_SPINLOCK(emc_lock);
#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \
_clk_num, _gate_flags, _clk_id) \
......@@ -1157,11 +1158,15 @@ static void __init tegra30_periph_clk_init(void)
ARRAY_SIZE(mux_pllmcp_clkm),
CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
30, 2, 0, NULL);
30, 2, 0, &emc_lock);
clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
57, periph_clk_enb_refcnt);
clks[TEGRA30_CLK_EMC] = clk;
clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
&emc_lock);
clks[TEGRA30_CLK_MC] = clk;
/* cml0 */
clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
0, 0, &cml_lock);
......
......@@ -86,6 +86,8 @@ struct clk *tegra_clk_register_divider(const char *name,
const char *parent_name, void __iomem *reg,
unsigned long flags, u8 clk_divider_flags, u8 shift, u8 width,
u8 frac_width, spinlock_t *lock);
struct clk *tegra_clk_register_mc(const char *name, const char *parent_name,
void __iomem *reg, spinlock_t *lock);
/*
* Tegra PLL:
......
......@@ -163,14 +163,14 @@ config TEGRA_IOMMU_GART
hardware included on Tegra SoCs.
config TEGRA_IOMMU_SMMU
bool "Tegra SMMU IOMMU Support"
depends on ARCH_TEGRA && TEGRA_AHB
bool "NVIDIA Tegra SMMU Support"
depends on ARCH_TEGRA
depends on TEGRA_AHB
depends on TEGRA_MC
select IOMMU_API
help
Enables support for remapping discontiguous physical memory
shared with the operating system into contiguous I/O virtual
space through the SMMU (System Memory Management Unit)
hardware included on Tegra SoCs.
This driver supports the IOMMU hardware (SMMU) found on NVIDIA Tegra
SoCs (Tegra30 up to Tegra124).
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
......
......@@ -3424,6 +3424,7 @@ static const struct iommu_ops amd_iommu_ops = {
.detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map,
.unmap = amd_iommu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = amd_iommu_iova_to_phys,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
......
......@@ -1652,6 +1652,7 @@ static const struct iommu_ops arm_smmu_ops = {
.detach_dev = arm_smmu_detach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
......
......@@ -1178,6 +1178,7 @@ static const struct iommu_ops exynos_iommu_ops = {
.detach_dev = exynos_iommu_detach_device,
.map = exynos_iommu_map,
.unmap = exynos_iommu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = exynos_iommu_iova_to_phys,
.add_device = exynos_iommu_add_device,
.remove_device = exynos_iommu_remove_device,
......
......@@ -4467,6 +4467,7 @@ static const struct iommu_ops intel_iommu_ops = {
.detach_dev = intel_iommu_detach_device,
.map = intel_iommu_map,
.unmap = intel_iommu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = intel_iommu_iova_to_phys,
.add_device = intel_iommu_add_device,
.remove_device = intel_iommu_remove_device,
......
......@@ -818,7 +818,15 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
kfree(nb);
return err;
}
return bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
if (err) {
bus_unregister_notifier(bus, nb);
kfree(nb);
return err;
}
return 0;
}
/**
......@@ -836,13 +844,19 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
*/
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
int err;
if (bus->iommu_ops != NULL)
return -EBUSY;
bus->iommu_ops = ops;
/* Do IOMMU specific setup for this bus-type */
return iommu_bus_init(bus, ops);
err = iommu_bus_init(bus, ops);
if (err)
bus->iommu_ops = NULL;
return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
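With this change bus_set_iommu() no longer leaves bus->iommu_ops set when bus initialization fails, so callers should check its return value. A minimal caller sketch, assuming <linux/iommu.h> and <linux/platform_device.h> are included; the ops structure and init function are hypothetical and not part of this series:
static const struct iommu_ops example_iommu_ops; /* hypothetical driver ops */

static int __init example_iommu_init(void)
{
	int err;

	/* bus->iommu_ops is reset to NULL again if iommu_bus_init() fails */
	err = bus_set_iommu(&platform_bus_type, &example_iommu_ops);
	if (err)
		pr_err("example-iommu: bus_set_iommu() failed: %d\n", err);

	return err;
}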
......@@ -1124,6 +1138,38 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
}
EXPORT_SYMBOL_GPL(iommu_unmap);
size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
struct scatterlist *s;
size_t mapped = 0;
unsigned int i;
int ret;
for_each_sg(sg, s, nents, i) {
phys_addr_t phys = page_to_phys(sg_page(s));
/* We are mapping on page boundaries, so offset must be 0 */
if (s->offset)
goto out_err;
ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
if (ret)
goto out_err;
mapped += s->length;
}
return mapped;
out_err:
/* undo mappings already done */
iommu_unmap(domain, iova, mapped);
return 0;
}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);
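For context, a minimal usage sketch of the iommu_map_sg() wrapper that this series adds on top of the new .map_sg callback, assuming <linux/iommu.h> and <linux/scatterlist.h> are included; the device, IOVA base and sg_table are hypothetical and error handling is reduced to the essentials:
static int example_map_sgtable(struct device *dev, struct sg_table *sgt,
			       unsigned long iova)
{
	struct iommu_domain *domain;
	size_t mapped;
	int err;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	err = iommu_attach_device(domain, dev);
	if (err < 0)
		goto free_domain;

	/* returns the number of bytes mapped; 0 on failure (the default
	 * implementation above unwinds any partial mappings itself) */
	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
			      IOMMU_READ | IOMMU_WRITE);
	if (!mapped) {
		err = -ENOMEM;
		iommu_detach_device(domain, dev);
		goto free_domain;
	}

	return 0;

free_domain:
	iommu_domain_free(domain);
	return err;
}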
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
......
......@@ -1127,6 +1127,7 @@ static const struct iommu_ops ipmmu_ops = {
.detach_dev = ipmmu_detach_device,
.map = ipmmu_map,
.unmap = ipmmu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = ipmmu_iova_to_phys,
.add_device = ipmmu_add_device,
.remove_device = ipmmu_remove_device,
......
......@@ -681,6 +681,7 @@ static const struct iommu_ops msm_iommu_ops = {
.detach_dev = msm_iommu_detach_dev,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = msm_iommu_iova_to_phys,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
......
......@@ -1288,6 +1288,7 @@ static const struct iommu_ops omap_iommu_ops = {
.detach_dev = omap_iommu_detach_dev,
.map = omap_iommu_map,
.unmap = omap_iommu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = omap_iommu_iova_to_phys,
.add_device = omap_iommu_add_device,
.remove_device = omap_iommu_remove_device,
......
......@@ -361,6 +361,7 @@ static const struct iommu_ops shmobile_iommu_ops = {
.detach_dev = shmobile_iommu_detach_device,
.map = shmobile_iommu_map,
.unmap = shmobile_iommu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = shmobile_iommu_iova_to_phys,
.add_device = shmobile_iommu_add_device,
.pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K,
......
......@@ -61,16 +61,6 @@ config TEGRA20_MC
analysis, especially for IOMMU/GART (Graphics Address
Relocation Table) module.
config TEGRA30_MC
bool "Tegra30 Memory Controller(MC) driver"
default y
depends on ARCH_TEGRA_3x_SOC
help
This driver is for the Memory Controller (MC) module available
in Tegra30 SoCs, mainly for an address translation fault
analysis, especially for IOMMU/SMMU (System Memory Management
Unit) module.
config FSL_CORENET_CF
tristate "Freescale CoreNet Error Reporting"
depends on FSL_SOC_BOOKE
......@@ -85,4 +75,6 @@ config FSL_IFC
bool
depends on FSL_SOC
source "drivers/memory/tegra/Kconfig"
endif
......@@ -12,4 +12,5 @@ obj-$(CONFIG_FSL_CORENET_CF) += fsl-corenet-cf.o
obj-$(CONFIG_FSL_IFC) += fsl_ifc.o
obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
obj-$(CONFIG_TEGRA30_MC) += tegra30-mc.o
obj-$(CONFIG_TEGRA_MC) += tegra/
config TEGRA_MC
bool "NVIDIA Tegra Memory Controller support"
default y
depends on ARCH_TEGRA
help
This driver supports the Memory Controller (MC) hardware found on
NVIDIA Tegra SoCs.
tegra-mc-y := mc.o
tegra-mc-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30.o
tegra-mc-$(CONFIG_ARCH_TEGRA_114_SOC) += tegra114.o
tegra-mc-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124.o
obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
/*
* Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "mc.h"
#define MC_INTSTATUS 0x000
#define MC_INT_DECERR_MTS (1 << 16)
#define MC_INT_SECERR_SEC (1 << 13)
#define MC_INT_DECERR_VPR (1 << 12)
#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11)
#define MC_INT_INVALID_SMMU_PAGE (1 << 10)
#define MC_INT_ARBITRATION_EMEM (1 << 9)
#define MC_INT_SECURITY_VIOLATION (1 << 8)
#define MC_INT_DECERR_EMEM (1 << 6)
#define MC_INTMASK 0x004
#define MC_ERR_STATUS 0x08
#define MC_ERR_STATUS_TYPE_SHIFT 28
#define MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE (6 << MC_ERR_STATUS_TYPE_SHIFT)
#define MC_ERR_STATUS_TYPE_MASK (0x7 << MC_ERR_STATUS_TYPE_SHIFT)
#define MC_ERR_STATUS_READABLE (1 << 27)
#define MC_ERR_STATUS_WRITABLE (1 << 26)
#define MC_ERR_STATUS_NONSECURE (1 << 25)
#define MC_ERR_STATUS_ADR_HI_SHIFT 20
#define MC_ERR_STATUS_ADR_HI_MASK 0x3
#define MC_ERR_STATUS_SECURITY (1 << 17)
#define MC_ERR_STATUS_RW (1 << 16)
#define MC_ERR_STATUS_CLIENT_MASK 0x7f
#define MC_ERR_ADR 0x0c
#define MC_EMEM_ARB_CFG 0x90
#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(x) (((x) & 0x1ff) << 0)
#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK 0x1ff
#define MC_EMEM_ARB_MISC0 0xd8
static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
{ .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
{ .compatible = "nvidia,tegra114-mc", .data = &tegra114_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_124_SOC
{ .compatible = "nvidia,tegra124-mc", .data = &tegra124_mc_soc },
#endif
{ }
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
{
unsigned long long tick;
unsigned int i;
u32 value;
/* compute the number of MC clock cycles per tick */
tick = mc->tick * clk_get_rate(mc->clk);
do_div(tick, NSEC_PER_SEC);
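/* e.g. the default 30 ns tick at a (hypothetical) 204 MHz MC clock gives ~6 cycles */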
value = readl(mc->regs + MC_EMEM_ARB_CFG);
value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK;
value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick);
writel(value, mc->regs + MC_EMEM_ARB_CFG);
/* write latency allowance defaults */
for (i = 0; i < mc->soc->num_clients; i++) {
const struct tegra_mc_la *la = &mc->soc->clients[i].la;
u32 value;
value = readl(mc->regs + la->reg);
value &= ~(la->mask << la->shift);
value |= (la->def & la->mask) << la->shift;
writel(value, mc->regs + la->reg);
}
return 0;
}
static const char *const status_names[32] = {
[ 1] = "External interrupt",
[ 6] = "EMEM address decode error",
[ 8] = "Security violation",
[ 9] = "EMEM arbitration error",
[10] = "Page fault",
[11] = "Invalid APB ASID update",
[12] = "VPR violation",
[13] = "Secure carveout violation",
[16] = "MTS carveout violation",
};
static const char *const error_names[8] = {
[2] = "EMEM decode error",
[3] = "TrustZone violation",
[4] = "Carveout violation",
[6] = "SMMU translation error",
};
static irqreturn_t tegra_mc_irq(int irq, void *data)
{
struct tegra_mc *mc = data;
unsigned long status, mask;
unsigned int bit;
/* mask all interrupts to avoid flooding */
status = mc_readl(mc, MC_INTSTATUS);
mask = mc_readl(mc, MC_INTMASK);
for_each_set_bit(bit, &status, 32) {
const char *error = status_names[bit] ?: "unknown";
const char *client = "unknown", *desc;
const char *direction, *secure;
phys_addr_t addr = 0;
unsigned int i;
char perm[7];
u8 id, type;
u32 value;
value = mc_readl(mc, MC_ERR_STATUS);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (mc->soc->num_address_bits > 32) {
addr = ((value >> MC_ERR_STATUS_ADR_HI_SHIFT) &
MC_ERR_STATUS_ADR_HI_MASK);
addr <<= 32;
}
#endif
if (value & MC_ERR_STATUS_RW)
direction = "write";
else
direction = "read";
if (value & MC_ERR_STATUS_SECURITY)
secure = "secure ";
else
secure = "";
id = value & MC_ERR_STATUS_CLIENT_MASK;
for (i = 0; i < mc->soc->num_clients; i++) {
if (mc->soc->clients[i].id == id) {
client = mc->soc->clients[i].name;
break;
}
}
type = (value & MC_ERR_STATUS_TYPE_MASK) >>
MC_ERR_STATUS_TYPE_SHIFT;
desc = error_names[type];
switch (value & MC_ERR_STATUS_TYPE_MASK) {
case MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE:
perm[0] = ' ';
perm[1] = '[';
if (value & MC_ERR_STATUS_READABLE)
perm[2] = 'R';
else
perm[2] = '-';
if (value & MC_ERR_STATUS_WRITABLE)
perm[3] = 'W';
else
perm[3] = '-';
if (value & MC_ERR_STATUS_NONSECURE)
perm[4] = '-';
else
perm[4] = 'S';
perm[5] = ']';
perm[6] = '\0';
break;
default:
perm[0] = '\0';
break;
}
value = mc_readl(mc, MC_ERR_ADR);
addr |= value;
dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s%s)\n",
client, secure, direction, &addr, error,
desc, perm);
}
/* clear interrupts */
mc_writel(mc, status, MC_INTSTATUS);
return IRQ_HANDLED;
}
static int tegra_mc_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct resource *res;
struct tegra_mc *mc;
u32 value;
int err;
match = of_match_node(tegra_mc_of_match, pdev->dev.of_node);
if (!match)
return -ENODEV;
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
platform_set_drvdata(pdev, mc);
mc->soc = match->data;
mc->dev = &pdev->dev;
/* length of MC tick in nanoseconds */
mc->tick = 30;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mc->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mc->regs))
return PTR_ERR(mc->regs);
mc->clk = devm_clk_get(&pdev->dev, "mc");
if (IS_ERR(mc->clk)) {
dev_err(&pdev->dev, "failed to get MC clock: %ld\n",
PTR_ERR(mc->clk));
return PTR_ERR(mc->clk);
}
err = tegra_mc_setup_latency_allowance(mc);
if (err < 0) {
dev_err(&pdev->dev, "failed to setup latency allowance: %d\n",
err);
return err;
}
if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU)) {
mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
if (IS_ERR(mc->smmu)) {
dev_err(&pdev->dev, "failed to probe SMMU: %ld\n",
PTR_ERR(mc->smmu));
return PTR_ERR(mc->smmu);
}
}
mc->irq = platform_get_irq(pdev, 0);
if (mc->irq < 0) {
dev_err(&pdev->dev, "interrupt not specified\n");
return mc->irq;
}
err = devm_request_irq(&pdev->dev, mc->irq, tegra_mc_irq, IRQF_SHARED,
dev_name(&pdev->dev), mc);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq,
err);
return err;
}
value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
MC_INT_ARBITRATION_EMEM | MC_INT_SECURITY_VIOLATION |
MC_INT_DECERR_EMEM;
mc_writel(mc, value, MC_INTMASK);
return 0;
}
static struct platform_driver tegra_mc_driver = {
.driver = {
.name = "tegra-mc",
.of_match_table = tegra_mc_of_match,
.suppress_bind_attrs = true,
},
.prevent_deferred_probe = true,
.probe = tegra_mc_probe,
};
static int tegra_mc_init(void)
{
return platform_driver_register(&tegra_mc_driver);
}
arch_initcall(tegra_mc_init);
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra Memory Controller driver");
MODULE_LICENSE("GPL v2");
/*
* Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef MEMORY_TEGRA_MC_H
#define MEMORY_TEGRA_MC_H
#include <linux/io.h>
#include <linux/types.h>
#include <soc/tegra/mc.h>
static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset)
{
return readl(mc->regs + offset);
}
static inline void mc_writel(struct tegra_mc *mc, u32 value,
unsigned long offset)
{
writel(value, mc->regs + offset);
}
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
extern const struct tegra_mc_soc tegra30_mc_soc;
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
extern const struct tegra_mc_soc tegra114_mc_soc;
#endif
#ifdef CONFIG_ARCH_TEGRA_124_SOC
extern const struct tegra_mc_soc tegra124_mc_soc;
#endif
#endif /* MEMORY_TEGRA_MC_H */
/*
* Tegra30 Memory Controller
*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#define DRV_NAME "tegra30-mc"
#define MC_INTSTATUS 0x0
#define MC_INTMASK 0x4
#define MC_INT_ERR_SHIFT 6
#define MC_INT_ERR_MASK (0x1f << MC_INT_ERR_SHIFT)
#define MC_INT_DECERR_EMEM BIT(MC_INT_ERR_SHIFT)
#define MC_INT_SECURITY_VIOLATION BIT(MC_INT_ERR_SHIFT + 2)
#define MC_INT_ARBITRATION_EMEM BIT(MC_INT_ERR_SHIFT + 3)
#define MC_INT_INVALID_SMMU_PAGE BIT(MC_INT_ERR_SHIFT + 4)
#define MC_ERR_STATUS 0x8
#define MC_ERR_ADR 0xc
#define MC_ERR_TYPE_SHIFT 28
#define MC_ERR_TYPE_MASK (7 << MC_ERR_TYPE_SHIFT)
#define MC_ERR_TYPE_DECERR_EMEM 2
#define MC_ERR_TYPE_SECURITY_TRUSTZONE 3
#define MC_ERR_TYPE_SECURITY_CARVEOUT 4
#define MC_ERR_TYPE_INVALID_SMMU_PAGE 6
#define MC_ERR_INVALID_SMMU_PAGE_SHIFT 25
#define MC_ERR_INVALID_SMMU_PAGE_MASK (7 << MC_ERR_INVALID_SMMU_PAGE_SHIFT)
#define MC_ERR_RW_SHIFT 16
#define MC_ERR_RW BIT(MC_ERR_RW_SHIFT)
#define MC_ERR_SECURITY BIT(MC_ERR_RW_SHIFT + 1)
#define SECURITY_VIOLATION_TYPE BIT(30) /* 0=TRUSTZONE, 1=CARVEOUT */
#define MC_EMEM_ARB_CFG 0x90
#define MC_EMEM_ARB_OUTSTANDING_REQ 0x94
#define MC_EMEM_ARB_TIMING_RCD 0x98
#define MC_EMEM_ARB_TIMING_RP 0x9c
#define MC_EMEM_ARB_TIMING_RC 0xa0
#define MC_EMEM_ARB_TIMING_RAS 0xa4
#define MC_EMEM_ARB_TIMING_FAW 0xa8
#define MC_EMEM_ARB_TIMING_RRD 0xac
#define MC_EMEM_ARB_TIMING_RAP2PRE 0xb0
#define MC_EMEM_ARB_TIMING_WAP2PRE 0xb4
#define MC_EMEM_ARB_TIMING_R2R 0xb8
#define MC_EMEM_ARB_TIMING_W2W 0xbc
#define MC_EMEM_ARB_TIMING_R2W 0xc0
#define MC_EMEM_ARB_TIMING_W2R 0xc4
#define MC_EMEM_ARB_DA_TURNS 0xd0
#define MC_EMEM_ARB_DA_COVERS 0xd4
#define MC_EMEM_ARB_MISC0 0xd8
#define MC_EMEM_ARB_MISC1 0xdc
#define MC_EMEM_ARB_RING3_THROTTLE 0xe4
#define MC_EMEM_ARB_OVERRIDE 0xe8
#define MC_TIMING_CONTROL 0xfc
#define MC_CLIENT_ID_MASK 0x7f
#define NUM_MC_REG_BANKS 4
struct tegra30_mc {
void __iomem *regs[NUM_MC_REG_BANKS];
struct device *dev;
u32 ctx[0];
};
static inline u32 mc_readl(struct tegra30_mc *mc, u32 offs)
{
u32 val = 0;
if (offs < 0x10)
val = readl(mc->regs[0] + offs);
else if (offs < 0x1f0)
val = readl(mc->regs[1] + offs - 0x3c);
else if (offs < 0x228)
val = readl(mc->regs[2] + offs - 0x200);
else if (offs < 0x400)
val = readl(mc->regs[3] + offs - 0x284);
return val;
}
static inline void mc_writel(struct tegra30_mc *mc, u32 val, u32 offs)
{
if (offs < 0x10)
writel(val, mc->regs[0] + offs);
else if (offs < 0x1f0)
writel(val, mc->regs[1] + offs - 0x3c);
else if (offs < 0x228)
writel(val, mc->regs[2] + offs - 0x200);
else if (offs < 0x400)
writel(val, mc->regs[3] + offs - 0x284);
}
static const char * const tegra30_mc_client[] = {
"csr_ptcr",
"cbr_display0a",
"cbr_display0ab",
"cbr_display0b",
"cbr_display0bb",
"cbr_display0c",
"cbr_display0cb",
"cbr_display1b",
"cbr_display1bb",
"cbr_eppup",
"cbr_g2pr",
"cbr_g2sr",
"cbr_mpeunifbr",
"cbr_viruv",
"csr_afir",
"csr_avpcarm7r",
"csr_displayhc",
"csr_displayhcb",
"csr_fdcdrd",
"csr_fdcdrd2",
"csr_g2dr",
"csr_hdar",
"csr_host1xdmar",
"csr_host1xr",
"csr_idxsrd",
"csr_idxsrd2",
"csr_mpe_ipred",
"csr_mpeamemrd",
"csr_mpecsrd",
"csr_ppcsahbdmar",
"csr_ppcsahbslvr",
"csr_satar",
"csr_texsrd",
"csr_texsrd2",
"csr_vdebsevr",
"csr_vdember",
"csr_vdemcer",
"csr_vdetper",
"csr_mpcorelpr",
"csr_mpcorer",
"cbw_eppu",
"cbw_eppv",
"cbw_eppy",
"cbw_mpeunifbw",
"cbw_viwsb",
"cbw_viwu",
"cbw_viwv",
"cbw_viwy",
"ccw_g2dw",
"csw_afiw",
"csw_avpcarm7w",
"csw_fdcdwr",
"csw_fdcdwr2",
"csw_hdaw",
"csw_host1xw",
"csw_ispw",
"csw_mpcorelpw",
"csw_mpcorew",
"csw_mpecswr",
"csw_ppcsahbdmaw",
"csw_ppcsahbslvw",
"csw_sataw",
"csw_vdebsevw",
"csw_vdedbgw",
"csw_vdembew",
"csw_vdetpmw",
};
static void tegra30_mc_decode(struct tegra30_mc *mc, int n)
{
u32 err, addr;
const char * const mc_int_err[] = {
"MC_DECERR",
"Unknown",
"MC_SECURITY_ERR",
"MC_ARBITRATION_EMEM",
"MC_SMMU_ERR",
};
const char * const err_type[] = {
"Unknown",
"Unknown",
"DECERR_EMEM",
"SECURITY_TRUSTZONE",
"SECURITY_CARVEOUT",
"Unknown",
"INVALID_SMMU_PAGE",
"Unknown",
};
char attr[6];
int cid, perm, type, idx;
const char *client = "Unknown";
idx = n - MC_INT_ERR_SHIFT;
if ((idx < 0) || (idx >= ARRAY_SIZE(mc_int_err)) || (idx == 1)) {
dev_err_ratelimited(mc->dev, "Unknown interrupt status %08lx\n",
BIT(n));
return;
}
err = mc_readl(mc, MC_ERR_STATUS);
type = (err & MC_ERR_TYPE_MASK) >> MC_ERR_TYPE_SHIFT;
perm = (err & MC_ERR_INVALID_SMMU_PAGE_MASK) >>
MC_ERR_INVALID_SMMU_PAGE_SHIFT;
if (type == MC_ERR_TYPE_INVALID_SMMU_PAGE)
sprintf(attr, "%c-%c-%c",
(perm & BIT(2)) ? 'R' : '-',
(perm & BIT(1)) ? 'W' : '-',
(perm & BIT(0)) ? 'S' : '-');
else
attr[0] = '\0';
cid = err & MC_CLIENT_ID_MASK;
if (cid < ARRAY_SIZE(tegra30_mc_client))
client = tegra30_mc_client[cid];
addr = mc_readl(mc, MC_ERR_ADR);
dev_err_ratelimited(mc->dev, "%s (0x%08x): 0x%08x %s (%s %s %s %s)\n",
mc_int_err[idx], err, addr, client,
(err & MC_ERR_SECURITY) ? "secure" : "non-secure",
(err & MC_ERR_RW) ? "write" : "read",
err_type[type], attr);
}
static const u32 tegra30_mc_ctx[] = {
MC_EMEM_ARB_CFG,
MC_EMEM_ARB_OUTSTANDING_REQ,
MC_EMEM_ARB_TIMING_RCD,
MC_EMEM_ARB_TIMING_RP,
MC_EMEM_ARB_TIMING_RC,
MC_EMEM_ARB_TIMING_RAS,
MC_EMEM_ARB_TIMING_FAW,
MC_EMEM_ARB_TIMING_RRD,
MC_EMEM_ARB_TIMING_RAP2PRE,
MC_EMEM_ARB_TIMING_WAP2PRE,
MC_EMEM_ARB_TIMING_R2R,
MC_EMEM_ARB_TIMING_W2W,
MC_EMEM_ARB_TIMING_R2W,
MC_EMEM_ARB_TIMING_W2R,
MC_EMEM_ARB_DA_TURNS,
MC_EMEM_ARB_DA_COVERS,
MC_EMEM_ARB_MISC0,
MC_EMEM_ARB_MISC1,
MC_EMEM_ARB_RING3_THROTTLE,
MC_EMEM_ARB_OVERRIDE,
MC_INTMASK,
};
#ifdef CONFIG_PM
static int tegra30_mc_suspend(struct device *dev)
{
int i;
struct tegra30_mc *mc = dev_get_drvdata(dev);
for (i = 0; i < ARRAY_SIZE(tegra30_mc_ctx); i++)
mc->ctx[i] = mc_readl(mc, tegra30_mc_ctx[i]);
return 0;
}
static int tegra30_mc_resume(struct device *dev)
{
int i;
struct tegra30_mc *mc = dev_get_drvdata(dev);
for (i = 0; i < ARRAY_SIZE(tegra30_mc_ctx); i++)
mc_writel(mc, mc->ctx[i], tegra30_mc_ctx[i]);
mc_writel(mc, 1, MC_TIMING_CONTROL);
/* Read-back to ensure that write reached */
mc_readl(mc, MC_TIMING_CONTROL);
return 0;
}
#endif
static UNIVERSAL_DEV_PM_OPS(tegra30_mc_pm,
tegra30_mc_suspend,
tegra30_mc_resume, NULL);
static const struct of_device_id tegra30_mc_of_match[] = {
{ .compatible = "nvidia,tegra30-mc", },
{},
};
static irqreturn_t tegra30_mc_isr(int irq, void *data)
{
u32 stat, mask, bit;
struct tegra30_mc *mc = data;
stat = mc_readl(mc, MC_INTSTATUS);
mask = mc_readl(mc, MC_INTMASK);
mask &= stat;
if (!mask)
return IRQ_NONE;
while ((bit = ffs(mask)) != 0) {
tegra30_mc_decode(mc, bit - 1);
mask &= ~BIT(bit - 1);
}
mc_writel(mc, stat, MC_INTSTATUS);
return IRQ_HANDLED;
}
static int tegra30_mc_probe(struct platform_device *pdev)
{
struct resource *irq;
struct tegra30_mc *mc;
size_t bytes;
int err, i;
u32 intmask;
bytes = sizeof(*mc) + sizeof(u32) * ARRAY_SIZE(tegra30_mc_ctx);
mc = devm_kzalloc(&pdev->dev, bytes, GFP_KERNEL);
if (!mc)
return -ENOMEM;
mc->dev = &pdev->dev;
for (i = 0; i < ARRAY_SIZE(mc->regs); i++) {
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
mc->regs[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mc->regs[i]))
return PTR_ERR(mc->regs[i]);
}
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq)
return -ENODEV;
err = devm_request_irq(&pdev->dev, irq->start, tegra30_mc_isr,
IRQF_SHARED, dev_name(&pdev->dev), mc);
if (err)
return -ENODEV;
platform_set_drvdata(pdev, mc);
intmask = MC_INT_INVALID_SMMU_PAGE |
MC_INT_DECERR_EMEM | MC_INT_SECURITY_VIOLATION;
mc_writel(mc, intmask, MC_INTMASK);
return 0;
}
static struct platform_driver tegra30_mc_driver = {
.probe = tegra30_mc_probe,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
.of_match_table = tegra30_mc_of_match,
.pm = &tegra30_mc_pm,
},
};
module_platform_driver(tegra30_mc_driver);
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_DESCRIPTION("Tegra30 MC driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRV_NAME);
......@@ -49,7 +49,7 @@
#define TEGRA114_CLK_I2S0 30
/* 31 */
/* 32 */
#define TEGRA114_CLK_MC 32
/* 33 */
#define TEGRA114_CLK_APBDMA 34
/* 35 */
......
......@@ -48,7 +48,7 @@
#define TEGRA124_CLK_I2S0 30
/* 31 */
/* 32 */
#define TEGRA124_CLK_MC 32
/* 33 */
#define TEGRA124_CLK_APBDMA 34
/* 35 */
......
......@@ -49,7 +49,7 @@
/* 30 */
#define TEGRA20_CLK_CACHE2 31
#define TEGRA20_CLK_MEM 32
#define TEGRA20_CLK_MC 32
#define TEGRA20_CLK_AHBDMA 33
#define TEGRA20_CLK_APBDMA 34
/* 35 */
......