Commit fd522d27 authored by Arnd Bergmann's avatar Arnd Bergmann

Merge tag 'of-iommu-configure' of...

Merge tag 'of-iommu-configure' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into next/iommu-config

Pull "Automatic DMA configuration for OF-based IOMMU masters" from Will Deacon:

This series adds automatic IOMMU and DMA-mapping configuration for
OF-based DMA masters described using the generic IOMMU devicetree
bindings. Although there is plenty of future work around splitting up
iommu_ops, adding default IOMMU domains and sorting out automatic IOMMU
group creation for the platform_bus, this is already useful enough for
people to port over their IOMMU drivers and start using the new probing
infrastructure (indeed, Marek has patches queued for the Exynos IOMMU).

* tag 'of-iommu-configure' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux:
  iommu: store DT-probed IOMMU data privately
  arm: dma-mapping: plumb our iommu mapping ops into arch_setup_dma_ops
  arm: call iommu_init before of_platform_populate
  dma-mapping: detect and configure IOMMU in of_dma_configure
  iommu: fix initialization without 'add_device' callback
  iommu: provide helper function to configure an IOMMU for an of master
  iommu: add new iommu_ops callback for adding an OF device
  dma-mapping: replace set_arch_dma_coherent_ops with arch_setup_dma_ops
  iommu: provide early initialisation hook for IOMMU drivers
Signed-off-by: default avatarArnd Bergmann <arnd@arndb.de>
parents 5d01410f a42a7a1f
...@@ -121,12 +121,12 @@ static inline unsigned long dma_max_pfn(struct device *dev) ...@@ -121,12 +121,12 @@ static inline unsigned long dma_max_pfn(struct device *dev)
} }
#define dma_max_pfn(dev) dma_max_pfn(dev) #define dma_max_pfn(dev) dma_max_pfn(dev)
static inline int set_arch_dma_coherent_ops(struct device *dev) #define arch_setup_dma_ops arch_setup_dma_ops
{ extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
set_dma_ops(dev, &arm_coherent_dma_ops); struct iommu_ops *iommu, bool coherent);
return 0;
} #define arch_teardown_dma_ops arch_teardown_dma_ops
#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev) extern void arch_teardown_dma_ops(struct device *dev);
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{ {
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/screen_info.h> #include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kexec.h> #include <linux/kexec.h>
...@@ -806,6 +807,7 @@ static int __init customize_machine(void) ...@@ -806,6 +807,7 @@ static int __init customize_machine(void)
* machine from the device tree, if no callback is provided, * machine from the device tree, if no callback is provided,
* otherwise we would always need an init_machine callback. * otherwise we would always need an init_machine callback.
*/ */
of_iommu_init();
if (machine_desc->init_machine) if (machine_desc->init_machine)
machine_desc->init_machine(); machine_desc->init_machine();
#ifdef CONFIG_OF #ifdef CONFIG_OF
......
...@@ -1947,9 +1947,8 @@ EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); ...@@ -1947,9 +1947,8 @@ EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
* arm_iommu_create_mapping) * arm_iommu_create_mapping)
* *
* Attaches specified io address space mapping to the provided device, * Attaches specified io address space mapping to the provided device,
* this replaces the dma operations (dma_map_ops pointer) with the * More than one client might be attached to the same io address space
* IOMMU aware version. More than one client might be attached to * mapping.
* the same io address space mapping.
*/ */
int arm_iommu_attach_device(struct device *dev, int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping) struct dma_iommu_mapping *mapping)
...@@ -1962,7 +1961,6 @@ int arm_iommu_attach_device(struct device *dev, ...@@ -1962,7 +1961,6 @@ int arm_iommu_attach_device(struct device *dev,
kref_get(&mapping->kref); kref_get(&mapping->kref);
dev->archdata.mapping = mapping; dev->archdata.mapping = mapping;
set_dma_ops(dev, &iommu_ops);
pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
return 0; return 0;
...@@ -1974,7 +1972,6 @@ EXPORT_SYMBOL_GPL(arm_iommu_attach_device); ...@@ -1974,7 +1972,6 @@ EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
* @dev: valid struct device pointer * @dev: valid struct device pointer
* *
* Detaches the provided device from a previously attached map. * Detaches the provided device from a previously attached map.
* This voids the dma operations (dma_map_ops pointer)
*/ */
void arm_iommu_detach_device(struct device *dev) void arm_iommu_detach_device(struct device *dev)
{ {
...@@ -1989,10 +1986,82 @@ void arm_iommu_detach_device(struct device *dev) ...@@ -1989,10 +1986,82 @@ void arm_iommu_detach_device(struct device *dev)
iommu_detach_device(mapping->domain, dev); iommu_detach_device(mapping->domain, dev);
kref_put(&mapping->kref, release_iommu_mapping); kref_put(&mapping->kref, release_iommu_mapping);
dev->archdata.mapping = NULL; dev->archdata.mapping = NULL;
set_dma_ops(dev, NULL);
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
} }
EXPORT_SYMBOL_GPL(arm_iommu_detach_device); EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
#endif static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
return coherent ? &iommu_coherent_ops : &iommu_ops;
}
/*
 * Create an IOMMU-backed DMA mapping covering [dma_base, dma_base + size)
 * and attach @dev to it.
 *
 * Returns true when the device is now using an IOMMU mapping, false when
 * no IOMMU was supplied or setup failed — the caller then falls back to
 * the plain DMA ops.  Failures are logged but non-fatal.
 */
static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    struct iommu_ops *iommu)
{
	struct dma_iommu_mapping *mapping;

	if (!iommu)
		return false;

	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
				size, dev_name(dev));
		return false;
	}

	if (arm_iommu_attach_device(dev, mapping)) {
		/* fixed message: was "Failed to attached ... IOMMU_mapping" */
		pr_warn("Failed to attach device %s to IOMMU mapping\n",
				dev_name(dev));
		/* Drop the reference taken by arm_iommu_create_mapping(). */
		arm_iommu_release_mapping(mapping);
		return false;
	}

	return true;
}
/* Undo arm_setup_iommu_dma_ops(): detach @dev and drop its mapping. */
static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	/* Save the mapping first: detaching clears dev->archdata.mapping. */
	struct dma_iommu_mapping *map = dev->archdata.mapping;

	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(map);
}
#else

/* !CONFIG_ARM_DMA_USE_IOMMU: no-op stubs so callers need no #ifdefs. */
static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    struct iommu_ops *iommu)
{
	return false;	/* never behind an IOMMU in this configuration */
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

/* Without IOMMU support the "IOMMU" ops selector is just the plain one. */
#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops

#endif /* CONFIG_ARM_DMA_USE_IOMMU */
/* Pick the plain (non-IOMMU) DMA ops appropriate for a coherent device. */
static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
	if (coherent)
		return &arm_coherent_dma_ops;
	return &arm_dma_ops;
}
/*
 * Arch hook invoked from of_dma_configure(): install IOMMU-backed DMA ops
 * when an IOMMU mapping can be set up for @dev, otherwise the plain ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			struct iommu_ops *iommu, bool coherent)
{
	bool behind_iommu = arm_setup_iommu_dma_ops(dev, dma_base, size, iommu);

	set_dma_ops(dev, behind_iommu ? arm_get_iommu_dma_map_ops(coherent)
				      : arm_get_dma_map_ops(coherent));
}
/* Arch hook: undo arch_setup_dma_ops() when a device goes away. */
void arch_teardown_dma_ops(struct device *dev)
{
	arm_teardown_iommu_dma_ops(dev);
}
...@@ -15,7 +15,7 @@ if IOMMU_SUPPORT ...@@ -15,7 +15,7 @@ if IOMMU_SUPPORT
config OF_IOMMU config OF_IOMMU
def_bool y def_bool y
depends on OF depends on OF && IOMMU_API
config FSL_PAMU config FSL_PAMU
bool "Freescale IOMMU support" bool "Freescale IOMMU support"
......
...@@ -737,7 +737,7 @@ static int add_iommu_group(struct device *dev, void *data) ...@@ -737,7 +737,7 @@ static int add_iommu_group(struct device *dev, void *data)
const struct iommu_ops *ops = cb->ops; const struct iommu_ops *ops = cb->ops;
if (!ops->add_device) if (!ops->add_device)
return -ENODEV; return 0;
WARN_ON(dev->iommu_group); WARN_ON(dev->iommu_group);
......
...@@ -18,9 +18,14 @@ ...@@ -18,9 +18,14 @@
*/ */
#include <linux/export.h> #include <linux/export.h>
#include <linux/iommu.h>
#include <linux/limits.h> #include <linux/limits.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_iommu.h> #include <linux/of_iommu.h>
#include <linux/slab.h>
static const struct of_device_id __iommu_of_table_sentinel
__used __section(__iommu_of_table_end);
/** /**
* of_get_dma_window - Parse *dma-window property and returns 0 if found. * of_get_dma_window - Parse *dma-window property and returns 0 if found.
...@@ -89,3 +94,87 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index, ...@@ -89,3 +94,87 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(of_get_dma_window); EXPORT_SYMBOL_GPL(of_get_dma_window);
/*
 * Registry entry pairing a device-tree IOMMU node with the iommu_ops its
 * driver registered via of_iommu_set_ops().
 */
struct of_iommu_node {
	struct list_head list;	/* link in of_iommu_list */
	struct device_node *np;	/* the IOMMU's DT node */
	struct iommu_ops *ops;	/* ops registered for that node */
};
static LIST_HEAD(of_iommu_list);	/* all registered entries */
static DEFINE_SPINLOCK(of_iommu_lock);	/* protects of_iommu_list */
/*
 * Record @ops as the iommu_ops belonging to DT node @np, for later lookup
 * by of_iommu_get_ops().  An allocation failure is warned about and the
 * registration silently dropped.
 */
void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
{
	struct of_iommu_node *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (WARN_ON(!entry))
		return;

	entry->np = np;
	entry->ops = ops;
	INIT_LIST_HEAD(&entry->list);

	spin_lock(&of_iommu_lock);
	list_add_tail(&entry->list, &of_iommu_list);
	spin_unlock(&of_iommu_lock);
}
struct iommu_ops *of_iommu_get_ops(struct device_node *np)
{
struct of_iommu_node *node;
struct iommu_ops *ops = NULL;
spin_lock(&of_iommu_lock);
list_for_each_entry(node, &of_iommu_list, list)
if (node->np == np) {
ops = node->ops;
break;
}
spin_unlock(&of_iommu_lock);
return ops;
}
/*
 * Parse the "iommus" property of @dev's DT node, handing each specifier to
 * the matching IOMMU driver's ->of_xlate() callback.  Returns the iommu_ops
 * of the (last) IOMMU translated for the device, or NULL when the device
 * has no usable IOMMU: no "iommus" property, an unregistered IOMMU node,
 * a driver without of_xlate, or an of_xlate failure.
 */
struct iommu_ops *of_iommu_configure(struct device *dev)
{
	struct of_phandle_args iommu_spec;
	struct device_node *np;
	struct iommu_ops *ops = NULL;
	int idx = 0;

	/*
	 * We don't currently walk up the tree looking for a parent IOMMU.
	 * See the `Notes:' section of
	 * Documentation/devicetree/bindings/iommu/iommu.txt
	 */
	while (!of_parse_phandle_with_args(dev->of_node, "iommus",
					   "#iommu-cells", idx,
					   &iommu_spec)) {
		np = iommu_spec.np;
		ops = of_iommu_get_ops(np);

		/* Give up entirely on the first IOMMU we cannot translate for. */
		if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
			goto err_put_node;

		of_node_put(np);
		idx++;
	}

	return ops;

err_put_node:
	/* Drop the node reference taken by of_parse_phandle_with_args(). */
	of_node_put(np);
	return NULL;
}
/*
 * Walk the linker-built IOMMU of_device_id table (entries registered with
 * IOMMU_OF_DECLARE) and run the early init hook for every matching node
 * present in the device tree.
 */
void __init of_iommu_init(void)
{
	const struct of_device_id *matches = &__iommu_of_table;
	const struct of_device_id *match;
	struct device_node *np;

	for_each_matching_node_and_match(np, matches, &match) {
		of_iommu_init_fn init_fn = match->data;

		if (init_fn(np))
			pr_err("Failed to initialise IOMMU %s\n",
			       of_node_full_name(np));
	}
}
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/of_address.h> #include <linux/of_address.h>
#include <linux/of_device.h> #include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h> #include <linux/of_irq.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
...@@ -164,6 +165,9 @@ static void of_dma_configure(struct device *dev) ...@@ -164,6 +165,9 @@ static void of_dma_configure(struct device *dev)
{ {
u64 dma_addr, paddr, size; u64 dma_addr, paddr, size;
int ret; int ret;
bool coherent;
unsigned long offset;
struct iommu_ops *iommu;
/* /*
* Set default dma-mask to 32 bit. Drivers are expected to setup * Set default dma-mask to 32 bit. Drivers are expected to setup
...@@ -178,28 +182,30 @@ static void of_dma_configure(struct device *dev) ...@@ -178,28 +182,30 @@ static void of_dma_configure(struct device *dev)
if (!dev->dma_mask) if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask; dev->dma_mask = &dev->coherent_dma_mask;
/*
* if dma-coherent property exist, call arch hook to setup
* dma coherent operations.
*/
if (of_dma_is_coherent(dev->of_node)) {
set_arch_dma_coherent_ops(dev);
dev_dbg(dev, "device is dma coherent\n");
}
/*
* if dma-ranges property doesn't exist - just return else
* setup the dma offset
*/
ret = of_dma_get_range(dev->of_node, &dma_addr, &paddr, &size); ret = of_dma_get_range(dev->of_node, &dma_addr, &paddr, &size);
if (ret < 0) { if (ret < 0) {
dev_dbg(dev, "no dma range information to setup\n"); dma_addr = offset = 0;
return; size = dev->coherent_dma_mask;
} else {
offset = PFN_DOWN(paddr - dma_addr);
dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
} }
dev->dma_pfn_offset = offset;
coherent = of_dma_is_coherent(dev->of_node);
dev_dbg(dev, "device is%sdma coherent\n",
coherent ? " " : " not ");
iommu = of_iommu_configure(dev);
dev_dbg(dev, "device is%sbehind an iommu\n",
iommu ? " " : " not ");
/* DMA ranges found. Calculate and set dma_pfn_offset */ arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
dev->dma_pfn_offset = PFN_DOWN(paddr - dma_addr); }
dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
static void of_dma_deconfigure(struct device *dev)
{
arch_teardown_dma_ops(dev);
} }
/** /**
...@@ -228,16 +234,12 @@ static struct platform_device *of_platform_device_create_pdata( ...@@ -228,16 +234,12 @@ static struct platform_device *of_platform_device_create_pdata(
if (!dev) if (!dev)
goto err_clear_flag; goto err_clear_flag;
of_dma_configure(&dev->dev);
dev->dev.bus = &platform_bus_type; dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data; dev->dev.platform_data = platform_data;
of_dma_configure(&dev->dev);
/* We do not fill the DMA ops for platform devices by default.
* This is currently the responsibility of the platform code
* to do such, possibly using a device notifier
*/
if (of_device_add(dev) != 0) { if (of_device_add(dev) != 0) {
of_dma_deconfigure(&dev->dev);
platform_device_put(dev); platform_device_put(dev);
goto err_clear_flag; goto err_clear_flag;
} }
......
...@@ -164,6 +164,7 @@ ...@@ -164,6 +164,7 @@
#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc) #define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon) #define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
...@@ -497,6 +498,7 @@ ...@@ -497,6 +498,7 @@
CLK_OF_TABLES() \ CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \ RESERVEDMEM_OF_TABLES() \
CLKSRC_OF_TABLES() \ CLKSRC_OF_TABLES() \
IOMMU_OF_TABLES() \
CPU_METHOD_OF_TABLES() \ CPU_METHOD_OF_TABLES() \
KERNEL_DTB() \ KERNEL_DTB() \
IRQCHIP_OF_MATCH_TABLE() \ IRQCHIP_OF_MATCH_TABLE() \
......
...@@ -129,11 +129,14 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) ...@@ -129,11 +129,14 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
extern u64 dma_get_required_mask(struct device *dev); extern u64 dma_get_required_mask(struct device *dev);
#ifndef set_arch_dma_coherent_ops #ifndef arch_setup_dma_ops
static inline int set_arch_dma_coherent_ops(struct device *dev) static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
{ u64 size, struct iommu_ops *iommu,
return 0; bool coherent) { }
} #endif
#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif #endif
static inline unsigned int dma_get_max_seg_size(struct device *dev) static inline unsigned int dma_get_max_seg_size(struct device *dev)
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/of.h>
#include <linux/types.h> #include <linux/types.h>
#include <trace/events/iommu.h> #include <trace/events/iommu.h>
...@@ -102,7 +103,9 @@ enum iommu_attr { ...@@ -102,7 +103,9 @@ enum iommu_attr {
* @remove_device: remove device from iommu grouping * @remove_device: remove device from iommu grouping
* @domain_get_attr: Query domain attributes * @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes * @domain_set_attr: Change domain attributes
* @of_xlate: add OF master IDs to iommu grouping
* @pgsize_bitmap: bitmap of supported page sizes * @pgsize_bitmap: bitmap of supported page sizes
* @priv: per-instance data private to the iommu driver
*/ */
struct iommu_ops { struct iommu_ops {
bool (*capable)(enum iommu_cap); bool (*capable)(enum iommu_cap);
...@@ -132,7 +135,12 @@ struct iommu_ops { ...@@ -132,7 +135,12 @@ struct iommu_ops {
/* Get the numer of window per domain */ /* Get the numer of window per domain */
u32 (*domain_get_windows)(struct iommu_domain *domain); u32 (*domain_get_windows)(struct iommu_domain *domain);
#ifdef CONFIG_OF_IOMMU
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
#endif
unsigned long pgsize_bitmap; unsigned long pgsize_bitmap;
void *priv;
}; };
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
......
#ifndef __OF_IOMMU_H #ifndef __OF_IOMMU_H
#define __OF_IOMMU_H #define __OF_IOMMU_H
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/of.h>
#ifdef CONFIG_OF_IOMMU #ifdef CONFIG_OF_IOMMU
extern int of_get_dma_window(struct device_node *dn, const char *prefix, extern int of_get_dma_window(struct device_node *dn, const char *prefix,
int index, unsigned long *busno, dma_addr_t *addr, int index, unsigned long *busno, dma_addr_t *addr,
size_t *size); size_t *size);
extern void of_iommu_init(void);
extern struct iommu_ops *of_iommu_configure(struct device *dev);
#else #else
static inline int of_get_dma_window(struct device_node *dn, const char *prefix, static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
...@@ -16,6 +23,22 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix, ...@@ -16,6 +23,22 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
return -EINVAL; return -EINVAL;
} }
static inline void of_iommu_init(void) { }
static inline struct iommu_ops *of_iommu_configure(struct device *dev)
{
return NULL;
}
#endif /* CONFIG_OF_IOMMU */ #endif /* CONFIG_OF_IOMMU */
void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
struct iommu_ops *of_iommu_get_ops(struct device_node *np);
extern struct of_device_id __iommu_of_table;
typedef int (*of_iommu_init_fn)(struct device_node *);
#define IOMMU_OF_DECLARE(name, compat, fn) \
_OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn)
#endif /* __OF_IOMMU_H */ #endif /* __OF_IOMMU_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment