Commit 6a5c7be5 authored by Milton Miller, committed by Benjamin Herrenschmidt

powerpc: Override dma_get_required_mask by platform hook and ops

The hook dma_get_required_mask is supposed to return the mask required
by the platform to operate efficiently.  The generic version of
dma_get_required_mask in drivers/base/platform.c returns a mask based
only on max_pfn.  However, this is likely too big for iommu systems
and could be too small for platforms that require a dma offset or have
a secondary window at a high offset.

Override the default: provide a hook in ppc_md (used by pseries lpar and
cell), provide a default answer based on memblock_end_of_DRAM() adjusted
by get_dma_offset(), and provide an implementation for iommu that looks
at the defined table size.  Converting from the end address to the
required bit mask follows the generic implementation.
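
A standalone sketch of that end-address to required-mask conversion (not
part of the patch; the function name end_to_required_mask is made up and
fls64() is open-coded so the example builds in user space):

/* Take the highest set bit of the last addressable byte, then set every
 * bit below it as well - mirroring the generic conversion this patch copies. */
#include <stdint.h>
#include <stdio.h>

static uint64_t end_to_required_mask(uint64_t end)
{
        int high = 63;
        uint64_t mask;

        while (high > 0 && !(end & (1ULL << high)))     /* open-coded fls64(end) - 1 */
                high--;

        mask = 1ULL << high;    /* highest bit needed to reach 'end' */
        mask += mask - 1;       /* fill in all the lower bits */
        return mask;
}

int main(void)
{
        /* e.g. 4GB of DRAM and no dma offset: prints 0x1ffffffff, i.e. 33 bits */
        printf("%#llx\n", (unsigned long long)end_to_required_mask(0x100000000ULL));
        return 0;
}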

The need for this was discovered when the qla2xxx driver switched to
64-bit DMA and then reverted to 32-bit when dma_get_required_mask said
32 bits was sufficient.

Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-kernel@vger.kernel.org
Cc: benh@kernel.crashing.org
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent ce395088
@@ -20,6 +20,8 @@
 #define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
 /* Some dma direct funcs must be visible for use in other dma_ops */
 extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 				       dma_addr_t *dma_handle, gfp_t flag);
@@ -69,6 +71,7 @@ static inline unsigned long device_to_mask(struct device *dev)
  */
 #ifdef CONFIG_PPC64
 extern struct dma_map_ops dma_iommu_ops;
+extern u64 dma_iommu_get_required_mask(struct device *dev);
 #endif
 extern struct dma_map_ops dma_direct_ops;
@@ -85,8 +85,9 @@ struct machdep_calls {
 	void		(*pci_dma_dev_setup)(struct pci_dev *dev);
 	void		(*pci_dma_bus_setup)(struct pci_bus *bus);
 
-	/* Platform set_dma_mask override */
+	/* Platform set_dma_mask and dma_get_required_mask overrides */
 	int		(*dma_set_mask)(struct device *dev, u64 dma_mask);
+	u64		(*dma_get_required_mask)(struct device *dev);
 
 	int		(*probe)(void);
 	void		(*setup_arch)(void); /* Optional, may be NULL */
@@ -90,6 +90,19 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
 
+u64 dma_iommu_get_required_mask(struct device *dev)
+{
+	struct iommu_table *tbl = get_iommu_table_base(dev);
+	u64 mask;
+	if (!tbl)
+		return 0;
+
+	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
+	mask += mask - 1;
+
+	return mask;
+}
+
 struct dma_map_ops dma_iommu_ops = {
 	.alloc_coherent		= dma_iommu_alloc_coherent,
 	.free_coherent		= dma_iommu_free_coherent,
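
Worked example for the routine above, using made-up numbers: if
it_offset + it_size is 0x80000, fls_long() returns 20, so mask becomes
1ULL << 19 = 0x80000, and mask += mask - 1 yields 0xfffff, a mask
covering every index the table can map.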
@@ -170,6 +170,45 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
 }
 EXPORT_SYMBOL(dma_set_mask);
 
+u64 dma_get_required_mask(struct device *dev)
+{
+	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+	u64 mask, end = 0;
+
+	if (ppc_md.dma_get_required_mask)
+		return ppc_md.dma_get_required_mask(dev);
+
+	if (unlikely(dma_ops == NULL))
+		return 0;
+#ifdef CONFIG_PPC64
+	else if (dma_ops == &dma_iommu_ops)
+		return dma_iommu_get_required_mask(dev);
+#endif
+#ifdef CONFIG_SWIOTLB
+	else if (dma_ops == &swiotlb_dma_ops) {
+		u64 max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
+
+		end = memblock_end_of_DRAM();
+		if (max_direct_dma_addr && end > max_direct_dma_addr)
+			end = max_direct_dma_addr;
+		end += get_dma_offset(dev);
+	}
+#endif
+	else if (dma_ops == &dma_direct_ops)
+		end = memblock_end_of_DRAM() + get_dma_offset(dev);
+	else {
+		WARN_ONCE(1, "%s: unknown ops %p\n", __func__, dma_ops);
+		end = memblock_end_of_DRAM();
+	}
+
+	mask = 1ULL << (fls64(end) - 1);
+	mask += mask - 1;
+
+	return mask;
+}
+EXPORT_SYMBOL_GPL(dma_get_required_mask);
+
 static int __init dma_init(void)
 {
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
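
For context, a hedged sketch of how a PCI driver might consume this
export (the helper name example_setup_dma is hypothetical; the DMA API
calls are the standard ones the qla2xxx case above relies on):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical probe-time helper: prefer 64-bit DMA only when the
 * platform reports that more than 32 bits are actually required. */
static int example_setup_dma(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;

	if (dma_get_required_mask(dev) > DMA_BIT_MASK(32) &&
	    !dma_set_mask(dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(dev, DMA_BIT_MASK(64)))
		return 0;	/* 64-bit streaming and coherent masks accepted */

	return dma_set_mask(dev, DMA_BIT_MASK(32));	/* otherwise fall back to 32-bit */
}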
@@ -1159,6 +1159,17 @@ static int __init setup_iommu_fixed(char *str)
 }
 __setup("iommu_fixed=", setup_iommu_fixed);
 
+static u64 cell_dma_get_required_mask(struct device *dev)
+{
+	if (!dev->dma_mask)
+		return 0;
+
+	if (iommu_fixed_disabled && get_dma_ops(dev) == &dma_iommu_ops)
+		return dma_iommu_get_required_mask(dev);
+
+	return DMA_BIT_MASK(64);
+}
+
 static int __init cell_iommu_init(void)
 {
 	struct device_node *np;
@@ -1175,6 +1186,7 @@ static int __init cell_iommu_init(void)
 
 	/* Setup various ppc_md. callbacks */
 	ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+	ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
 	ppc_md.tce_build = tce_build_cell;
 	ppc_md.tce_free = tce_free_cell;
@@ -1077,12 +1077,38 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
 	return 0;
 }
 
+static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
+{
+	if (!dev->dma_mask)
+		return 0;
+
+	if (!disable_ddw && dev_is_pci(dev)) {
+		struct pci_dev *pdev = to_pci_dev(dev);
+		struct device_node *dn;
+
+		dn = pci_device_to_OF_node(pdev);
+
+		/* search upwards for ibm,dma-window */
+		for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table;
+				dn = dn->parent)
+			if (of_get_property(dn, "ibm,dma-window", NULL))
+				break;
+		/* if there is an ibm,ddw-applicable property require 64 bits */
+		if (dn && PCI_DN(dn) &&
+				of_get_property(dn, "ibm,ddw-applicable", NULL))
+			return DMA_BIT_MASK(64);
+	}
+
+	return dma_iommu_get_required_mask(dev);
+}
+
 #else  /* CONFIG_PCI */
 #define pci_dma_bus_setup_pSeries	NULL
 #define pci_dma_dev_setup_pSeries	NULL
 #define pci_dma_bus_setup_pSeriesLP	NULL
 #define pci_dma_dev_setup_pSeriesLP	NULL
 #define dma_set_mask_pSeriesLP		NULL
+#define dma_get_required_mask_pSeriesLP	NULL
 #endif /* !CONFIG_PCI */
 
 static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
@@ -1186,6 +1212,7 @@ void iommu_init_early_pSeries(void)
 		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
 		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
 		ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
+		ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
 	} else {
 		ppc_md.tce_build = tce_build_pSeries;
 		ppc_md.tce_free = tce_free_pSeries;