Commit d24f9c69 authored by Milton Miller, committed by Benjamin Herrenschmidt

powerpc: Use the newly added get_required_mask dma_map_ops hook

Now that the generic code has dma_map_ops set, instead of having a
messy ifdef & if block in the base dma_get_required_mask hook, push
the computation into the dma ops.

If the ops does not set the get_required_mask hook, default to the
width of dma_addr_t.

This also corrects ibmebus_dma_supported to require a 64-bit
mask.  I doubt anything is checking or setting the dma mask on
that bus.
Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-kernel@vger.kernel.org
Cc: benh@kernel.crashing.org
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 3a8f7558
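
The resulting generic hook is a simple dispatch: use the bus's get_required_mask hook when the dma_map_ops provides one, otherwise fall back to a mask as wide as dma_addr_t. Below is a minimal, standalone userspace sketch of that fallback order, not the kernel code itself; the fake_dev and fake_ops types and the sample_32bit_mask callback are invented stand-ins for struct device and struct dma_map_ops.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;
/* Same definition the kernel uses for DMA_BIT_MASK(). */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Stand-ins for struct device and struct dma_map_ops (illustration only). */
struct fake_dev;
struct fake_ops {
        uint64_t (*get_required_mask)(struct fake_dev *dev);
};
struct fake_dev {
        struct fake_ops *ops;
};

/* Mirrors the fallback order of the new generic dma_get_required_mask(). */
static uint64_t get_required_mask(struct fake_dev *dev)
{
        if (!dev->ops)
                return 0;
        if (dev->ops->get_required_mask)
                return dev->ops->get_required_mask(dev);
        /* No hook provided: default to the width of dma_addr_t. */
        return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}

/* Example per-bus hook, like the 32-bit mask the ps3 bus reports. */
static uint64_t sample_32bit_mask(struct fake_dev *dev)
{
        (void)dev;
        return DMA_BIT_MASK(32);
}

int main(void)
{
        struct fake_ops with_hook = { .get_required_mask = sample_32bit_mask };
        struct fake_ops without_hook = { 0 };
        struct fake_dev a = { .ops = &with_hook };
        struct fake_dev b = { .ops = &without_hook };

        printf("with hook:    %#llx\n", (unsigned long long)get_required_mask(&a));
        printf("without hook: %#llx\n", (unsigned long long)get_required_mask(&b));
        return 0;
}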
@@ -37,4 +37,6 @@ struct pdev_archdata {
         u64 dma_mask;
 };
 
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
 #endif /* _ASM_POWERPC_DEVICE_H */
@@ -20,8 +20,6 @@
 
 #define DMA_ERROR_CODE          (~(dma_addr_t)0x0)
 
-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-
 /* Some dma direct funcs must be visible for use in other dma_ops */
 extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag);
@@ -71,7 +69,6 @@ static inline unsigned long device_to_mask(struct device *dev)
  */
 #ifdef CONFIG_PPC64
 extern struct dma_map_ops dma_iommu_ops;
-extern u64 dma_iommu_get_required_mask(struct device *dev);
 #endif
 extern struct dma_map_ops dma_direct_ops;
 
...
@@ -90,7 +90,7 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
                 return 1;
 }
 
-u64 dma_iommu_get_required_mask(struct device *dev)
+static u64 dma_iommu_get_required_mask(struct device *dev)
 {
         struct iommu_table *tbl = get_iommu_table_base(dev);
         u64 mask;
@@ -111,5 +111,6 @@ struct dma_map_ops dma_iommu_ops = {
         .dma_supported = dma_iommu_dma_supported,
         .map_page = dma_iommu_map_page,
         .unmap_page = dma_iommu_unmap_page,
+        .get_required_mask = dma_iommu_get_required_mask,
 };
 EXPORT_SYMBOL(dma_iommu_ops);
@@ -24,6 +24,21 @@
 
 unsigned int ppc_swiotlb_enable;
 
+static u64 swiotlb_powerpc_get_required(struct device *dev)
+{
+        u64 end, mask, max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
+
+        end = memblock_end_of_DRAM();
+        if (max_direct_dma_addr && end > max_direct_dma_addr)
+                end = max_direct_dma_addr;
+        end += get_dma_offset(dev);
+
+        mask = 1ULL << (fls64(end) - 1);
+        mask += mask - 1;
+
+        return mask;
+}
+
 /*
  * At the moment, all platforms that use this code only require
  * swiotlb to be used if we're operating on HIGHMEM.  Since
@@ -44,6 +59,7 @@ struct dma_map_ops swiotlb_dma_ops = {
         .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
         .sync_sg_for_device = swiotlb_sync_sg_for_device,
         .mapping_error = swiotlb_dma_mapping_error,
+        .get_required_mask = swiotlb_powerpc_get_required,
 };
 
 void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
...
@@ -96,6 +96,18 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
 #endif
 }
 
+static u64 dma_direct_get_required_mask(struct device *dev)
+{
+        u64 end, mask;
+
+        end = memblock_end_of_DRAM() + get_dma_offset(dev);
+
+        mask = 1ULL << (fls64(end) - 1);
+        mask += mask - 1;
+
+        return mask;
+}
+
 static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                              struct page *page,
                                              unsigned long offset,
@@ -144,6 +156,7 @@ struct dma_map_ops dma_direct_ops = {
         .dma_supported = dma_direct_dma_supported,
         .map_page = dma_direct_map_page,
         .unmap_page = dma_direct_unmap_page,
+        .get_required_mask = dma_direct_get_required_mask,
 #ifdef CONFIG_NOT_COHERENT_CACHE
         .sync_single_for_cpu = dma_direct_sync_single,
         .sync_single_for_device = dma_direct_sync_single,
@@ -173,7 +186,6 @@ EXPORT_SYMBOL(dma_set_mask);
 u64 dma_get_required_mask(struct device *dev)
 {
         struct dma_map_ops *dma_ops = get_dma_ops(dev);
-        u64 mask, end = 0;
 
         if (ppc_md.dma_get_required_mask)
                 return ppc_md.dma_get_required_mask(dev);
@@ -181,31 +193,10 @@ u64 dma_get_required_mask(struct device *dev)
 
         if (unlikely(dma_ops == NULL))
                 return 0;
 
-#ifdef CONFIG_PPC64
-        else if (dma_ops == &dma_iommu_ops)
-                return dma_iommu_get_required_mask(dev);
-#endif
-#ifdef CONFIG_SWIOTLB
-        else if (dma_ops == &swiotlb_dma_ops) {
-                u64 max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
-
-                end = memblock_end_of_DRAM();
-                if (max_direct_dma_addr && end > max_direct_dma_addr)
-                        end = max_direct_dma_addr;
-                end += get_dma_offset(dev);
-        }
-#endif
-        else if (dma_ops == &dma_direct_ops)
-                end = memblock_end_of_DRAM() + get_dma_offset(dev);
-        else {
-                WARN_ONCE(1, "%s: unknown ops %p\n", __func__, dma_ops);
-                end = memblock_end_of_DRAM();
-        }
+        if (dma_ops->get_required_mask)
+                return dma_ops->get_required_mask(dev);
 
-        mask = 1ULL << (fls64(end) - 1);
-        mask += mask - 1;
-
-        return mask;
+        return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
...
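
Both dma_direct_get_required_mask() and swiotlb_powerpc_get_required() above compute the mask the same way: take the highest DMA address the device can see (end), isolate its top set bit with fls64(), and double that minus one, giving an all-ones mask at least as wide as end without overflowing when 64 bits are needed. A small standalone check of that arithmetic, with required_mask() and __builtin_clzll() as my own stand-ins for the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* Same two-step arithmetic as the patch; highbit mirrors the kernel's fls64(end). */
static uint64_t required_mask(uint64_t end)
{
        int highbit = 64 - __builtin_clzll(end);   /* 1-based index of the top set bit (end != 0) */
        uint64_t mask = 1ULL << (highbit - 1);     /* largest power of two <= end */

        mask += mask - 1;                          /* == (1ULL << highbit) - 1, with no 64-bit overflow */
        return mask;
}

int main(void)
{
        /* 2 GiB of RAM needs a 32-bit mask; a 4 GiB boundary needs 33 bits. */
        printf("%#llx\n", (unsigned long long)required_mask(0x80000000ULL));   /* 0xffffffff */
        printf("%#llx\n", (unsigned long long)required_mask(0x100000000ULL));  /* 0x1ffffffff */
        return 0;
}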
@@ -125,7 +125,12 @@ static void ibmebus_unmap_sg(struct device *dev,
 
 static int ibmebus_dma_supported(struct device *dev, u64 mask)
 {
-        return 1;
+        return mask == DMA_BIT_MASK(64);
+}
+
+static u64 ibmebus_dma_get_required_mask(struct device *dev)
+{
+        return DMA_BIT_MASK(64);
 }
 
 static struct dma_map_ops ibmebus_dma_ops = {
@@ -134,6 +139,7 @@ static struct dma_map_ops ibmebus_dma_ops = {
         .map_sg = ibmebus_map_sg,
         .unmap_sg = ibmebus_unmap_sg,
         .dma_supported = ibmebus_dma_supported,
+        .get_required_mask = ibmebus_dma_get_required_mask,
         .map_page = ibmebus_map_page,
         .unmap_page = ibmebus_unmap_page,
 };
...
@@ -605,6 +605,11 @@ static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
         return dma_iommu_ops.dma_supported(dev, mask);
 }
 
+static u64 vio_dma_get_required_mask(struct device *dev)
+{
+        return dma_iommu_ops.get_required_mask(dev);
+}
+
 struct dma_map_ops vio_dma_mapping_ops = {
         .alloc_coherent = vio_dma_iommu_alloc_coherent,
         .free_coherent = vio_dma_iommu_free_coherent,
@@ -613,7 +618,7 @@ struct dma_map_ops vio_dma_mapping_ops = {
         .map_page = vio_dma_iommu_map_page,
         .unmap_page = vio_dma_iommu_unmap_page,
         .dma_supported = vio_dma_iommu_dma_supported,
-
+        .get_required_mask = vio_dma_get_required_mask,
 };
 
 /**
...
@@ -1161,11 +1161,20 @@ __setup("iommu_fixed=", setup_iommu_fixed);
 
 static u64 cell_dma_get_required_mask(struct device *dev)
 {
+        struct dma_map_ops *dma_ops;
+
         if (!dev->dma_mask)
                 return 0;
 
-        if (iommu_fixed_disabled && get_dma_ops(dev) == &dma_iommu_ops)
-                return dma_iommu_get_required_mask(dev);
+        if (!iommu_fixed_disabled &&
+                        cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
+                return DMA_BIT_MASK(64);
+
+        dma_ops = get_dma_ops(dev);
+        if (dma_ops->get_required_mask)
+                return dma_ops->get_required_mask(dev);
+
+        WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);
 
         return DMA_BIT_MASK(64);
 }
...
@@ -695,12 +695,18 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
         return mask >= DMA_BIT_MASK(32);
 }
 
+static u64 ps3_dma_get_required_mask(struct device *_dev)
+{
+        return DMA_BIT_MASK(32);
+}
+
 static struct dma_map_ops ps3_sb_dma_ops = {
         .alloc_coherent = ps3_alloc_coherent,
         .free_coherent = ps3_free_coherent,
         .map_sg = ps3_sb_map_sg,
         .unmap_sg = ps3_sb_unmap_sg,
         .dma_supported = ps3_dma_supported,
+        .get_required_mask = ps3_dma_get_required_mask,
         .map_page = ps3_sb_map_page,
         .unmap_page = ps3_unmap_page,
 };
@@ -711,6 +717,7 @@ static struct dma_map_ops ps3_ioc0_dma_ops = {
         .map_sg = ps3_ioc0_map_sg,
         .unmap_sg = ps3_ioc0_unmap_sg,
         .dma_supported = ps3_dma_supported,
+        .get_required_mask = ps3_dma_get_required_mask,
         .map_page = ps3_ioc0_map_page,
         .unmap_page = ps3_unmap_page,
 };
...
@@ -1099,7 +1099,7 @@ static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
                 return DMA_BIT_MASK(64);
         }
 
-        return dma_iommu_get_required_mask(dev);
+        return dma_iommu_ops.get_required_mask(dev);
 }
 
 #else  /* CONFIG_PCI */
...