Commit ffe3dfd4 authored by Christoph Hellwig, committed by Michael Ellerman

powerpc/dma: stop overriding dma_get_required_mask

The ppc_md and pci_controller_ops methods are unused now and can be
removed.  The dma_nommu implementation is identical to the generic one
except for using max_pfn instead of calling into the memblock API,
and all other dma_map_ops instances implement a method of their own.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 2d6ad41b
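For context, the generic dma_get_required_mask() fallback that powerpc now relies on (dma_default_get_required_mask() in the kernel/dma hunk below) sizes the mask to cover all of RAM, derived from max_pfn and rounded up to the next power of two minus one. The following is a rough user-space sketch of that calculation, not the kernel code itself; PAGE_SHIFT, the fls32() helper and the 8GB sample value are assumptions for illustration only.

/*
 * Rough user-space sketch of the generic default mask calculation:
 * the required mask covers all of RAM (max_pfn pages), rounded up to
 * the next power of two minus one.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4K pages for this sketch */

/* highest set bit, 1-based, similar to the kernel's fls() */
static int fls32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static uint64_t default_get_required_mask(uint64_t max_pfn)
{
	uint32_t low_totalram  = (uint32_t)((max_pfn - 1) << PAGE_SHIFT);
	uint32_t high_totalram = (uint32_t)((max_pfn - 1) >> (32 - PAGE_SHIFT));
	uint64_t mask;

	if (!high_totalram) {
		/* RAM fits below 4GB: mask just covers totalram */
		low_totalram = 1u << (fls32(low_totalram) - 1);
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		/* RAM extends above 4GB: widen the upper half of the mask */
		high_totalram = 1u << (fls32(high_totalram) - 1);
		high_totalram += high_totalram - 1;
		mask = ((uint64_t)high_totalram << 32) + 0xffffffff;
	}
	return mask;
}

int main(void)
{
	/* e.g. 8GB of RAM -> max_pfn = 8GB / 4KB pages (hypothetical value) */
	uint64_t max_pfn = (8ULL << 30) >> PAGE_SHIFT;

	printf("required mask: %#llx\n",
	       (unsigned long long)default_get_required_mask(max_pfn));
	return 0;
}

With 8GB of RAM the sketch prints 0x1ffffffff, i.e. a 33-bit required mask, which a driver can then compare against its device's addressing capability.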
@@ -59,6 +59,4 @@ struct pdev_archdata {
 	u64 dma_mask;
 };
-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
 #endif /* _ASM_POWERPC_DEVICE_H */
@@ -112,7 +112,5 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
 #define HAVE_ARCH_DMA_SET_MASK 1
-extern u64 __dma_get_required_mask(struct device *dev);
 #endif /* __KERNEL__ */
 #endif /* _ASM_DMA_MAPPING_H */
@@ -47,9 +47,7 @@ struct machdep_calls {
 #endif
 #endif /* CONFIG_PPC64 */
-	/* Platform set_dma_mask and dma_get_required_mask overrides */
 	int		(*dma_set_mask)(struct device *dev, u64 dma_mask);
-	u64		(*dma_get_required_mask)(struct device *dev);
 	int		(*probe)(void);
 	void		(*setup_arch)(void); /* Optional, may be NULL */
@@ -46,7 +46,6 @@ struct pci_controller_ops {
 #endif
 	int		(*dma_set_mask)(struct pci_dev *pdev, u64 dma_mask);
-	u64		(*dma_get_required_mask)(struct pci_dev *pdev);
 	void		(*shutdown)(struct pci_controller *hose);
 };
@@ -318,35 +318,6 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
 }
 EXPORT_SYMBOL(dma_set_mask);
-u64 __dma_get_required_mask(struct device *dev)
-{
-	const struct dma_map_ops *dma_ops = get_dma_ops(dev);
-	if (unlikely(dma_ops == NULL))
-		return 0;
-	if (dma_ops->get_required_mask)
-		return dma_ops->get_required_mask(dev);
-	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
-}
-u64 dma_get_required_mask(struct device *dev)
-{
-	if (ppc_md.dma_get_required_mask)
-		return ppc_md.dma_get_required_mask(dev);
-	if (dev_is_pci(dev)) {
-		struct pci_dev *pdev = to_pci_dev(dev);
-		struct pci_controller *phb = pci_bus_to_host(pdev->bus);
-		if (phb->controller_ops.dma_get_required_mask)
-			return phb->controller_ops.dma_get_required_mask(pdev);
-	}
-	return __dma_get_required_mask(dev);
-}
-EXPORT_SYMBOL_GPL(dma_get_required_mask);
 static int __init dma_init(void)
 {
 #ifdef CONFIG_IBMVIO
@@ -207,7 +207,6 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL(dma_mmap_attrs);
-#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
 static u64 dma_default_get_required_mask(struct device *dev)
 {
 	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
@@ -238,7 +237,6 @@ u64 dma_get_required_mask(struct device *dev)
 	return dma_default_get_required_mask(dev);
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
-#endif
 #ifndef arch_dma_alloc_attrs
 #define arch_dma_alloc_attrs(dev) (true)