Commit 68c9ac1d authored by Christoph Hellwig

dma-mapping: remove the mapping_error dma_map_ops method

No users are left except for vmd, which just forwards it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a4abe0ad
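
With the callback removed, error reporting relies entirely on the DMA_MAPPING_ERROR sentinel: a mapping routine returns it directly on failure instead of stashing a private error cookie for a later ->mapping_error() query, which is what makes vmd's forwarding wrapper below dead code. As a rough sketch of the ops side (foo_map_page() and foo_iommu_map() are hypothetical names, not part of this commit):

/*
 * Sketch only, assuming a hypothetical foo_iommu_map() that returns 0
 * on failure: with ->mapping_error gone, ->map_page reports failure by
 * returning the DMA_MAPPING_ERROR sentinel itself.
 */
static dma_addr_t foo_map_page(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               enum dma_data_direction dir,
                               unsigned long attrs)
{
        dma_addr_t addr = foo_iommu_map(dev, page_to_phys(page) + offset, size);

        if (!addr)      /* hypothetical failure indication */
                return DMA_MAPPING_ERROR;
        return addr;
}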
@@ -394,11 +394,6 @@ static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
 }
 
-static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
-{
-	return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
-}
-
 static int vmd_dma_supported(struct device *dev, u64 mask)
 {
 	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
@@ -446,7 +441,6 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd)
 	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
 	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
 	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
-	ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
 	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
 	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
 	add_dma_domain(domain);
@@ -128,7 +128,6 @@ struct dma_map_ops {
 			enum dma_data_direction dir);
 	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
 			enum dma_data_direction direction);
-	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
 	int (*dma_supported)(struct device *dev, u64 mask);
 	u64 (*get_required_mask)(struct device *dev);
 };
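
With the field gone from struct dma_map_ops, an ops instance simply omits it; a hypothetical instance (names illustrative, continuing the sketch above) might look like:

static const struct dma_map_ops foo_dma_ops = {
        .map_page      = foo_map_page,  /* returns DMA_MAPPING_ERROR on failure */
        .dma_supported = foo_dma_supported,
};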
@@ -580,12 +579,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
 	debug_dma_mapping_error(dev, dma_addr);
 
-	if (ops->mapping_error)
-		return ops->mapping_error(dev, dma_addr);
 	if (dma_addr == DMA_MAPPING_ERROR)
 		return 1;
 	return 0;
 }
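
Callers are unaffected: drivers still test the returned handle with dma_mapping_error(), which now reduces to the sentinel comparison above. A minimal caller-side sketch (example_map() is an illustrative name, not from this commit):

#include <linux/dma-mapping.h>

/* Illustrative caller: map a buffer and validate the handle before use. */
static int example_map(struct device *dev, void *buf, size_t len,
                       dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *handle))
                return -ENOMEM; /* mapping failed */
        return 0;
}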