Commit a6016aac authored by Alexander Lobakin, committed by Christoph Hellwig

dma: fix DMA sync for drivers not calling dma_set_mask*()

There are several reports that the DMA sync shortcut broke non-coherent
devices.
dev->dma_need_sync is false right after the &device allocation, and if a
driver never calls dma_set_mask*(), it stays false even when the device
is not DMA-coherent and thus needs synchronizing. For historical
reasons, a lot of drivers still don't call it.
Invert the boolean, so that the sync is performed by default and the
shortcut is enabled only when dma_set_mask*() is called.
Reported-by: Steven Price <steven.price@arm.com>
Closes: https://lore.kernel.org/lkml/010686f5-3049-46a1-8230-7752a1b433ff@arm.com
Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Closes: https://lore.kernel.org/lkml/46160534-5003-4809-a408-6b3a3f4921e9@samsung.com
Fixes: f406c8e4 ("dma: avoid redundant calls for sync operations")
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Steven Price <steven.price@arm.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
parent 163943ac
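
For illustration, a minimal probe sketch (hypothetical mydev_probe name; error paths trimmed) of the only driver-side path that arms the shortcut: dma_set_mask*() ends in dma_setup_need_sync(), so a driver that never calls it now keeps the safe default and always gets its buffers synced.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/*
 * Hypothetical probe: dma_set_mask_and_coherent() is the call that
 * reaches dma_setup_need_sync() and may set dev->dma_skip_sync on
 * DMA-coherent devices. A driver that skips this call keeps the new
 * default (dma_skip_sync == false), so syncs are always performed.
 */
static int mydev_probe(struct platform_device *pdev)
{
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* ... rest of probe ... */
	return 0;
}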
@@ -691,7 +691,7 @@ struct device_physical_location {
  *		and optionall (if the coherent mask is large enough) also
  *		for dma allocations. This flag is managed by the dma ops
  *		instance from ->dma_supported.
- * @dma_need_sync: The device needs performing DMA sync operations.
+ * @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
  *
  * At the lowest level, every device in a Linux system is represented by an
  * instance of struct device. The device structure contains the information
@@ -805,7 +805,7 @@ struct device {
 	bool			dma_ops_bypass : 1;
 #endif
 #ifdef CONFIG_DMA_NEED_SYNC
-	bool			dma_need_sync:1;
+	bool			dma_skip_sync:1;
 #endif
 };
...
@@ -280,8 +280,8 @@ static inline void dma_reset_need_sync(struct device *dev)
 {
 #ifdef CONFIG_DMA_NEED_SYNC
 	/* Reset it only once so that the function can be called on hotpath */
-	if (unlikely(!dev->dma_need_sync))
-		dev->dma_need_sync = true;
+	if (unlikely(dev->dma_skip_sync))
+		dev->dma_skip_sync = false;
 #endif
 }
...
@@ -295,7 +295,7 @@ bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);
 static inline bool dma_dev_need_sync(const struct device *dev)
 {
 	/* Always call DMA sync operations when debugging is enabled */
-	return dev->dma_need_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
+	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
 }

 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
...
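
This predicate is what the inline fast-path wrappers test before dispatching to the out-of-line sync helpers. Roughly, as a sketch of the wrapper shape from the original shortcut series (not part of this diff):

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	/* Skip the call entirely when syncing is provably unnecessary */
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

With the old default of dma_need_sync == false, a driver that never called dma_set_mask*() had the sync silently skipped here even on a non-coherent device; with the inverted flag, the skip only happens after dma_setup_need_sync() has proven it safe.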
@@ -392,7 +392,7 @@ bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr)
 	if (dma_map_direct(dev, ops))
 		/*
-		 * dma_need_sync could've been reset on first SWIOTLB buffer
+		 * dma_skip_sync could've been reset on first SWIOTLB buffer
 		 * mapping, but @dma_addr is not necessary an SWIOTLB buffer.
 		 * In this case, fall back to more granular check.
 		 */
@@ -407,20 +407,20 @@ static void dma_setup_need_sync(struct device *dev)
 	if (dma_map_direct(dev, ops) || (ops->flags & DMA_F_CAN_SKIP_SYNC))
 		/*
-		 * dma_need_sync will be reset to %true on first SWIOTLB buffer
+		 * dma_skip_sync will be reset to %false on first SWIOTLB buffer
 		 * mapping, if any. During the device initialization, it's
 		 * enough to check only for the DMA coherence.
 		 */
-		dev->dma_need_sync = !dev_is_dma_coherent(dev);
+		dev->dma_skip_sync = dev_is_dma_coherent(dev);
 	else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu &&
 		 !ops->sync_sg_for_device && !ops->sync_sg_for_cpu)
 		/*
 		 * Synchronization is not possible when none of DMA sync ops
 		 * is set.
 		 */
-		dev->dma_need_sync = false;
+		dev->dma_skip_sync = true;
 	else
-		dev->dma_need_sync = true;
+		dev->dma_skip_sync = false;
 }
...
@@ -1409,7 +1409,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	}
 	/*
-	 * If dma_need_sync wasn't set, reset it on first SWIOTLB buffer
+	 * If dma_skip_sync was set, reset it on first SWIOTLB buffer
 	 * mapping to always sync SWIOTLB buffers.
 	 */
 	dma_reset_need_sync(dev);
...