Commit f406c8e4 authored by Alexander Lobakin, committed by Christoph Hellwig

dma: avoid redundant calls for sync operations

Quite often, devices do not need dma_sync operations, at least on
x86_64. Indeed, when dev_is_dma_coherent(dev) is true and
dev_use_swiotlb(dev) is false, iommu_dma_sync_single_for_cpu() and
friends do nothing.

However, indirectly calling them when CONFIG_RETPOLINE=y consumes about
10% of cycles on a CPU receiving packets from softirq at ~100 Gbit rate.
Even when CONFIG_RETPOLINE is not set, there is a cost of about 3%.

Add a dev->dma_need_sync boolean and turn it off during the device
initialization (dma_set_mask()) depending on the setup:
dev_is_dma_coherent() for direct DMA, and for non-NULL DMA ops either
the absence of the ->sync_single_for_device/->sync_single_for_cpu
callbacks or the new dma_map_ops flag, %DMA_F_CAN_SKIP_SYNC.
Later, if/when swiotlb is used for the first time, the flag is reset
back to on from swiotlb_tbl_map_single().

On iavf, a UDP trafficgen test with XDP_DROP in skb mode shows a 3-5%
performance increase for direct DMA.
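
For illustration, here is a minimal sketch of the kind of driver hot
path this speeds up; example_rx_poll() and its parameters are
hypothetical and not part of this patch. With the change, when
dev->dma_need_sync stays false (coherent device, no SWIOTLB) and
CONFIG_DMA_API_DEBUG is off, both sync calls below become inline no-ops
instead of indirect calls through the dma_map_ops:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    static void example_rx_poll(struct device *dev, dma_addr_t buf_dma,
                                size_t buf_len)
    {
            /* hand the buffer over to the CPU before parsing the frame */
            dma_sync_single_for_cpu(dev, buf_dma, buf_len, DMA_FROM_DEVICE);

            /* ... process the received frame ... */

            /* give the buffer back to the device for reuse */
            dma_sync_single_for_device(dev, buf_dma, buf_len, DMA_FROM_DEVICE);
    }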

Suggested-by: Christoph Hellwig <hch@lst.de> # direct DMA shortcut
Co-developed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent fe7514b1
@@ -691,6 +691,7 @@ struct device_physical_location {
  * and optionall (if the coherent mask is large enough) also
  * for dma allocations. This flag is managed by the dma ops
  * instance from ->dma_supported.
+ * @dma_need_sync: The device needs to perform DMA sync operations.
  *
  * At the lowest level, every device in a Linux system is represented by an
  * instance of struct device. The device structure contains the information
@@ -803,6 +804,9 @@ struct device {
 #ifdef CONFIG_DMA_OPS_BYPASS
         bool                    dma_ops_bypass : 1;
 #endif
+#ifdef CONFIG_DMA_NEED_SYNC
+        bool                    dma_need_sync:1;
+#endif
 };

 /**
@@ -18,8 +18,11 @@ struct iommu_ops;
  *
  * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
  * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
+ * DMA_F_CAN_SKIP_SYNC: DMA sync operations can be skipped if the device is
+ * coherent and it's not an SWIOTLB buffer.
  */
 #define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0)
+#define DMA_F_CAN_SKIP_SYNC (1 << 1)

 struct dma_map_ops {
         unsigned int flags;
@@ -273,6 +276,15 @@ static inline bool dev_is_dma_coherent(struct device *dev)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */

+static inline void dma_reset_need_sync(struct device *dev)
+{
+#ifdef CONFIG_DMA_NEED_SYNC
+        /* Reset it only once so that the function can be called on hotpath */
+        if (unlikely(!dev->dma_need_sync))
+                dev->dma_need_sync = true;
+#endif
+}
+
 /*
  * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
  */
@@ -282,16 +282,59 @@ static inline int dma_mmap_noncontiguous(struct device *dev,
 #endif /* CONFIG_HAS_DMA */

 #if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                 enum dma_data_direction dir);
-void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
                 size_t size, enum dma_data_direction dir);
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                 int nelems, enum dma_data_direction dir);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                 int nelems, enum dma_data_direction dir);
-bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+
+static inline bool dma_dev_need_sync(const struct device *dev)
+{
+        /* Always call DMA sync operations when debugging is enabled */
+        return dev->dma_need_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+                size_t size, enum dma_data_direction dir)
+{
+        if (dma_dev_need_sync(dev))
+                __dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+                dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+        if (dma_dev_need_sync(dev))
+                __dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+                struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+        if (dma_dev_need_sync(dev))
+                __dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+                struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+        if (dma_dev_need_sync(dev))
+                __dma_sync_sg_for_device(dev, sg, nelems, dir);
+}
+
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+        return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
+}
 #else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+static inline bool dma_dev_need_sync(const struct device *dev)
+{
+        return false;
+}
 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                 size_t size, enum dma_data_direction dir)
 {
@@ -330,7 +330,7 @@ void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
 EXPORT_SYMBOL(dma_unmap_resource);

 #ifdef CONFIG_DMA_NEED_SYNC
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                 enum dma_data_direction dir)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -342,9 +342,9 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                 ops->sync_single_for_cpu(dev, addr, size, dir);
         debug_dma_sync_single_for_cpu(dev, addr, size, dir);
 }
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
+EXPORT_SYMBOL(__dma_sync_single_for_cpu);

-void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
                 size_t size, enum dma_data_direction dir)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -356,9 +356,9 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
                 ops->sync_single_for_device(dev, addr, size, dir);
         debug_dma_sync_single_for_device(dev, addr, size, dir);
 }
-EXPORT_SYMBOL(dma_sync_single_for_device);
+EXPORT_SYMBOL(__dma_sync_single_for_device);

-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                 int nelems, enum dma_data_direction dir)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -370,9 +370,9 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                 ops->sync_sg_for_cpu(dev, sg, nelems, dir);
         debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+EXPORT_SYMBOL(__dma_sync_sg_for_cpu);

-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                 int nelems, enum dma_data_direction dir)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -384,18 +384,47 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                 ops->sync_sg_for_device(dev, sg, nelems, dir);
         debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
+EXPORT_SYMBOL(__dma_sync_sg_for_device);

-bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);

         if (dma_map_direct(dev, ops))
+                /*
+                 * dma_need_sync could've been reset on first SWIOTLB buffer
+                 * mapping, but @dma_addr is not necessarily an SWIOTLB buffer.
+                 * In this case, fall back to more granular check.
+                 */
                 return dma_direct_need_sync(dev, dma_addr);
-        return ops->sync_single_for_cpu || ops->sync_single_for_device;
+        return true;
 }
-EXPORT_SYMBOL_GPL(dma_need_sync);
-#endif /* CONFIG_DMA_NEED_SYNC */
+EXPORT_SYMBOL_GPL(__dma_need_sync);
+
+static void dma_setup_need_sync(struct device *dev)
+{
+        const struct dma_map_ops *ops = get_dma_ops(dev);
+
+        if (dma_map_direct(dev, ops) || (ops->flags & DMA_F_CAN_SKIP_SYNC))
+                /*
+                 * dma_need_sync will be reset to %true on first SWIOTLB buffer
+                 * mapping, if any. During the device initialization, it's
+                 * enough to check only for the DMA coherence.
+                 */
+                dev->dma_need_sync = !dev_is_dma_coherent(dev);
+        else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu &&
+                 !ops->sync_sg_for_device && !ops->sync_sg_for_cpu)
+                /*
+                 * Synchronization is not possible when none of DMA sync ops
+                 * is set.
+                 */
+                dev->dma_need_sync = false;
+        else
+                dev->dma_need_sync = true;
+}
+#else /* !CONFIG_DMA_NEED_SYNC */
+static inline void dma_setup_need_sync(struct device *dev) { }
+#endif /* !CONFIG_DMA_NEED_SYNC */

 /*
  * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
@@ -785,6 +814,8 @@ int dma_set_mask(struct device *dev, u64 mask)

         arch_dma_set_mask(dev, mask);
         *dev->dma_mask = mask;
+        dma_setup_need_sync(dev);
+
         return 0;
 }
 EXPORT_SYMBOL(dma_set_mask);
@@ -1408,6 +1408,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
                 return (phys_addr_t)DMA_MAPPING_ERROR;
         }

+        /*
+         * If dma_need_sync wasn't set, reset it on first SWIOTLB buffer
+         * mapping to always sync SWIOTLB buffers.
+         */
+        dma_reset_need_sync(dev);
+
         /*
          * Save away the mapping from the original address to the DMA address.
          * This is needed when we sync the memory. Then we sync the buffer if
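
As a usage note, a consumer that keeps per-buffer state can pair this
with dma_need_sync() to cache the answer once per mapping and skip even
the inline check on its own fast path. A minimal sketch under that
assumption; struct example_rx_buf and the helpers below are made-up
names, not taken from this patch or any existing driver:

    #include <linux/dma-mapping.h>

    struct example_rx_buf {
            dma_addr_t      dma;
            size_t          len;
            bool            need_sync;
    };

    static int example_map_rx_buf(struct device *dev,
                                  struct example_rx_buf *buf, void *data)
    {
            buf->dma = dma_map_single(dev, data, buf->len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, buf->dma))
                    return -ENOMEM;

            /* true only if this particular mapping really needs syncs */
            buf->need_sync = dma_need_sync(dev, buf->dma);
            return 0;
    }

    static void example_rx_buf_for_cpu(struct device *dev,
                                       struct example_rx_buf *buf)
    {
            if (buf->need_sync)
                    dma_sync_single_for_cpu(dev, buf->dma, buf->len,
                                            DMA_FROM_DEVICE);
    }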