Commit 7e024575 authored by Christoph Hellwig, committed by Daniel Borkmann

xsk: Use dma_need_sync instead of reimplementing it

Use the dma_need_sync helper instead of (not always entirely correctly)
poking into the dma-mapping internals.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20200629130359.2690853-5-hch@lst.de
parent 53937ff7
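
For background: dma_need_sync() (declared in <linux/dma-mapping.h>, introduced earlier in this same series) tells a driver whether dma_sync_single_for_cpu()/dma_sync_single_for_device() are actually required for a given mapping, so the answer can be cached once at map time instead of being guessed from swiotlb and coherence internals. A minimal sketch of that pattern outside xsk, with hypothetical names (my_rx_ring, my_map_one, my_recv_one):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical per-ring state: cache whether syncs are ever needed. */
struct my_rx_ring {
	struct device *dev;
	dma_addr_t dma;
	bool need_sync;
};

static int my_map_one(struct my_rx_ring *ring, struct page *page)
{
	ring->dma = dma_map_page(ring->dev, page, 0, PAGE_SIZE,
				 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ring->dev, ring->dma))
		return -ENOMEM;

	/* Ask the DMA core once at map time; hot paths test the cached bit. */
	ring->need_sync = dma_need_sync(ring->dev, ring->dma);
	return 0;
}

static void my_recv_one(struct my_rx_ring *ring, size_t len)
{
	/* Skip the sync entirely when the DMA core says it is a no-op. */
	if (ring->need_sync)
		dma_sync_single_for_cpu(ring->dev, ring->dma, len,
					DMA_BIDIRECTIONAL);
	/* ... pass the buffer up the stack ... */
}

The patch below applies the same idea per pool: the flag starts false and is set as soon as any mapped page reports that it needs syncing.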
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -2,9 +2,6 @@
 
 #include <net/xsk_buff_pool.h>
 #include <net/xdp_sock.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/swiotlb.h>
 
 #include "xsk_queue.h"
 
@@ -124,48 +121,6 @@ static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
 	}
 }
 
-static bool __maybe_unused xp_check_swiotlb_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_SWIOTLB)
-	phys_addr_t paddr;
-	u32 i;
-
-	for (i = 0; i < pool->dma_pages_cnt; i++) {
-		paddr = dma_to_phys(pool->dev, pool->dma_pages[i]);
-		if (is_swiotlb_buffer(paddr))
-			return false;
-	}
-#endif
-	return true;
-}
-
-static bool xp_check_cheap_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_HAS_DMA)
-	const struct dma_map_ops *ops = get_dma_ops(pool->dev);
-
-	if (ops) {
-		return !ops->sync_single_for_cpu &&
-			!ops->sync_single_for_device;
-	}
-
-	if (!dma_is_direct(ops))
-		return false;
-
-	if (!xp_check_swiotlb_dma(pool))
-		return false;
-
-	if (!dev_is_dma_coherent(pool->dev)) {
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) ||		\
-	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) ||	\
-	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
-		return false;
-#endif
-	}
-#endif
-	return true;
-}
-
 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 	       unsigned long attrs, struct page **pages, u32 nr_pages)
 {
@@ -179,6 +134,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 
 	pool->dev = dev;
 	pool->dma_pages_cnt = nr_pages;
+	pool->dma_need_sync = false;
 
 	for (i = 0; i < pool->dma_pages_cnt; i++) {
 		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
@@ -187,13 +143,13 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 			xp_dma_unmap(pool, attrs);
 			return -ENOMEM;
 		}
+		if (dma_need_sync(dev, dma))
+			pool->dma_need_sync = true;
 		pool->dma_pages[i] = dma;
 	}
 
 	if (pool->unaligned)
 		xp_check_dma_contiguity(pool);
-
-	pool->dma_need_sync = !xp_check_cheap_dma(pool);
 	return 0;
 }
 EXPORT_SYMBOL(xp_dma_map);
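
The cached flag pays off on the per-packet path. Not part of this diff, but for context: in this kernel era the inline sync helpers in include/net/xsk_buff_pool.h short-circuit on pool->dma_need_sync, roughly along these lines (paraphrased):

static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	/* Coherent DMA and no swiotlb bounce buffer: the sync is a no-op. */
	if (!xskb->pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu_slow(xskb);
}

Since xp_dma_map() now sets the flag only when the DMA core reports that at least one mapped page needs syncing, the common coherent case stays on this early-return fast path.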