Commit 6ec7be9a authored by Kal Conley's avatar Kal Conley Committed by Daniel Borkmann

xsk: Use pool->dma_pages to check for DMA

Compare pool->dma_pages instead of pool->dma_pages_cnt to check for an
active DMA mapping. pool->dma_pages needs to be read anyway to access
the map so this compiles to more efficient code.
Signed-off-by: Kal Conley <kal.conley@dectris.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20230423180157.93559-1-kal.conley@dectris.com
parent a4644119
@@ -180,7 +180,7 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
 	if (likely(!cross_pg))
 		return false;

-	return pool->dma_pages_cnt &&
+	return pool->dma_pages &&
	       !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
 }
......
@@ -350,7 +350,7 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
 {
 	struct xsk_dma_map *dma_map;

-	if (pool->dma_pages_cnt == 0)
+	if (!pool->dma_pages)
 		return;

 	dma_map = xp_find_dma_map(pool);
@@ -364,6 +364,7 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
 	__xp_dma_unmap(dma_map, attrs);

 	kvfree(pool->dma_pages);
+	pool->dma_pages = NULL;
 	pool->dma_pages_cnt = 0;
 	pool->dev = NULL;
 }
@@ -503,7 +504,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
 	if (pool->unaligned) {
 		xskb = pool->free_heads[--pool->free_heads_cnt];
 		xp_init_xskb_addr(xskb, pool, addr);
-		if (pool->dma_pages_cnt)
+		if (pool->dma_pages)
 			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
 	} else {
 		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
@@ -569,7 +570,7 @@ static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xd
 	if (pool->unaligned) {
 		xskb = pool->free_heads[--pool->free_heads_cnt];
 		xp_init_xskb_addr(xskb, pool, addr);
-		if (pool->dma_pages_cnt)
+		if (pool->dma_pages)
 			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
 	} else {
 		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment