Commit e07e7541 authored by David S. Miller

Merge branch 'page_pool-DMA-sync'

Lorenzo Bianconi says:

====================
add DMA-sync-for-device capability to page_pool API

Introduce the possibility to sync DMA memory for device in the page_pool API.
This feature allows syncing only the proper DMA size rather than always the
full buffer (dma_sync_single_for_device can be very costly).
Please note that DMA-sync-for-CPU remains the device driver's responsibility.
Relying on the page_pool DMA sync, the mvneta driver improves its XDP_DROP
rate by about 170Kpps:

- XDP_DROP DMA sync managed by mvneta driver:	~420Kpps
- XDP_DROP DMA sync managed by page_pool API:	~585Kpps

Do not change the naming convention for the moment, since doing so would hit
other drivers as well; I will address it in another series.

Changes since v4:
- do not allow the driver to set max_len to 0
- convert PP_FLAG_DMA_MAP/PP_FLAG_DMA_SYNC_DEV to BIT() macro

Changes since v3:
- move dma_sync_for_device before putting the page in the ptr_ring in
  __page_pool_recycle_into_ring, since the ptr_ring can be consumed
  concurrently. Simplify the code by moving dma_sync_for_device
  before running __page_pool_recycle_direct/__page_pool_recycle_into_ring

Changes since v2:
- rely on PP_FLAG_DMA_SYNC_DEV flag instead of dma_sync

Changes since v1:
- rename sync to dma_sync
- set dma_sync_size to 0xFFFFFFFF in page_pool_recycle_direct and
  page_pool_put_page routines
- Improve documentation
====================
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cec2975f 07e13edb
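
Before the diff itself, here is a minimal driver-side sketch of how the new
capability described in the cover letter is meant to be used. All my_drv_*
names and the two constants are made up for illustration and are not part of
this series; they mirror what the mvneta changes below do with
pp->rx_offset_correction and MVNETA_MAX_RX_BUF_SIZE.

/* Illustrative sketch only (hypothetical my_drv_* names): create a pool
 * that lets page_pool do both the DMA mapping and the sync-for-device,
 * and recycle pages with the exact number of bytes the device wrote.
 */
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/mm.h>
#include <linux/topology.h>
#include <net/page_pool.h>
#include <net/xdp.h>

#define MY_DRV_RX_OFFSET	0	/* assumed headroom before RX data */
#define MY_DRV_RX_BUF_SIZE	2048	/* assumed max RX buffer length */

static struct page_pool *my_drv_create_pool(struct device *dev, int size)
{
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = size,
		.nid = cpu_to_node(0),
		.dev = dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = MY_DRV_RX_OFFSET,	/* where the DMA engine starts writing */
		.max_len = MY_DRV_RX_BUF_SIZE,	/* upper bound for the sync length */
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}

static void my_drv_drop_frame(struct page_pool *pool, struct xdp_buff *xdp)
{
	/* Sync-for-device covers only the bytes actually touched. */
	__page_pool_put_page(pool, virt_to_head_page(xdp->data),
			     xdp->data_end - xdp->data_hard_start, true);
}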
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1846,7 +1846,6 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
 			    struct mvneta_rx_queue *rxq,
 			    gfp_t gfp_mask)
 {
-	enum dma_data_direction dma_dir;
 	dma_addr_t phys_addr;
 	struct page *page;
 
@@ -1856,9 +1855,6 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
 		return -ENOMEM;
 
 	phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
-	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
-	dma_sync_single_for_device(pp->dev->dev.parent, phys_addr,
-				   MVNETA_MAX_RX_BUF_SIZE, dma_dir);
 	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
 
 	return 0;
@@ -2097,7 +2093,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		err = xdp_do_redirect(pp->dev, xdp, prog);
 		if (err) {
 			ret = MVNETA_XDP_DROPPED;
-			xdp_return_buff(xdp);
+			__page_pool_put_page(rxq->page_pool,
+					     virt_to_head_page(xdp->data),
+					     xdp->data_end - xdp->data_hard_start,
+					     true);
 		} else {
 			ret = MVNETA_XDP_REDIR;
 		}
@@ -2106,7 +2105,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	case XDP_TX:
 		ret = mvneta_xdp_xmit_back(pp, xdp);
 		if (ret != MVNETA_XDP_TX)
-			xdp_return_buff(xdp);
+			__page_pool_put_page(rxq->page_pool,
+					     virt_to_head_page(xdp->data),
+					     xdp->data_end - xdp->data_hard_start,
+					     true);
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
@@ -2115,8 +2117,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		trace_xdp_exception(pp->dev, prog, act);
 		/* fall through */
 	case XDP_DROP:
-		page_pool_recycle_direct(rxq->page_pool,
-					 virt_to_head_page(xdp->data));
+		__page_pool_put_page(rxq->page_pool,
+				     virt_to_head_page(xdp->data),
+				     xdp->data_end - xdp->data_hard_start,
+				     true);
 		ret = MVNETA_XDP_DROPPED;
 		break;
 	}
@@ -3065,11 +3069,13 @@ static int mvneta_create_page_pool(struct mvneta_port *pp,
 	struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
 	struct page_pool_params pp_params = {
 		.order = 0,
-		.flags = PP_FLAG_DMA_MAP,
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 		.pool_size = size,
 		.nid = cpu_to_node(0),
 		.dev = pp->dev->dev.parent,
 		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+		.offset = pp->rx_offset_correction,
+		.max_len = MVNETA_MAX_RX_BUF_SIZE,
 	};
 	int err;
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -34,8 +34,18 @@
 #include <linux/ptr_ring.h>
 #include <linux/dma-direction.h>
 
-#define PP_FLAG_DMA_MAP 1 /* Should page_pool do the DMA map/unmap */
-#define PP_FLAG_ALL	PP_FLAG_DMA_MAP
+#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
+					* map/unmap
+					*/
+#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
+					* from page_pool will be
+					* DMA-synced-for-device according to
+					* the length provided by the device
+					* driver.
+					* Please note DMA-sync-for-CPU is still
+					* device driver responsibility
+					*/
+#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
 
 /*
  * Fast allocation side cache array/stack
@@ -65,6 +75,8 @@ struct page_pool_params {
 	int nid;  /* Numa node id to allocate from pages from */
 	struct device *dev; /* device, for DMA pre-mapping purposes */
 	enum dma_data_direction dma_dir; /* DMA mapping direction */
+	unsigned int	max_len; /* max DMA sync memory size */
+	unsigned int	offset;  /* DMA addr offset */
 };
 
 struct page_pool {
@@ -151,8 +163,8 @@ static inline void page_pool_use_xdp_mem(struct page_pool *pool,
 #endif
 
 /* Never call this directly, use helpers below */
-void __page_pool_put_page(struct page_pool *pool,
-			  struct page *page, bool allow_direct);
+void __page_pool_put_page(struct page_pool *pool, struct page *page,
+			  unsigned int dma_sync_size, bool allow_direct);
 
 static inline void page_pool_put_page(struct page_pool *pool,
 				      struct page *page, bool allow_direct)
@@ -161,14 +173,14 @@ static inline void page_pool_put_page(struct page_pool *pool,
 	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
 	 */
 #ifdef CONFIG_PAGE_POOL
-	__page_pool_put_page(pool, page, allow_direct);
+	__page_pool_put_page(pool, page, -1, allow_direct);
 #endif
 }
 
 /* Very limited use-cases allow recycle direct */
 static inline void page_pool_recycle_direct(struct page_pool *pool,
 					    struct page *page)
 {
-	__page_pool_put_page(pool, page, true);
+	__page_pool_put_page(pool, page, -1, true);
 }
 
 /* Disconnects a page (from a page_pool).  API users can have a need
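
As a reading aid for the header changes above, here is a hedged sketch
contrasting the unchanged helper with the new length-aware call; rx_len is an
assumed value coming from a driver's RX path, not something defined by this
series.

#include <net/page_pool.h>

/* Sketch, not part of the patch: the inline helpers pass -1 as
 * dma_sync_size, which the implementation clamps to pool->p.max_len,
 * while a caller that knows the received length can pass it directly.
 */
static void example_recycle(struct page_pool *pool, struct page *page,
			    unsigned int rx_len)
{
	if (rx_len)
		/* sync-for-device only the rx_len bytes that were written */
		__page_pool_put_page(pool, page, rx_len, true);
	else
		/* helper path: syncs up to pool->p.max_len */
		page_pool_recycle_direct(pool, page);
}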
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -47,6 +47,21 @@ static int page_pool_init(struct page_pool *pool,
 	    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
 		return -EINVAL;
 
+	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
+		/* In order to request DMA-sync-for-device the page
+		 * needs to be mapped
+		 */
+		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
+			return -EINVAL;
+
+		if (!pool->p.max_len)
+			return -EINVAL;
+
+		/* pool->p.offset has to be set according to the address
+		 * offset used by the DMA engine to start copying rx data
+		 */
+	}
+
 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
 		return -ENOMEM;
@@ -115,6 +130,16 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
 	return page;
 }
 
+static void page_pool_dma_sync_for_device(struct page_pool *pool,
+					  struct page *page,
+					  unsigned int dma_sync_size)
+{
+	dma_sync_size = min(dma_sync_size, pool->p.max_len);
+	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
+					 pool->p.offset, dma_sync_size,
+					 pool->p.dma_dir);
+}
+
 /* slow path */
 noinline
 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
@@ -159,6 +184,9 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 	}
 	page->dma_addr = dma;
 
+	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+
 skip_dma_map:
 	/* Track how many pages are held 'in-flight' */
 	pool->pages_state_hold_cnt++;
@@ -292,8 +320,8 @@ static bool pool_page_reusable(struct page_pool *pool, struct page *page)
 	return !page_is_pfmemalloc(page) && page_to_nid(page) == pool->p.nid;
 }
 
-void __page_pool_put_page(struct page_pool *pool,
-			  struct page *page, bool allow_direct)
+void __page_pool_put_page(struct page_pool *pool, struct page *page,
+			  unsigned int dma_sync_size, bool allow_direct)
 {
 	/* This allocator is optimized for the XDP mode that uses
 	 * one-frame-per-page, but have fallbacks that act like the
@@ -305,6 +333,10 @@ void __page_pool_put_page(struct page_pool *pool,
 			 pool_page_reusable(pool, page))) {
 		/* Read barrier done in page_ref_count / READ_ONCE */
 
+		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+			page_pool_dma_sync_for_device(pool, page,
+						      dma_sync_size);
+
 		if (allow_direct && in_serving_softirq())
 			if (__page_pool_recycle_direct(page, pool))
 				return;
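
One detail worth spelling out about the implementation above: the -1 used by
the inline helpers becomes UINT_MAX in the unsigned dma_sync_size parameter,
so the min() in page_pool_dma_sync_for_device clamps it to pool->p.max_len,
while any smaller driver-supplied length is used as-is. A tiny illustration
with example values (not taken from the patch):

#include <linux/kernel.h>	/* min() */

/* Example only: the length that ends up being synced for device,
 * starting at dma_addr + pool->p.offset.
 */
static unsigned int example_sync_len(unsigned int dma_sync_size,
				     unsigned int max_len)
{
	/* -1 (UINT_MAX) -> max_len; 1500 with max_len 4096 -> 1500 */
	return min(dma_sync_size, max_len);
}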