Commit 78862447 authored by Lorenzo Bianconi, committed by Daniel Borkmann

net: page_pool: Add bulk support for ptr_ring

Introduce the capability to batch page_pool ptr_ring refills, since the
recycle path is usually run inside the driver NAPI TX completion loop.
Suggested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Co-developed-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Link: https://lore.kernel.org/bpf/08dd249c9522c001313f520796faa777c4089e1c.1605267335.git.lorenzo@kernel.org
parent 89653987
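
For context, a usage sketch that is not part of this commit: a driver's NAPI TX completion handler can queue completed XDP frames through the xdp_frame_bulk helpers introduced in the parent commit, so that the final flush lands in a single page_pool_put_page_bulk() call instead of one per-page return. The driver-side names (my_tx_queue, my_desc_unmap, my_napi_tx_complete, txq->completed) are hypothetical; only the xdp_* helpers are real kernel API.

/* Hypothetical driver TX completion path; illustrative only. */
static void my_napi_tx_complete(struct my_tx_queue *txq, int ndone)
{
	struct xdp_frame_bulk bq;
	int i;

	xdp_frame_bulk_init(&bq);

	rcu_read_lock();	/* xdp_return_frame_bulk() resolves mem.id under RCU */

	for (i = 0; i < ndone; i++) {
		struct xdp_frame *xdpf = txq->completed[i];

		my_desc_unmap(txq, i);
		/* Defer the page_pool return; frames with the same mem.id are batched */
		xdp_return_frame_bulk(xdpf, &bq);
	}

	/* Flush the batch: one ptr_ring producer-lock acquisition for the whole bulk */
	xdp_flush_frame_bulk(&bq);

	rcu_read_unlock();
}

Without batching, every recycled page took the ptr_ring producer lock on its own; with the flush above, the lock is taken once per bulk.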
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -152,6 +152,8 @@ struct page_pool *page_pool_create(const struct page_pool_params *params);
 void page_pool_destroy(struct page_pool *pool);
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
 void page_pool_release_page(struct page_pool *pool, struct page *page);
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+			     int count);
 #else
 static inline void page_pool_destroy(struct page_pool *pool)
 {
@@ -165,6 +167,11 @@ static inline void page_pool_release_page(struct page_pool *pool,
 					  struct page *page)
 {
 }
+
+static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+					   int count)
+{
+}
 #endif
 
 void page_pool_put_page(struct page_pool *pool, struct page *page,
@@ -215,4 +222,23 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
 	if (unlikely(pool->p.nid != new_nid))
 		page_pool_update_nid(pool, new_nid);
 }
+
+static inline void page_pool_ring_lock(struct page_pool *pool)
+	__acquires(&pool->ring.producer_lock)
+{
+	if (in_serving_softirq())
+		spin_lock(&pool->ring.producer_lock);
+	else
+		spin_lock_bh(&pool->ring.producer_lock);
+}
+
+static inline void page_pool_ring_unlock(struct page_pool *pool)
+	__releases(&pool->ring.producer_lock)
+{
+	if (in_serving_softirq())
+		spin_unlock(&pool->ring.producer_lock);
+	else
+		spin_unlock_bh(&pool->ring.producer_lock);
+}
+
 #endif /* _NET_PAGE_POOL_H */

--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -11,6 +11,8 @@
 #include <linux/device.h>
 
 #include <net/page_pool.h>
+#include <net/xdp.h>
+
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
 #include <linux/page-flags.h>
@@ -362,8 +364,9 @@ static bool pool_page_reusable(struct page_pool *pool, struct page *page)
  * If the page refcnt != 1, then the page will be returned to memory
  * subsystem.
  */
-void page_pool_put_page(struct page_pool *pool, struct page *page,
-			unsigned int dma_sync_size, bool allow_direct)
+static __always_inline struct page *
+__page_pool_put_page(struct page_pool *pool, struct page *page,
+		     unsigned int dma_sync_size, bool allow_direct)
 {
 	/* This allocator is optimized for the XDP mode that uses
 	 * one-frame-per-page, but have fallbacks that act like the
@@ -379,15 +382,12 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
 			page_pool_dma_sync_for_device(pool, page,
 						      dma_sync_size);
 
-		if (allow_direct && in_serving_softirq())
-			if (page_pool_recycle_in_cache(page, pool))
-				return;
+		if (allow_direct && in_serving_softirq() &&
+		    page_pool_recycle_in_cache(page, pool))
+			return NULL;
 
-		if (!page_pool_recycle_in_ring(pool, page)) {
-			/* Cache full, fallback to free pages */
-			page_pool_return_page(pool, page);
-		}
-		return;
+		/* Page found as candidate for recycling */
+		return page;
 	}
 	/* Fallback/non-XDP mode: API user have elevated refcnt.
 	 *
@@ -405,9 +405,59 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
 	/* Do not replace this with page_pool_return_page() */
 	page_pool_release_page(pool, page);
 	put_page(page);
+
+	return NULL;
+}
+
+void page_pool_put_page(struct page_pool *pool, struct page *page,
+			unsigned int dma_sync_size, bool allow_direct)
+{
+	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
+	if (page && !page_pool_recycle_in_ring(pool, page)) {
+		/* Cache full, fallback to free pages */
+		page_pool_return_page(pool, page);
+	}
 }
 EXPORT_SYMBOL(page_pool_put_page);
 
+/* Caller must not use data area after call, as this function overwrites it */
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+			     int count)
+{
+	int i, bulk_len = 0;
+
+	for (i = 0; i < count; i++) {
+		struct page *page = virt_to_head_page(data[i]);
+
+		page = __page_pool_put_page(pool, page, -1, false);
+		/* Approved for bulk recycling in ptr_ring cache */
+		if (page)
+			data[bulk_len++] = page;
+	}
+
+	if (unlikely(!bulk_len))
+		return;
+
+	/* Bulk producer into ptr_ring page_pool cache */
+	page_pool_ring_lock(pool);
+	for (i = 0; i < bulk_len; i++) {
+		if (__ptr_ring_produce(&pool->ring, data[i]))
+			break; /* ring full */
+	}
+	page_pool_ring_unlock(pool);
+
+	/* Hopefully all pages was return into ptr_ring */
+	if (likely(i == bulk_len))
+		return;
+
+	/* ptr_ring cache full, free remaining pages outside producer lock
+	 * since put_page() with refcnt == 1 can be an expensive operation
+	 */
+	for (; i < bulk_len; i++)
+		page_pool_return_page(pool, data[i]);
+}
+EXPORT_SYMBOL(page_pool_put_page_bulk);
+
 static void page_pool_empty_ring(struct page_pool *pool)
 {
 	struct page *page;

--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -393,16 +393,11 @@ EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
 void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
 {
 	struct xdp_mem_allocator *xa = bq->xa;
-	int i;
 
-	if (unlikely(!xa))
+	if (unlikely(!xa || !bq->count))
 		return;
 
-	for (i = 0; i < bq->count; i++) {
-		struct page *page = virt_to_head_page(bq->q[i]);
-
-		page_pool_put_full_page(xa->page_pool, page, false);
-	}
+	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
 	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
 	bq->count = 0;
 }
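
Beyond the xdp_frame_bulk path above, page_pool_put_page_bulk() can also be called directly. A minimal sketch, assuming every entry in bufs[] is a page virtual address that came from the same pool; the callee maps each entry back with virt_to_head_page() and may overwrite the array, so the caller must not reuse it afterwards. MY_BULK_SIZE and my_recycle_tx_buffers() are illustrative names, not kernel API.

#define MY_BULK_SIZE	16	/* mirrors the xdp_frame_bulk queue depth */

static void my_recycle_tx_buffers(struct page_pool *pool, void **bufs, int n)
{
	void *data[MY_BULK_SIZE];
	int i, count = 0;

	for (i = 0; i < n; i++) {
		data[count++] = bufs[i];	/* page virtual addresses */
		if (count == MY_BULK_SIZE) {
			/* One producer-lock acquisition recycles the whole chunk */
			page_pool_put_page_bulk(pool, data, count);
			count = 0;	/* data[] may have been overwritten; start fresh */
		}
	}

	if (count)
		page_pool_put_page_bulk(pool, data, count);
}

Pages that do not qualify for recycling (refcnt != 1) are released by __page_pool_put_page() inside the bulk loop, so only recycle candidates ever reach the locked ptr_ring section.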