Commit dc8cf755 authored by David S. Miller

Merge branch 'page_pool-recycling'

Matteo Croce says:

====================
page_pool: recycle buffers

This is a respin of [1]

This patchset allows page_pool to handle and maintain the DMA map/unmap
of the pages it serves to the driver. For this to work, a return hook in
the network core is introduced.

The overall purpose is to simplify drivers by providing a page
allocation API that does recycling, so that each driver doesn't have
to reinvent its own recycling scheme. Using page_pool in a driver
does not require implementing XDP support, but it makes it trivially
easy to do so. Instead of allocating buffers specifically for SKBs,
we now allocate a generic buffer and either wrap it in an SKB
(via build_skb) or create an XDP frame.
The recycling code leverages the XDP recycle APIs.
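
As a rough sketch of the intended driver-side flow (the function name
mydrv_rx_build_skb() and its parameters are hypothetical; the real usage
is in the mvneta and mvpp2 hunks below):

	/* Sketch only: wraps a generic page_pool buffer in an SKB and marks
	 * it for recycling. build_skb(), skb_reserve(), skb_put() and
	 * skb_mark_for_recycle() are the interfaces this series relies on.
	 */
	static struct sk_buff *mydrv_rx_build_skb(struct page_pool *pool,
						  void *data, u32 len,
						  u32 headroom)
	{
		/* Wrap the buffer in an SKB instead of copying it. */
		struct sk_buff *skb = build_skb(data, PAGE_SIZE);

		if (!skb)
			return NULL;

		skb_reserve(skb, headroom);
		skb_put(skb, len);

		/* On free, the core returns the page to the pool with its
		 * DMA mapping intact, instead of calling put_page().
		 */
		skb_mark_for_recycle(skb, virt_to_page(data), pool);

		return skb;
	}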

The Marvell mvpp2 and mvneta drivers are used in this patchset to
demonstrate how to use the API, and were tested on MacchiatoBIN
and EspressoBIN boards, respectively.

Please let this go in on a future -rc1 so as to allow enough time
for wider testing.

v7 -> v8:
- use page->lru.next instead of page->index for pfmemalloc
- remove conditional include
- rework page_pool_return_skb_page() to have fewer conversions
  between pages and addresses, and to call compound_head() only once
- move some code from skb_free_head() to a new helper skb_pp_recycle()
- misc fixes

v6 -> v7:
- refresh patches against net-next
- remove a redundant call to virt_to_head_page()
- update mvneta benchmarks

v5 -> v6:
- preserve pfmemalloc bit when setting signature
- fix typo in mvneta
- rebase on net-next with the new cache
- don't clear the skb->pp_recycle in pskb_expand_head()

v4 -> v5:
- move the signature so it doesn't alias with page->mapping
- use an invalid pointer as magic
- incorporate Matthew Wilcox's changes for pfmemalloc pages
- move the __skb_frag_unref() changes to a preliminary patch
- refactor some cpp directives
- only attempt recycling if skb->head_frag
- clear skb->pp_recycle in pskb_expand_head()

v3 -> v4:
- store a pointer to page_pool instead of xdp_mem_info
- drop a patch which reduces xdp_mem_info size
- do the recycling in the page_pool code instead of xdp_return
- remove some unused header includes
- remove some useless forward declarations

v2 -> v3:
- added missing SOBs
- CCed the MM people

v1 -> v2:
- fix a commit message
- avoid setting pp_recycle multiple times on mvneta
- squash two patches to avoid breaking bisect
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 35cba15a e4017570
@@ -2320,7 +2320,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 }
 
 static struct sk_buff *
-mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
		      struct xdp_buff *xdp, u32 desc_status)
 {
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -2331,7 +2331,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
	if (!skb)
		return ERR_PTR(-ENOMEM);
 
-	page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data));
+	skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);
 
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);
@@ -2343,7 +2343,10 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				skb_frag_page(frag), skb_frag_off(frag),
				skb_frag_size(frag), PAGE_SIZE);
-		page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
+		/* We don't need to reset pp_recycle here. It's already set, so
+		 * just mark fragments for recycling.
+		 */
+		page_pool_store_mem_info(skb_frag_page(frag), pool);
	}
 
	return skb;
@@ -2425,7 +2428,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
		    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
			goto next;
 
-		skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
+		skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
		if (IS_ERR(skb)) {
			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
@@ -3997,7 +3997,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		}
 
		if (pp)
-			page_pool_release_page(pp, virt_to_page(data));
+			skb_mark_for_recycle(skb, virt_to_page(data), pp);
		else
			dma_unmap_single_attrs(dev->dev.parent, dma_addr,
					       bm_pool->buf_size, DMA_FROM_DEVICE,
@@ -2503,7 +2503,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
		if (length == 0) {
			/* don't need this page */
-			__skb_frag_unref(frag);
+			__skb_frag_unref(frag, false);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);
@@ -526,7 +526,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 fail:
	while (nr > 0) {
		nr--;
-		__skb_frag_unref(skb_shinfo(skb)->frags + nr);
+		__skb_frag_unref(skb_shinfo(skb)->frags + nr, false);
	}
	return 0;
 }
@@ -1668,10 +1668,11 @@ struct address_space *page_mapping(struct page *page);
 static inline bool page_is_pfmemalloc(const struct page *page)
 {
	/*
-	 * Page index cannot be this large so this must be
-	 * a pfmemalloc page.
+	 * lru.next has bit 1 set if the page is allocated from the
+	 * pfmemalloc reserves. Callers may simply overwrite it if
+	 * they do not need to preserve that information.
	 */
-	return page->index == -1UL;
+	return (uintptr_t)page->lru.next & BIT(1);
 }
 
 /*
@@ -1680,12 +1681,12 @@ static inline bool page_is_pfmemalloc(const struct page *page)
  */
 static inline void set_page_pfmemalloc(struct page *page)
 {
-	page->index = -1UL;
+	page->lru.next = (void *)BIT(1);
 }
 
 static inline void clear_page_pfmemalloc(struct page *page)
 {
-	page->index = 0;
+	page->lru.next = NULL;
 }
 
 /*
@@ -96,6 +96,13 @@ struct page {
			unsigned long private;
		};
		struct {	/* page_pool used by netstack */
+			/**
+			 * @pp_magic: magic value to avoid recycling non
+			 * page_pool allocated pages.
+			 */
+			unsigned long pp_magic;
+			struct page_pool *pp;
+			unsigned long _pp_mapping_pad;
			/**
			 * @dma_addr: might require a 64-bit value on
			 * 32-bit architectures.
@@ -78,4 +78,7 @@
 /********** security/ **********/
 #define KEY_DESTROY		0xbd
 
+/********** net/core/page_pool.c **********/
+#define PP_SIGNATURE		(0x40 + POISON_POINTER_DELTA)
+
 #endif
@@ -37,6 +37,7 @@
 #include <linux/in6.h>
 #include <linux/if_packet.h>
 #include <net/flow.h>
+#include <net/page_pool.h>
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <linux/netfilter/nf_conntrack_common.h>
 #endif
@@ -667,6 +668,8 @@ typedef unsigned char *sk_buff_data_t;
  *	@head_frag: skb was allocated from page fragments,
  *		not allocated by kmalloc() or vmalloc().
  *	@pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+ *	@pp_recycle: mark the packet for recycling instead of freeing (implies
+ *		page_pool support on driver)
  *	@active_extensions: active extensions (skb_ext_id types)
  *	@ndisc_nodetype: router type (from link layer)
  *	@ooo_okay: allow the mapping of a socket to a queue to be changed
@@ -791,10 +794,12 @@ struct sk_buff {
				fclone:2,
				peeked:1,
				head_frag:1,
-				pfmemalloc:1;
+				pfmemalloc:1,
+				pp_recycle:1; /* page_pool recycle indicator */
+
 #ifdef CONFIG_SKB_EXTENSIONS
	__u8			active_extensions;
 #endif
	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
@@ -3081,12 +3086,20 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
 /**
  * __skb_frag_unref - release a reference on a paged fragment.
  * @frag: the paged fragment
+ * @recycle: recycle the page if allocated via page_pool
  *
- * Releases a reference on the paged fragment @frag.
+ * Releases a reference on the paged fragment @frag
+ * or recycles the page via the page_pool API.
  */
-static inline void __skb_frag_unref(skb_frag_t *frag)
+static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
 {
-	put_page(skb_frag_page(frag));
+	struct page *page = skb_frag_page(frag);
+
+#ifdef CONFIG_PAGE_POOL
+	if (recycle && page_pool_return_skb_page(page))
+		return;
+#endif
+	put_page(page);
 }
 
 /**
@@ -3098,7 +3111,7 @@ static inline void __skb_frag_unref(skb_frag_t *frag)
  */
 static inline void skb_frag_unref(struct sk_buff *skb, int f)
 {
-	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
+	__skb_frag_unref(&skb_shinfo(skb)->frags[f], skb->pp_recycle);
 }
 
 /**
@@ -4697,5 +4710,21 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
 #endif
 }
 
+#ifdef CONFIG_PAGE_POOL
+static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page,
+					struct page_pool *pp)
+{
+	skb->pp_recycle = 1;
+	page_pool_store_mem_info(page, pp);
+}
+#endif
+
+static inline bool skb_pp_recycle(struct sk_buff *skb, void *data)
+{
+	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
+		return false;
+	return page_pool_return_skb_page(virt_to_page(data));
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
@@ -146,6 +146,8 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
	return pool->p.dma_dir;
 }
 
+bool page_pool_return_skb_page(struct page *page);
+
 struct page_pool *page_pool_create(const struct page_pool_params *params);
 
 #ifdef CONFIG_PAGE_POOL
@@ -251,4 +253,11 @@ static inline void page_pool_ring_unlock(struct page_pool *pool)
	spin_unlock_bh(&pool->ring.producer_lock);
 }
 
+/* Store mem_info on struct page and use it while recycling skb frags */
+static inline
+void page_pool_store_mem_info(struct page *page, struct page_pool *pp)
+{
+	page->pp = pp;
+}
+
 #endif /* _NET_PAGE_POOL_H */
@@ -17,6 +17,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/page-flags.h>
 #include <linux/mm.h> /* for __put_page() */
+#include <linux/poison.h>
 
 #include <trace/events/page_pool.h>
@@ -221,6 +222,8 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
		return NULL;
	}
 
+	page->pp_magic |= PP_SIGNATURE;
+
	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
@@ -263,6 +266,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
			put_page(page);
			continue;
		}
+		page->pp_magic |= PP_SIGNATURE;
		pool->alloc.cache[pool->alloc.count++] = page;
		/* Track how many pages are held 'in-flight' */
		pool->pages_state_hold_cnt++;
@@ -341,6 +345,8 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
			     DMA_ATTR_SKIP_CPU_SYNC);
	page_pool_set_dma_addr(page, 0);
 skip_dma_unmap:
+	page->pp_magic = 0;
+
	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
@@ -622,3 +628,25 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid)
	}
 }
 EXPORT_SYMBOL(page_pool_update_nid);
+
+bool page_pool_return_skb_page(struct page *page)
+{
+	struct page_pool *pp;
+
+	page = compound_head(page);
+	if (unlikely(page->pp_magic != PP_SIGNATURE))
+		return false;
+
+	pp = page->pp;
+
+	/* Driver set this to memory recycling info. Reset it on recycle.
+	 * This will *not* work for NIC using a split-page memory model.
+	 * The page will be returned to the pool here regardless of the
+	 * 'flipped' fragment being in use or not.
+	 */
+	page->pp = NULL;
+	page_pool_put_full_page(pp, page, false);
+
+	return true;
+}
+EXPORT_SYMBOL(page_pool_return_skb_page);
@@ -70,6 +70,7 @@
 #include <net/xfrm.h>
 #include <net/mpls.h>
 #include <net/mptcp.h>
+#include <net/page_pool.h>
 
 #include <linux/uaccess.h>
 #include <trace/events/skb.h>
@@ -645,10 +646,13 @@ static void skb_free_head(struct sk_buff *skb)
 {
	unsigned char *head = skb->head;
 
-	if (skb->head_frag)
+	if (skb->head_frag) {
+		if (skb_pp_recycle(skb, head))
+			return;
		skb_free_frag(head);
-	else
+	} else {
		kfree(head);
+	}
 }
 
 static void skb_release_data(struct sk_buff *skb)
@@ -664,7 +668,7 @@ static void skb_release_data(struct sk_buff *skb)
	skb_zcopy_clear(skb, true);
 
	for (i = 0; i < shinfo->nr_frags; i++)
-		__skb_frag_unref(&shinfo->frags[i]);
+		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);
 
	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);
@@ -1046,6 +1050,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
+	C(pp_recycle);
	n->destructor = NULL;
	C(tail);
	C(end);
@@ -3495,7 +3500,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
		fragto = &skb_shinfo(tgt)->frags[merge];
 
		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
-		__skb_frag_unref(fragfrom);
+		__skb_frag_unref(fragfrom, skb->pp_recycle);
	}
 
	/* Reposition in the original skb */
@@ -5285,6 +5290,13 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
	if (skb_cloned(to))
		return false;
 
+	/* The page pool signature of struct page will eventually figure out
+	 * which pages can be recycled or not but for now let's prohibit slab
+	 * allocated and page_pool allocated SKBs from being coalesced.
+	 */
+	if (to->pp_recycle != from->pp_recycle)
+		return false;
+
	if (len <= skb_tailroom(to)) {
		if (len)
			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
@@ -128,7 +128,7 @@ static void destroy_record(struct tls_record_info *record)
	int i;
 
	for (i = 0; i < record->num_frags; i++)
-		__skb_frag_unref(&record->frags[i]);
+		__skb_frag_unref(&record->frags[i], false);
	kfree(record);
 }