Commit 6bfef2ec authored by Jakub Kicinski

Merge branch 'net-page_pool-remove-page_pool_release_page'

Jakub Kicinski says:

====================
net: page_pool: remove page_pool_release_page()

page_pool_release_page() is a historic artefact from before
recycling of pages attached to skbs was supported. Theoretical
uses for it may be thought up, but in practice all existing
users can be converted to use skb_mark_for_recycle() instead.

This code was previously posted as part of the memory provider RFC.
https://lore.kernel.org/all/20230707183935.997267-1-kuba@kernel.org/
====================

Link: https://lore.kernel.org/r/20230720010409.1967072-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 5766946e 07e0c7d3
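
For readers skimming the driver conversions below, here is a minimal sketch of the Rx pattern the series moves drivers to. Only the page_pool and skb helpers are real kernel APIs; the driver structure and names (my_rx_ring, my_rx_build_skb, MY_HEADROOM) are hypothetical.

/*
 * Illustrative sketch only: "my_*" names are hypothetical, the page_pool
 * and skb helpers are the real APIs used by the conversions below.
 */
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>

#define MY_HEADROOM	NET_SKB_PAD	/* hypothetical headroom policy */

struct my_rx_ring {
	struct page_pool *page_pool;
};

static struct sk_buff *my_rx_build_skb(struct my_rx_ring *ring,
				       struct page *page, unsigned int len)
{
	struct sk_buff *skb;

	skb = napi_build_skb(page_address(page), PAGE_SIZE);
	if (!skb) {
		/* No skb to carry the page: hand it straight back to the
		 * pool; allow_direct=true is only safe in NAPI context.
		 */
		page_pool_put_full_page(ring->page_pool, page, true);
		return NULL;
	}

	skb_reserve(skb, MY_HEADROOM);
	skb_put(skb, len);

	/* Old pattern: page_pool_release_page(ring->page_pool, page) here,
	 * unmapping the page and dropping it from the pool's accounting.
	 * New pattern: keep the page attached to the pool and let the skb
	 * free path recycle or unmap it.
	 */
	skb_mark_for_recycle(skb);

	return skb;
}

The same shift is what each driver hunk below performs: the call to page_pool_release_page() disappears and skb_mark_for_recycle() takes its place.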
@@ -13,9 +13,9 @@ replacing dev_alloc_pages().
 API keeps track of in-flight pages, in order to let API user know
 when it is safe to free a page_pool object. Thus, API users
-must run page_pool_release_page() when a page is leaving the page_pool or
-call page_pool_put_page() where appropriate in order to maintain correct
-accounting.
+must call page_pool_put_page() to free the page, or attach
+the page to page_pool-aware objects like skbs marked with
+skb_mark_for_recycle().
 
 API user must call page_pool_put_page() once on a page, as it
 will either recycle the page, or in case of refcnt > 1, it will
@@ -87,9 +87,6 @@ a page will cause no race conditions is enough.
   must guarantee safe context (e.g NAPI), since it will recycle the page
   directly into the pool fast cache.
 
-* page_pool_release_page(): Unmap the page (if mapped) and account for it on
-  in-flight counters.
-
 * page_pool_dev_alloc_pages(): Get a page from the page allocator or page_pool
   caches.
@@ -194,7 +191,7 @@ NAPI poller
             if XDP_DROP:
                 page_pool_recycle_direct(page_pool, page);
         } else (packet_is_skb) {
-            page_pool_release_page(page_pool, page);
+            skb_mark_for_recycle(skb);
             new_page = page_pool_dev_alloc_pages(page_pool);
         }
     }
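
The documentation above leans on the pool's in-flight accounting, so it may help to sketch what that means at teardown time. This is a hedged illustration, reusing the hypothetical my_rx_ring from the earlier sketch and assuming it also caches one page per descriptor in a pages[] array of num_desc entries; page_pool_put_full_page() and page_pool_destroy() are the real calls.

static void my_rx_ring_free(struct my_rx_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->num_desc; i++) {
		if (!ring->pages[i])
			continue;
		/* Outside NAPI context, so allow_direct must be false. */
		page_pool_put_full_page(ring->page_pool, ring->pages[i], false);
		ring->pages[i] = NULL;
	}

	/* Pages attached to not-yet-freed skbs via skb_mark_for_recycle()
	 * are still counted as in-flight; page_pool_destroy() defers the
	 * actual freeing of the pool until they have been returned.
	 */
	page_pool_destroy(ring->page_pool);
}

The exactly-once rule quoted above is the flip side of this: once a page has been attached to an skb marked for recycling, the driver must not also put it back itself.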
......
@@ -1333,7 +1333,7 @@ static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
 	skb = tsnep_build_skb(rx, page, length);
 	if (skb) {
-		page_pool_release_page(rx->page_pool, page);
+		skb_mark_for_recycle(skb);
 
 		rx->packets++;
 		rx->bytes += length;
......
@@ -5441,7 +5441,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 					priv->dma_conf.dma_buf_sz);
 
 			/* Data payload appended into SKB */
-			page_pool_release_page(rx_q->page_pool, buf->page);
+			skb_mark_for_recycle(skb);
 			buf->page = NULL;
 		}
@@ -5453,7 +5453,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 					priv->dma_conf.dma_buf_sz);
 
 			/* Data payload appended into SKB */
-			page_pool_release_page(rx_q->page_pool, buf->sec_page);
+			skb_mark_for_recycle(skb);
 			buf->sec_page = NULL;
 		}
......
@@ -18,9 +18,8 @@
  *
  * API keeps track of in-flight pages, in-order to let API user know
  * when it is safe to dealloactor page_pool object. Thus, API users
- * must make sure to call page_pool_release_page() when a page is
- * "leaving" the page_pool. Or call page_pool_put_page() where
- * appropiate. For maintaining correct accounting.
+ * must call page_pool_put_page() where appropriate and only attach
+ * the page to page_pool-aware objects, like skbs marked for recycling.
  *
  * API user must only call page_pool_put_page() once on a page, as it
  * will either recycle the page, or in case of elevated refcnt, it
@@ -251,7 +250,6 @@ void page_pool_unlink_napi(struct page_pool *pool);
 void page_pool_destroy(struct page_pool *pool);
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
 			   struct xdp_mem_info *mem);
-void page_pool_release_page(struct page_pool *pool, struct page *page);
 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 			     int count);
 #else
@@ -268,10 +266,6 @@ static inline void page_pool_use_xdp_mem(struct page_pool *pool,
 					  struct xdp_mem_info *mem)
 {
 }
-static inline void page_pool_release_page(struct page_pool *pool,
-					  struct page *page)
-{
-}
 static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 					   int count)
......
@@ -492,7 +492,7 @@ static s32 page_pool_inflight(struct page_pool *pool)
  * a regular page (that will eventually be returned to the normal
  * page-allocator via put_page).
  */
-void page_pool_release_page(struct page_pool *pool, struct page *page)
+static void page_pool_return_page(struct page_pool *pool, struct page *page)
 {
 	dma_addr_t dma;
 	int count;
@@ -518,13 +518,6 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
 	 */
 	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
 	trace_page_pool_state_release(pool, page, count);
-}
-EXPORT_SYMBOL(page_pool_release_page);
-
-/* Return a page to the page allocator, cleaning up our state */
-static void page_pool_return_page(struct page_pool *pool, struct page *page)
-{
-	page_pool_release_page(pool, page);
 	put_page(page);
 
 	/* An optimization would be to call __free_pages(page, pool->p.order)
@@ -616,9 +609,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
 	 * will be invoking put_page.
 	 */
 	recycle_stat_inc(pool, released_refcnt);
-	/* Do not replace this with page_pool_return_page() */
-	page_pool_release_page(pool, page);
-	put_page(page);
+	page_pool_return_page(pool, page);
 
 	return NULL;
 }