Commit 46e208e7 authored by Jakub Kicinski

Merge branch 'net-page_pool-add-netlink-based-introspection-part1'

Jakub Kicinski says:

====================
net: page_pool: split the page_pool_params into fast and slow

Small refactoring in prep for adding more page pool params
which won't be needed on the fast path.

v1:  https://lore.kernel.org/all/20231024160220.3973311-1-kuba@kernel.org/
RFC: https://lore.kernel.org/all/20230816234303.3786178-1-kuba@kernel.org/
====================

Link: https://lore.kernel.org/r/20231121000048.789613-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 3a17ea77 2da0cac1
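For context: struct_group_tagged(), from include/linux/stddef.h, wraps the listed members in a union of an anonymous struct and a tagged struct, so every field stays addressable by its flat name (params.flags) as well as through the group (params.fast.flags), and the tagged type can be declared on its own. A minimal userspace sketch of the idea, using a simplified stand-in for the macro (the kernel's version also forwards struct attributes; all names here are illustrative):

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel's struct_group_tagged(); the real macro
 * in include/linux/stddef.h also handles attributes.
 */
#define struct_group_tagged(TAG, NAME, ...)		\
	union {						\
		struct { __VA_ARGS__ };			\
		struct TAG { __VA_ARGS__ } NAME;	\
	}

struct params {
	struct_group_tagged(params_fast, fast,
		unsigned int flags;
		unsigned int order;
	);
	struct_group_tagged(params_slow, slow,
		void *init_arg;
	);
};

int main(void)
{
	struct params p = { .flags = 1, .order = 3 };	/* flat names still work */
	struct params_fast fast_copy;			/* tagged type usable on its own */

	memcpy(&fast_copy, &p.fast, sizeof(fast_copy));
	printf("flags=%u fast.flags=%u copy.order=%u\n",
	       p.flags, p.fast.flags, fast_copy.order);
	return 0;
}

This is the property the series relies on: struct page_pool embeds only the page_pool_params_fast part, while the full page_pool_params stays source-compatible for callers.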
include/net/page_pool/types.h

@@ -54,18 +54,22 @@ struct pp_alloc_cache {
  * @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
  */
 struct page_pool_params {
-	unsigned int	flags;
-	unsigned int	order;
-	unsigned int	pool_size;
-	int		nid;
-	struct device	*dev;
-	struct napi_struct *napi;
-	enum dma_data_direction dma_dir;
-	unsigned int	max_len;
-	unsigned int	offset;
+	struct_group_tagged(page_pool_params_fast, fast,
+		unsigned int	flags;
+		unsigned int	order;
+		unsigned int	pool_size;
+		int		nid;
+		struct device	*dev;
+		struct napi_struct *napi;
+		enum dma_data_direction dma_dir;
+		unsigned int	max_len;
+		unsigned int	offset;
+	);
+	struct_group_tagged(page_pool_params_slow, slow,
 /* private: used by test code only */
-	void (*init_callback)(struct page *page, void *arg);
-	void *init_arg;
+		void (*init_callback)(struct page *page, void *arg);
+		void *init_arg;
+	);
 };
 
 #ifdef CONFIG_PAGE_POOL_STATS
@@ -119,7 +123,9 @@ struct page_pool_stats {
 #endif
 
 struct page_pool {
-	struct page_pool_params p;
+	struct page_pool_params_fast p;
+
+	bool has_init_callback;
 
 	long frag_users;
 	struct page *frag_page;
@@ -178,6 +184,9 @@ struct page_pool {
 	refcount_t user_cnt;
 	u64 destroy_cnt;
 
+	/* Slow/Control-path information follows */
+	struct page_pool_params_slow slow;
 };
 
 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
...
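Driver-facing code is unaffected by the split: struct_group_tagged() keeps the flat member names, so field-by-field initialization of struct page_pool_params compiles unchanged and page_pool_create() still takes the full struct. A rough driver-side sketch under that assumption (my_rx_ring, MY_RX_RING_SIZE and the particular flag choices are illustrative, not part of the series):

#include <linux/dma-mapping.h>
#include <linux/numa.h>
#include <net/page_pool/types.h>

/* Hypothetical driver fragment: my_rx_ring and MY_RX_RING_SIZE are made up.
 * Only members of the "fast" group are set here; page_pool_init() copies
 * them into pool->p.
 */
static int my_rx_ring_create_pool(struct my_rx_ring *ring)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= MY_RX_RING_SIZE,
		.nid		= NUMA_NO_NODE,
		.dev		= ring->dev,
		.napi		= &ring->napi,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
		.offset		= 0,
	};

	ring->page_pool = page_pool_create(&pp);
	if (IS_ERR(ring->page_pool))
		return PTR_ERR(ring->page_pool);
	return 0;
}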
...@@ -173,7 +173,8 @@ static int page_pool_init(struct page_pool *pool, ...@@ -173,7 +173,8 @@ static int page_pool_init(struct page_pool *pool,
{ {
unsigned int ring_qsize = 1024; /* Default */ unsigned int ring_qsize = 1024; /* Default */
memcpy(&pool->p, params, sizeof(pool->p)); memcpy(&pool->p, &params->fast, sizeof(pool->p));
memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
/* Validate only known flags were used */ /* Validate only known flags were used */
if (pool->p.flags & ~(PP_FLAG_ALL)) if (pool->p.flags & ~(PP_FLAG_ALL))
...@@ -211,6 +212,8 @@ static int page_pool_init(struct page_pool *pool, ...@@ -211,6 +212,8 @@ static int page_pool_init(struct page_pool *pool,
*/ */
} }
pool->has_init_callback = !!pool->slow.init_callback;
#ifdef CONFIG_PAGE_POOL_STATS #ifdef CONFIG_PAGE_POOL_STATS
pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats); pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
if (!pool->recycle_stats) if (!pool->recycle_stats)
...@@ -388,8 +391,8 @@ static void page_pool_set_pp_info(struct page_pool *pool, ...@@ -388,8 +391,8 @@ static void page_pool_set_pp_info(struct page_pool *pool,
* the overhead is negligible. * the overhead is negligible.
*/ */
page_pool_fragment_page(page, 1); page_pool_fragment_page(page, 1);
if (pool->p.init_callback) if (pool->has_init_callback)
pool->p.init_callback(page, pool->p.init_arg); pool->slow.init_callback(page, pool->slow.init_arg);
} }
static void page_pool_clear_pp_info(struct page *page) static void page_pool_clear_pp_info(struct page *page)
......
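The only members that land in the slow group so far are the test-only init_callback/init_arg; page_pool_init() mirrors their presence into the has_init_callback bool, so the per-page path in page_pool_set_pp_info() tests a flag sitting next to pool->p instead of dereferencing pool->slow at the end of the struct. A hedged sketch of how test code might populate those slow members (my_test_init_page and the page counter are illustrative, not taken from the tree):

#include <linux/atomic.h>
#include <linux/numa.h>
#include <net/page_pool/types.h>

/* Illustrative test-only callback: counts freshly allocated pages as the
 * pool sets them up. init_callback/init_arg sit in params->slow;
 * page_pool_init() caches !!init_callback into pool->has_init_callback.
 */
static void my_test_init_page(struct page *page, void *arg)
{
	atomic_t *pages_seen = arg;

	atomic_inc(pages_seen);
}

static struct page_pool *my_test_create_pool(atomic_t *pages_seen)
{
	struct page_pool_params pp = {
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		/* slow/test-only members keep their flat names */
		.init_callback	= my_test_init_page,
		.init_arg	= pages_seen,
	};

	return page_pool_create(&pp);
}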