Commit 6bf071bf authored by Jesper Dangaard Brouer, committed by David S. Miller

xdp: page_pool related fix to cpumap

When converting an xdp_frame into an SKB and sending it into the network
stack, the underlying XDP memory model needs to release its associated
resources, because the network stack has no callbacks for XDP memory
models.  The only memory model that needs this is page_pool, when a driver
uses the DMA-mapping feature.

Introduce page_pool_release_page(), which basically does the same as
page_pool_unmap_page(). Add xdp_release_frame() as the XDP memory model
interface for calling it; the call is only made if the memory model
matches MEM_TYPE_PAGE_POOL, to save the function call overhead for the
others. Have cpumap call xdp_release_frame() before xdp_scrub_frame().
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 516a7593
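
The ordering this patch establishes can be summarised in a small sketch
(prepare_frame_for_stack() is an illustrative name, not part of the patch;
it assumes an xdp_frame that is about to be handed to the network stack):

#include <net/xdp.h>

/* Sketch: release page_pool resources (e.g. the DMA mapping) while
 * the frame's xdp_mem_info is still intact, i.e. before the frame
 * area is scrubbed for reuse by the SKB.
 */
static void prepare_frame_for_stack(struct xdp_frame *xdpf)
{
	/* No-op unless the memory model is MEM_TYPE_PAGE_POOL */
	xdp_release_frame(xdpf);

	/* Only after release may the SKB reuse the xdp_frame area */
	xdp_scrub_frame(xdpf);
}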
include/net/page_pool.h
@@ -110,7 +110,6 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
 struct page_pool *page_pool_create(const struct page_pool_params *params);
 
 void page_pool_destroy(struct page_pool *pool);
-void page_pool_unmap_page(struct page_pool *pool, struct page *page);
 
 /* Never call this directly, use helpers below */
 void __page_pool_put_page(struct page_pool *pool,
@@ -133,6 +132,20 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
 	__page_pool_put_page(pool, page, true);
 }
 
+/* Disconnects a page (from a page_pool).  API users can have a need
+ * to disconnect a page (from a page_pool), to allow it to be used as
+ * a regular page (that will eventually be returned to the normal
+ * page-allocator via put_page).
+ */
+void page_pool_unmap_page(struct page_pool *pool, struct page *page);
+static inline void page_pool_release_page(struct page_pool *pool,
+					  struct page *page)
+{
+#ifdef CONFIG_PAGE_POOL
+	page_pool_unmap_page(pool, page);
+#endif
+}
+
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
 	return page->dma_addr;
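
As a usage sketch of the new helper (hand_page_to_stack() is an
illustrative name; the page is assumed to have been allocated from this
pool earlier):

#include <linux/mm.h>
#include <net/page_pool.h>

static void hand_page_to_stack(struct page_pool *pool, struct page *page)
{
	/* Undo the pool's DMA mapping; compiles to a no-op when
	 * CONFIG_PAGE_POOL is not set.
	 */
	page_pool_release_page(pool, page);

	/* The page is now an ordinary refcounted page and returns to
	 * the normal page allocator when the last reference drops.
	 */
	put_page(page);
}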
include/net/xdp.h
@@ -129,6 +129,21 @@ void xdp_return_frame(struct xdp_frame *xdpf);
 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
 void xdp_return_buff(struct xdp_buff *xdp);
 
+/* When sending xdp_frame into the network stack, then there is no
+ * return point callback, which is needed to release e.g. DMA-mapping
+ * resources with page_pool. Thus, have explicit function to release
+ * frame resources.
+ */
+void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
+static inline void xdp_release_frame(struct xdp_frame *xdpf)
+{
+	struct xdp_mem_info *mem = &xdpf->mem;
+
+	/* Curr only page_pool needs this */
+	if (mem->type == MEM_TYPE_PAGE_POOL)
+		__xdp_release_frame(xdpf->data, mem);
+}
+
 int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
 		     struct net_device *dev, u32 queue_index);
 void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
kernel/bpf/cpumap.c
@@ -208,6 +208,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
 	 * - RX ring dev queue index	(skb_record_rx_queue)
 	 */
 
+	/* Until page_pool get SKB return path, release DMA here */
+	xdp_release_frame(xdpf);
+
 	/* Allow SKB to reuse area used by xdp_frame */
 	xdp_scrub_frame(xdpf);
net/core/xdp.c
@@ -381,6 +381,21 @@ void xdp_return_buff(struct xdp_buff *xdp)
 }
 EXPORT_SYMBOL_GPL(xdp_return_buff);
 
+/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
+void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
+{
+	struct xdp_mem_allocator *xa;
+	struct page *page;
+
+	rcu_read_lock();
+	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
+	page = virt_to_head_page(data);
+	if (xa)
+		page_pool_release_page(xa->page_pool, page);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(__xdp_release_frame);
+
 int xdp_attachment_query(struct xdp_attachment_info *info,
 			 struct netdev_bpf *bpf)
 {
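
Note the NULL check on the rhashtable lookup: under rcu_read_lock() the
allocator entry may already have been disconnected, in which case the DMA
unmap is skipped and the page simply continues life as a regular page.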