Commit 2d0de67d authored by Yunsheng Lin, committed by Jakub Kicinski

net: veth: use newly added page pool API for veth with xdp

Use page_pool_alloc() API to allocate memory with least
memory utilization and performance penalty.
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
CC: Lorenzo Bianconi <lorenzo@kernel.org>
CC: Alexander Duyck <alexander.duyck@gmail.com>
CC: Liang Chen <liangchen.linux@gmail.com>
CC: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://lore.kernel.org/r/20231020095952.11055-6-linyunsheng@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 8ab32fa1
...@@ -737,10 +737,11 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, ...@@ -737,10 +737,11 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
if (skb_shared(skb) || skb_head_is_locked(skb) || if (skb_shared(skb) || skb_head_is_locked(skb) ||
skb_shinfo(skb)->nr_frags || skb_shinfo(skb)->nr_frags ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) { skb_headroom(skb) < XDP_PACKET_HEADROOM) {
u32 size, len, max_head_size, off; u32 size, len, max_head_size, off, truesize, page_offset;
struct sk_buff *nskb; struct sk_buff *nskb;
struct page *page; struct page *page;
int i, head_off; int i, head_off;
void *va;
/* We need a private copy of the skb and data buffers since /* We need a private copy of the skb and data buffers since
* the ebpf program can modify it. We segment the original skb * the ebpf program can modify it. We segment the original skb
...@@ -753,14 +754,17 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, ...@@ -753,14 +754,17 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size) if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
goto drop; goto drop;
size = min_t(u32, skb->len, max_head_size);
truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM;
/* Allocate skb head */ /* Allocate skb head */
page = page_pool_dev_alloc_pages(rq->page_pool); va = page_pool_dev_alloc_va(rq->page_pool, &truesize);
if (!page) if (!va)
goto drop; goto drop;
nskb = napi_build_skb(page_address(page), PAGE_SIZE); nskb = napi_build_skb(va, truesize);
if (!nskb) { if (!nskb) {
page_pool_put_full_page(rq->page_pool, page, true); page_pool_free_va(rq->page_pool, va, true);
goto drop; goto drop;
} }
...@@ -768,7 +772,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, ...@@ -768,7 +772,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
skb_copy_header(nskb, skb); skb_copy_header(nskb, skb);
skb_mark_for_recycle(nskb); skb_mark_for_recycle(nskb);
size = min_t(u32, skb->len, max_head_size);
if (skb_copy_bits(skb, 0, nskb->data, size)) { if (skb_copy_bits(skb, 0, nskb->data, size)) {
consume_skb(nskb); consume_skb(nskb);
goto drop; goto drop;
...@@ -783,14 +786,18 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, ...@@ -783,14 +786,18 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
len = skb->len - off; len = skb->len - off;
for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
page = page_pool_dev_alloc_pages(rq->page_pool); size = min_t(u32, len, PAGE_SIZE);
truesize = size;
page = page_pool_dev_alloc(rq->page_pool, &page_offset,
&truesize);
if (!page) { if (!page) {
consume_skb(nskb); consume_skb(nskb);
goto drop; goto drop;
} }
size = min_t(u32, len, PAGE_SIZE); skb_add_rx_frag(nskb, i, page, page_offset, size,
skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE); truesize);
if (skb_copy_bits(skb, off, page_address(page), if (skb_copy_bits(skb, off, page_address(page),
size)) { size)) {
consume_skb(nskb); consume_skb(nskb);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment