Commit e74a726d authored by Hao Chen, committed by David S. Miller

net: hns3: refactor hns3_nic_reuse_page()

Split the rx copybreak handling out of hns3_nic_reuse_page() into a
separate function to improve code simplicity.
Signed-off-by: Hao Chen <chenhao288@hisilicon.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ed0e658c
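
For readers unfamiliar with the pattern being factored out: "copybreak" means that for frames at or below a size threshold the driver copies the payload into a small, freshly allocated buffer so the DMA page can be reused immediately, instead of attaching the much larger page to the skb. The sketch below is a minimal userspace illustration of that decision, not the hns3 code itself; RX_COPYBREAK, struct rx_buffer, attach_page() and handle_copybreak() are hypothetical stand-ins for the driver's ring state, and only the control flow mirrors hns3_nic_reuse_page()/hns3_handle_rx_copybreak().

/* Minimal userspace sketch of the rx copybreak decision.
 * All names here are hypothetical stand-ins, not driver code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RX_COPYBREAK 256	/* threshold, analogous to ring->rx_copybreak */

struct rx_buffer {
	const unsigned char *page;	/* stand-in for the receive DMA page */
	size_t len;			/* bytes received in this buffer */
};

/* Stand-in for skb_add_rx_frag() on the non-copybreak path: the page
 * itself is handed to the stack, so it cannot be reused right away. */
static void attach_page(const struct rx_buffer *buf)
{
	printf("large frame (%zu bytes): attach page, page not reusable yet\n",
	       buf->len);
}

/* Copybreak path: the frame is small, so copy it out and let the page
 * be reused for the next packet immediately. The error return mirrors
 * the -ENOMEM/frag_alloc_err accounting in hns3_handle_rx_copybreak(). */
static int handle_copybreak(const struct rx_buffer *buf)
{
	unsigned char *frag = malloc(buf->len);

	if (!frag)
		return -1;	/* analogous to returning -ENOMEM */

	memcpy(frag, buf->page, buf->len);
	printf("small frame (%zu bytes): copied out, page reusable\n",
	       buf->len);
	free(frag);
	return 0;
}

int main(void)
{
	unsigned char page[4096] = { 0 };
	struct rx_buffer small = { .page = page, .len = 64 };
	struct rx_buffer large = { .page = page, .len = 1500 };

	if (small.len <= RX_COPYBREAK)
		handle_copybreak(&small);	/* copybreak branch */
	if (large.len > RX_COPYBREAK)
		attach_page(&large);		/* page-attach branch */
	return 0;
}

The commit below keeps this branch structure intact; it only moves the copy path into its own helper so that each branch of hns3_nic_reuse_page() reads as a single decision.
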
@@ -3546,6 +3546,38 @@ static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
 	return page_count(cb->priv) == cb->pagecnt_bias;
 }
 
+static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
+				    struct hns3_enet_ring *ring,
+				    int pull_len,
+				    struct hns3_desc_cb *desc_cb)
+{
+	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+	u32 frag_offset = desc_cb->page_offset + pull_len;
+	int size = le16_to_cpu(desc->rx.size);
+	u32 frag_size = size - pull_len;
+	void *frag = napi_alloc_frag(frag_size);
+
+	if (unlikely(!frag)) {
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.frag_alloc_err++;
+		u64_stats_update_end(&ring->syncp);
+
+		hns3_rl_err(ring_to_netdev(ring),
+			    "failed to allocate rx frag\n");
+		return -ENOMEM;
+	}
+
+	desc_cb->reuse_flag = 1;
+	memcpy(frag, desc_cb->buf + frag_offset, frag_size);
+	skb_add_rx_frag(skb, i, virt_to_page(frag),
+			offset_in_page(frag), frag_size, frag_size);
+
+	u64_stats_update_begin(&ring->syncp);
+	ring->stats.frag_alloc++;
+	u64_stats_update_end(&ring->syncp);
+
+	return 0;
+}
+
 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_enet_ring *ring, int pull_len,
 				struct hns3_desc_cb *desc_cb)
@@ -3555,6 +3587,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	int size = le16_to_cpu(desc->rx.size);
 	u32 truesize = hns3_buf_size(ring);
 	u32 frag_size = size - pull_len;
+	int ret = 0;
 	bool reused;
 
 	if (ring->page_pool) {
@@ -3589,27 +3622,9 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 		desc_cb->page_offset = 0;
 		desc_cb->reuse_flag = 1;
 	} else if (frag_size <= ring->rx_copybreak) {
-		void *frag = napi_alloc_frag(frag_size);
-
-		if (unlikely(!frag)) {
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.frag_alloc_err++;
-			u64_stats_update_end(&ring->syncp);
-
-			hns3_rl_err(ring_to_netdev(ring),
-				    "failed to allocate rx frag\n");
+		ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
+		if (ret)
 			goto out;
-		}
-
-		desc_cb->reuse_flag = 1;
-		memcpy(frag, desc_cb->buf + frag_offset, frag_size);
-		skb_add_rx_frag(skb, i, virt_to_page(frag),
-				offset_in_page(frag), frag_size, frag_size);
-
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.frag_alloc++;
-		u64_stats_update_end(&ring->syncp);
-		return;
 	}
 
 out: