Commit aeda9bf8 authored by Yunsheng Lin, committed by David S. Miller

net: hns3: batch the page reference count updates

Batch the page reference count updates instead of doing them
one at a time. By doing this we can improve the overall receive
performance by avoiding some atomic increment operations when
the rx page is reused.
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b948577b
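The pattern adopted by the diff below is pagecnt_bias batching: take a large batch of page references with a single page_ref_add(), hand each fragment to the stack by decrementing a driver-private (non-atomic) counter, decide whether the page can be recycled from page_count(page) - pagecnt_bias == 1, and return whatever is left of the batch with one __page_frag_cache_drain() call. The following user-space sketch only illustrates that accounting; it is not the driver code, and every name in it (fake_page, fake_desc_cb, the rx_*/stack_* helpers, the plain C11 atomic standing in for page->_refcount) is a hypothetical stand-in.

/* Minimal sketch of pagecnt_bias batching; compile with a C11 compiler. */
#include <assert.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_page {
        atomic_int refcount;            /* stands in for page->_refcount */
};

struct fake_desc_cb {
        struct fake_page *page;
        unsigned short pagecnt_bias;    /* driver-private, non-atomic share */
};

/* One atomic add buys USHRT_MAX references up front. */
static void rx_buffer_init(struct fake_desc_cb *cb, struct fake_page *page)
{
        atomic_store(&page->refcount, 1);
        atomic_fetch_add(&page->refcount, USHRT_MAX - 1);
        cb->page = page;
        cb->pagecnt_bias = USHRT_MAX;
}

/* Handing a fragment to the stack is only a local decrement, no atomics. */
static void rx_give_frag_to_stack(struct fake_desc_cb *cb)
{
        cb->pagecnt_bias--;
}

/* Reusable when the just-handed fragment is the only reference not
 * covered by the driver's batch.
 */
static int rx_page_can_reuse(struct fake_desc_cb *cb)
{
        return atomic_load(&cb->page->refcount) - cb->pagecnt_bias == 1;
}

/* The stack eventually drops its per-fragment reference (put_page()). */
static void stack_release_frag(struct fake_page *page)
{
        atomic_fetch_sub(&page->refcount, 1);
}

/* Teardown: drop the unused part of the batch in one atomic operation. */
static void rx_buffer_drain(struct fake_desc_cb *cb)
{
        atomic_fetch_sub(&cb->page->refcount, cb->pagecnt_bias);
        cb->pagecnt_bias = 0;
}

int main(void)
{
        struct fake_page page;
        struct fake_desc_cb cb;

        rx_buffer_init(&cb, &page);

        rx_give_frag_to_stack(&cb);     /* first buffer cut from the page */
        assert(rx_page_can_reuse(&cb)); /* nothing else is outstanding    */

        rx_give_frag_to_stack(&cb);     /* second buffer, first one still */
        assert(!rx_page_can_reuse(&cb));/* held by the stack              */

        stack_release_frag(&page);      /* stack frees the first fragment */
        assert(rx_page_can_reuse(&cb));

        stack_release_frag(&page);      /* ...and the second one */
        rx_buffer_drain(&cb);

        printf("refcount after drain: %d\n", atomic_load(&page.refcount));
        return 0;
}

Note that, as in the patch, the reuse check runs right after the bias is decremented for the fragment being handed out, so a difference of 1 means that in-flight fragment is the only reference the batch does not cover.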
@@ -2302,6 +2302,8 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
 	cb->buf = page_address(p);
 	cb->length = hns3_page_size(ring);
 	cb->type = DESC_TYPE_PAGE;
+	page_ref_add(p, USHRT_MAX - 1);
+	cb->pagecnt_bias = USHRT_MAX;
 
 	return 0;
 }
@@ -2311,8 +2313,8 @@ static void hns3_free_buffer(struct hns3_enet_ring *ring,
 {
 	if (cb->type == DESC_TYPE_SKB)
 		dev_kfree_skb_any((struct sk_buff *)cb->priv);
-	else if (!HNAE3_IS_TX_RING(ring))
-		put_page((struct page *)cb->priv);
+	else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
+		__page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
 	memset(cb, 0, sizeof(*cb));
 }
@@ -2610,6 +2612,11 @@ static bool hns3_page_is_reusable(struct page *page)
 		!page_is_pfmemalloc(page);
 }
 
+static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
+{
+	return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
+}
+
 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_enet_ring *ring, int pull_len,
 				struct hns3_desc_cb *desc_cb)
@@ -2618,6 +2625,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	int size = le16_to_cpu(desc->rx.size);
 	u32 truesize = hns3_buf_size(ring);
 
+	desc_cb->pagecnt_bias--;
 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
 			size - pull_len, truesize);
@@ -2625,20 +2633,27 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	 * when page_offset rollback to zero, flag default unreuse
 	 */
 	if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
-	    (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
+	    (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
+		__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
 		return;
+	}
 
 	/* Move offset up to the next cache line */
 	desc_cb->page_offset += truesize;
 
 	if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
 		desc_cb->reuse_flag = 1;
-		/* Bump ref count on page before it is given */
-		get_page(desc_cb->priv);
-	} else if (page_count(desc_cb->priv) == 1) {
+	} else if (hns3_can_reuse_page(desc_cb)) {
 		desc_cb->reuse_flag = 1;
 		desc_cb->page_offset = 0;
-		get_page(desc_cb->priv);
+	} else if (desc_cb->pagecnt_bias) {
+		__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
+		return;
+	}
+
+	if (unlikely(!desc_cb->pagecnt_bias)) {
+		page_ref_add(desc_cb->priv, USHRT_MAX);
+		desc_cb->pagecnt_bias = USHRT_MAX;
 	}
 }
@@ -2846,7 +2861,8 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 		if (likely(hns3_page_is_reusable(desc_cb->priv)))
 			desc_cb->reuse_flag = 1;
 		else /* This page cannot be reused so discard it */
-			put_page(desc_cb->priv);
+			__page_frag_cache_drain(desc_cb->priv,
+						desc_cb->pagecnt_bias);
 
 		ring_ptr_move_fw(ring, next_to_clean);
 		return 0;
......
@@ -287,6 +287,7 @@ struct hns3_desc_cb {
 	/* desc type, used by the ring user to mark the type of the priv data */
 	u16 type;
+	u16 pagecnt_bias;
 };
 
 enum hns3_pkt_l3type {
......