Commit b2598318 authored by Yunsheng Lin, committed by David S. Miller

net: hns3: check FE bit before calling hns3_add_frag()

A BD with the FE bit set means that it is the last BD of a packet.
Currently the FE bit is checked inside hns3_add_frag(), which is
unnecessary in some cases because the FE bit of that BD may already
have been checked in an earlier pass.

This patch checks the FE bit before calling hns3_add_frag() after
processing the first BD of a SKB, and adjusts the location of the
memcpy() to reduce duplication.
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e1b5e598
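As context for the diff below: the reworked flow checks the FE bit of the
first BD up front and only enters the frag-gathering loop when more BDs
follow; that loop then runs as a do/while until it sees a BD with FE set,
and the head-data memcpy() happens once afterwards. The user-space sketch
below only models that control flow; fake_bd, add_frags() and FE_BIT are
simplified stand-ins for illustration, not the driver's real structures
or helpers.

#include <stdio.h>

#define FE_BIT	0x1	/* stand-in for BIT(HNS3_RXD_FE_B): last BD of a packet */

struct fake_bd {
	unsigned int flags;	/* simplified bd_base_info */
	unsigned int len;
};

/* Gather follow-up BDs until one with the FE bit is seen; this mirrors the
 * shape of the new do/while loop. Returns the number of BDs consumed here.
 */
static int add_frags(const struct fake_bd *ring, int next, int ring_size)
{
	unsigned int flags;
	int used = 0;

	do {
		flags = ring[next].flags;
		printf("  frag BD %d, len %u\n", next, ring[next].len);
		next = (next + 1) % ring_size;
		used++;
	} while (!(flags & FE_BIT));

	return used;
}

int main(void)
{
	/* one packet spread over three BDs; only the last BD has FE set */
	struct fake_bd ring[] = {
		{ .flags = 0,      .len = 2048 },
		{ .flags = 0,      .len = 2048 },
		{ .flags = FE_BIT, .len = 512  },
	};
	int ring_size = (int)(sizeof(ring) / sizeof(ring[0]));
	int used = 1;	/* the first BD is always consumed */

	printf("first BD 0, len %u\n", ring[0].len);

	/* the caller checks FE on the first BD and only calls the
	 * frag-gathering helper when more BDs follow
	 */
	if (!(ring[0].flags & FE_BIT))
		used += add_frags(ring, 1, ring_size);

	printf("packet consumed %d BDs\n", used);
	return 0;
}

The sketch ignores the case where a follow-up BD is not ready yet (the
driver's pending_buf handling across polls); it is only meant to show why
the caller no longer needs hns3_add_frag() to work out which BD's FE bit
to check.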
@@ -2788,7 +2788,6 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
 static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 			  unsigned char *va)
 {
-#define HNS3_NEED_ADD_FRAG	1
 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
 	struct net_device *netdev = ring_to_netdev(ring);
 	struct sk_buff *skb;
@@ -2832,33 +2831,19 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 			     desc_cb);
 	ring_ptr_move_fw(ring, next_to_clean);
 
-	return HNS3_NEED_ADD_FRAG;
+	return 0;
 }
 
-static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
-			 bool pending)
+static int hns3_add_frag(struct hns3_enet_ring *ring)
 {
 	struct sk_buff *skb = ring->skb;
 	struct sk_buff *head_skb = skb;
 	struct sk_buff *new_skb;
 	struct hns3_desc_cb *desc_cb;
-	struct hns3_desc *pre_desc;
+	struct hns3_desc *desc;
 	u32 bd_base_info;
-	int pre_bd;
 
-	/* if there is pending bd, the SW param next_to_clean has moved
-	 * to next and the next is NULL
-	 */
-	if (pending) {
-		pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
-			 ring->desc_num;
-		pre_desc = &ring->desc[pre_bd];
-		bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
-	} else {
-		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-	}
-
-	while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
+	do {
 		desc = &ring->desc[ring->next_to_clean];
 		desc_cb = &ring->desc_cb[ring->next_to_clean];
 		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2895,7 +2880,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
 		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
 		ring_ptr_move_fw(ring, next_to_clean);
 		ring->pending_buf++;
-	}
+	} while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
 
 	return 0;
 }
@@ -3063,28 +3048,23 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
 
 		if (ret < 0) /* alloc buffer fail */
 			return ret;
-		if (ret > 0) { /* need add frag */
-			ret = hns3_add_frag(ring, desc, false);
+		if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
+			ret = hns3_add_frag(ring);
 			if (ret)
 				return ret;
-
-			/* As the head data may be changed when GRO enable, copy
-			 * the head data in after other data rx completed
-			 */
-			memcpy(skb->data, ring->va,
-			       ALIGN(ring->pull_len, sizeof(long)));
 		}
 	} else {
-		ret = hns3_add_frag(ring, desc, true);
+		ret = hns3_add_frag(ring);
 		if (ret)
 			return ret;
+	}
 
-		/* As the head data may be changed when GRO enable, copy
-		 * the head data in after other data rx completed
-		 */
-		memcpy(skb->data, ring->va,
-		       ALIGN(ring->pull_len, sizeof(long)));
-	}
+	/* As the head data may be changed when GRO enable, copy
+	 * the head data in after other data rx completed
	 */
+	if (skb->len > HNS3_RX_HEAD_SIZE)
+		memcpy(skb->data, ring->va,
+		       ALIGN(ring->pull_len, sizeof(long)));
 
 	ret = hns3_handle_bdinfo(ring, skb);
 	if (unlikely(ret)) {