Commit d474d88f authored by Yunsheng Lin, committed by David S. Miller

net: hns3: add hns3_gro_complete for HW GRO process

When a GRO packet is received by the driver, the cwr field in its
struct tcphdr needs to be checked to decide whether to set
SKB_GSO_TCP_ECN in skb_shinfo(skb)->gso_type.

This patch therefore adds hns3_gro_complete to perform that check, and
adds hns3_handle_bdinfo, which dispatches to hns3_gro_complete and
hns3_rx_checksum.
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a4d2cdcb
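
The core of the change is ECN handling for hardware GRO: since the hardware
rather than the stack merges the TCP segments, the driver itself must inspect
the cwr bit of the merged TCP header and propagate it as SKB_GSO_TCP_ECN,
otherwise a later GSO resegmentation would lose the ECN marking. Below is a
minimal standalone sketch of that fix-up; the function name and the
precomputed tcp_offset parameter are illustrative assumptions, while the real
logic (including the VLAN/IP header walk that computes the offset) is
hns3_gro_complete() in the diff that follows.

/* Illustrative sketch, not driver code: the cwr -> SKB_GSO_TCP_ECN
 * fix-up that hns3_gro_complete() applies to a HW-GRO'ed skb. Assumes
 * tcp_offset (the TCP header offset within skb->data) is already known.
 */
static int gro_ecn_fixup_sketch(struct sk_buff *skb, unsigned int tcp_offset)
{
	struct tcphdr *th = (struct tcphdr *)(skb->data + tcp_offset);

	/* Record how many segments the hardware coalesced, so a later
	 * GSO resegmentation can reproduce them.
	 */
	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	/* CWR set in the merged header means the flow is ECN-capable. */
	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return 0;
}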
@@ -2331,6 +2331,44 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	}
 }
 
+static int hns3_gro_complete(struct sk_buff *skb)
+{
+	__be16 type = skb->protocol;
+	struct tcphdr *th;
+	int depth = 0;
+
+	while (type == htons(ETH_P_8021Q)) {
+		struct vlan_hdr *vh;
+
+		if ((depth + VLAN_HLEN) > skb_headlen(skb))
+			return -EFAULT;
+
+		vh = (struct vlan_hdr *)(skb->data + depth);
+		type = vh->h_vlan_encapsulated_proto;
+		depth += VLAN_HLEN;
+	}
+
+	if (type == htons(ETH_P_IP)) {
+		depth += sizeof(struct iphdr);
+	} else if (type == htons(ETH_P_IPV6)) {
+		depth += sizeof(struct ipv6hdr);
+	} else {
+		netdev_err(skb->dev,
+			   "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
+			   be16_to_cpu(type), depth);
+		return -EFAULT;
+	}
+
+	th = (struct tcphdr *)(skb->data + depth);
+	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+	if (th->cwr)
+		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	return 0;
+}
+
 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
 			     u32 l234info, u32 bd_base_info)
 {
@@ -2345,12 +2383,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
 	if (!(netdev->features & NETIF_F_RXCSUM))
 		return;
 
-	/* We MUST enable hardware checksum before enabling hardware GRO */
-	if (skb_shinfo(skb)->gso_size) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		return;
-	}
-
 	/* check if hardware has done checksum */
 	if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
 		return;
@@ -2567,7 +2599,8 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
 	return 0;
 }
 
-static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
-			       u32 bd_base_info)
+static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
+				     struct sk_buff *skb, u32 l234info,
+				     u32 bd_base_info)
 {
 	u16 gro_count;
@@ -2576,12 +2609,11 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
 	gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
 				    HNS3_RXD_GRO_COUNT_S);
 	/* if there is no HW GRO, do not set gro params */
-	if (!gro_count)
-		return;
+	if (!gro_count) {
+		hns3_rx_checksum(ring, skb, l234info, bd_base_info);
+		return 0;
+	}
 
-	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
-	 * to skb_shinfo(skb)->gso_segs
-	 */
 	NAPI_GRO_CB(skb)->count = gro_count;
 
 	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
@@ -2591,13 +2623,13 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
 	else if (l3_type == HNS3_L3_TYPE_IPV6)
 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 	else
-		return;
+		return -EFAULT;
 
 	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
 						    HNS3_RXD_GRO_SIZE_M,
 						    HNS3_RXD_GRO_SIZE_S);
-	if (skb_shinfo(skb)->gso_size)
-		tcp_gro_complete(skb);
+
+	return hns3_gro_complete(skb);
 }
 
 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
@@ -2622,16 +2654,85 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
 	skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
 }
 
-static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
-			     struct sk_buff **out_skb)
+static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb,
+			      struct hns3_desc *desc)
 {
 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	u32 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+	u32 l234info = le32_to_cpu(desc->rx.l234_info);
 	enum hns3_pkt_l2t_type l2_frame_type;
+	unsigned int len;
+	int ret;
+
+	/* Based on hw strategy, the tag offloaded will be stored at
+	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
+	 * in one layer tag case.
+	 */
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+		u16 vlan_tag;
+
+		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+					       vlan_tag);
+	}
+
+	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.non_vld_descs++;
+		u64_stats_update_end(&ring->syncp);
+		return -EINVAL;
+	}
+
+	if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
+						       BIT(HNS3_RXD_L2E_B))))) {
+		u64_stats_update_begin(&ring->syncp);
+		if (l234info & BIT(HNS3_RXD_L2E_B))
+			ring->stats.l2_err++;
+		else
+			ring->stats.err_pkt_len++;
+		u64_stats_update_end(&ring->syncp);
+		return -EFAULT;
+	}
+
+	len = skb->len;
+
+	/* Do update ip stack process */
+	skb->protocol = eth_type_trans(skb, netdev);
+
+	/* This is needed in order to enable forwarding support */
+	ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info);
+	if (unlikely(ret)) {
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.rx_err_cnt++;
+		u64_stats_update_end(&ring->syncp);
+		return ret;
+	}
+
+	l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
+					HNS3_RXD_DMAC_S);
+	u64_stats_update_begin(&ring->syncp);
+	ring->stats.rx_pkts++;
+	ring->stats.rx_bytes += len;
+
+	if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
+		ring->stats.rx_multicast++;
+
+	u64_stats_update_end(&ring->syncp);
+
+	ring->tqp_vector->rx_group.total_bytes += len;
+
+	return 0;
+}
+
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
+			     struct sk_buff **out_skb)
+{
 	struct sk_buff *skb = ring->skb;
 	struct hns3_desc_cb *desc_cb;
 	struct hns3_desc *desc;
 	u32 bd_base_info;
-	u32 l234info;
 	int length;
 	int ret;
@@ -2691,62 +2792,12 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 		       ALIGN(ring->pull_len, sizeof(long)));
 	}
 
-	l234info = le32_to_cpu(desc->rx.l234_info);
-	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-
-	/* Based on hw strategy, the tag offloaded will be stored at
-	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
-	 * in one layer tag case.
-	 */
-	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
-		u16 vlan_tag;
-
-		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
-			__vlan_hwaccel_put_tag(skb,
-					       htons(ETH_P_8021Q),
-					       vlan_tag);
-	}
-
-	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.non_vld_descs++;
-		u64_stats_update_end(&ring->syncp);
-
+	ret = hns3_handle_bdinfo(ring, skb, desc);
+	if (unlikely(ret)) {
 		dev_kfree_skb_any(skb);
-		return -EINVAL;
-	}
-
-	if (unlikely((!desc->rx.pkt_len) ||
-		     (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
-				  BIT(HNS3_RXD_L2E_B))))) {
-		u64_stats_update_begin(&ring->syncp);
-		if (l234info & BIT(HNS3_RXD_L2E_B))
-			ring->stats.l2_err++;
-		else
-			ring->stats.err_pkt_len++;
-		u64_stats_update_end(&ring->syncp);
-
-		dev_kfree_skb_any(skb);
-		return -EFAULT;
+		return ret;
 	}
 
-	l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
-					HNS3_RXD_DMAC_S);
-	u64_stats_update_begin(&ring->syncp);
-	if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
-		ring->stats.rx_multicast++;
-
-	ring->stats.rx_pkts++;
-	ring->stats.rx_bytes += skb->len;
-	u64_stats_update_end(&ring->syncp);
-
-	ring->tqp_vector->rx_group.total_bytes += skb->len;
-
-	/* This is needed in order to enable forwarding support */
-	hns3_set_gro_param(skb, l234info, bd_base_info);
-
-	hns3_rx_checksum(ring, skb, l234info, bd_base_info);
-
 	*out_skb = skb;
 	hns3_set_rx_skb_rss_type(ring, skb);
@@ -2758,7 +2809,6 @@ int hns3_clean_rx_ring(
 		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
 {
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
-	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
 	int recv_pkts, recv_bds, clean_count, err;
 	int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
 	struct sk_buff *skb = ring->skb;
@@ -2795,8 +2845,6 @@ int hns3_clean_rx_ring(
 			continue;
 		}
 
-		/* Do update ip stack process */
-		skb->protocol = eth_type_trans(skb, netdev);
 		rx_fn(ring, skb);
 		recv_bds += ring->pending_buf;
 		clean_count += ring->pending_buf;