Commit 1b223dd3 authored by Saeed Mahameed, committed by David S. Miller

net/mlx5e: Fix checksum handling for non-stripped vlan packets

Now that rx-vlan offload can be disabled, packets can be received
with the vlan tag not stripped, which means is_first_ethertype_ip will
return false. For such packets we need to check whether the hardware
reported csum OK, and if so report CHECKSUM_UNNECESSARY.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 36350114
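
Before the diff itself, here is a minimal, hedged user-space sketch of the checksum decision flow this patch introduces in mlx5e_handle_csum(). The enum, struct and function names below (fake_cqe, handle_csum, the boolean fields) are invented stand-ins for illustration, not the kernel definitions; only the ordering of the checks mirrors the patch: LRO first, then the CHECKSUM_COMPLETE path for plain IP frames, then the new fallback that trusts the hardware CQE L3/L4 "OK" bits (covering frames whose vlan tag was not stripped), and finally CHECKSUM_NONE. The kernel path additionally marks tunneled frames (csum_level = 1, csum_inner counter), which this sketch omits.

/*
 * Simplified model of the checksum decision order after this patch.
 * Not driver code: all names here are made up for the example.
 */
#include <stdbool.h>
#include <stdio.h>

enum csum_result { CSUM_NONE, CSUM_COMPLETE, CSUM_UNNECESSARY };

struct fake_cqe {
        bool l3_ok;     /* models cqe->hds_ip_ext & CQE_L3_OK */
        bool l4_ok;     /* models cqe->hds_ip_ext & CQE_L4_OK */
};

static enum csum_result handle_csum(bool lro, bool first_ethertype_ip,
                                    const struct fake_cqe *cqe)
{
        if (lro)
                return CSUM_UNNECESSARY;
        if (first_ethertype_ip)
                return CSUM_COMPLETE;     /* csum taken from cqe->check_sum */
        if (cqe->l3_ok && cqe->l4_ok)
                return CSUM_UNNECESSARY;  /* new fallback added by the patch */
        return CSUM_NONE;
}

int main(void)
{
        /* A non-LRO frame with an unstripped vlan tag: is_first_ethertype_ip
         * would be false, but the hardware validated the L3/L4 checksums. */
        struct fake_cqe cqe = { .l3_ok = true, .l4_ok = true };

        printf("result = %d (2 == CHECKSUM_UNNECESSARY)\n",
               handle_csum(false, false, &cqe));
        return 0;
}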
@@ -109,6 +109,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 		s->lro_bytes += rq_stats->lro_bytes;
 		s->rx_csum_none += rq_stats->csum_none;
 		s->rx_csum_sw += rq_stats->csum_sw;
+		s->rx_csum_inner += rq_stats->csum_inner;
 		s->rx_wqe_err += rq_stats->wqe_err;
 		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
 		s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
...
@@ -543,16 +543,26 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 
 	if (lro) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else if (likely(is_first_ethertype_ip(skb))) {
+		return;
+	}
+
+	if (is_first_ethertype_ip(skb)) {
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
 		rq->stats.csum_sw++;
-	} else {
-		goto csum_none;
+		return;
 	}
 
-	return;
-
+	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
+		   (cqe->hds_ip_ext & CQE_L4_OK))) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		if (cqe_is_tunneled(cqe)) {
+			skb->csum_level = 1;
+			skb->encapsulation = 1;
+			rq->stats.csum_inner++;
+		}
+		return;
+	}
 csum_none:
 	skb->ip_summed = CHECKSUM_NONE;
 	rq->stats.csum_none++;
...
@@ -62,6 +62,7 @@ struct mlx5e_sw_stats {
 	u64 rx_csum_good;
 	u64 rx_csum_none;
 	u64 rx_csum_sw;
+	u64 rx_csum_inner;
 	u64 tx_csum_offload;
 	u64 tx_csum_inner;
 	u64 tx_queue_stopped;
@@ -90,6 +91,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
@@ -272,8 +274,9 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
 struct mlx5e_rq_stats {
 	u64 packets;
 	u64 bytes;
-	u64 csum_none;
 	u64 csum_sw;
+	u64 csum_inner;
+	u64 csum_none;
 	u64 lro_packets;
 	u64 lro_bytes;
 	u64 wqe_err;
@@ -285,8 +288,9 @@ struct mlx5e_rq_stats {
 static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) },
...
@@ -645,7 +645,8 @@ struct mlx5_err_cqe {
 };
 
 struct mlx5_cqe64 {
-	u8 rsvd0[2];
+	u8 outer_l3_tunneled;
+	u8 rsvd0;
 	__be16 wqe_id;
 	u8 lro_tcppsh_abort_dupack;
 	u8 lro_min_ttl;
@@ -659,7 +660,7 @@ struct mlx5_cqe64 {
 	__be16 slid;
 	__be32 flags_rqpn;
 	u8 hds_ip_ext;
-	u8 l4_hdr_type_etc;
+	u8 l4_l3_hdr_type;
 	__be16 vlan_info;
 	__be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
 	__be32 imm_inval_pkey;
@@ -680,12 +681,22 @@ static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
 
 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
 {
-	return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
+}
+
+static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
+{
+	return (cqe->l4_l3_hdr_type >> 2) & 0x3;
+}
+
+static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+{
+	return cqe->outer_l3_tunneled & 0x1;
 }
 
 static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
 {
-	return !!(cqe->l4_hdr_type_etc & 0x1);
+	return !!(cqe->l4_l3_hdr_type & 0x1);
 }
 
 static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
...
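
For readers decoding CQEs by hand, here is a short hedged illustration of how the renamed l4_l3_hdr_type byte is unpacked by the helpers in the hunk above: bits [6:4] hold the L4 header type, bits [3:2] the L3 header type, and bit 0 the vlan-present flag. The byte value below is made up for the example, and the local helper names are stand-ins for get_cqe_l4_hdr_type(), get_cqe_l3_hdr_type() and cqe_has_vlan().

/* Stand-alone illustration of the l4_l3_hdr_type bit layout; not driver code. */
#include <stdint.h>
#include <stdio.h>

static uint8_t l4_hdr_type(uint8_t v) { return (v >> 4) & 0x7; } /* bits [6:4] */
static uint8_t l3_hdr_type(uint8_t v) { return (v >> 2) & 0x3; } /* bits [3:2] */
static int     has_vlan(uint8_t v)    { return !!(v & 0x1); }    /* bit 0 */

int main(void)
{
        uint8_t l4_l3_hdr_type = 0x25;  /* example value: 0b00100101 */

        printf("l4=%d l3=%d vlan=%d\n",
               l4_hdr_type(l4_l3_hdr_type),   /* 2 */
               l3_hdr_type(l4_l3_hdr_type),   /* 1 */
               has_vlan(l4_l3_hdr_type));     /* 1 */
        return 0;
}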