Commit 89db09eb authored by Matthew Finlay, committed by David S. Miller

net/mlx5e: Add TX inner packet counters

Add TSO and TX checksum counters for tunneled, inner packets
Signed-off-by: Matthew Finlay <matt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 98795158
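
The new entries are plain software counters: each send queue tracks its own tso_inner_packets, tso_inner_bytes and csum_offload_inner, and mlx5e_update_stats() sums them into the device-wide vport counters that the driver exposes through its ethtool statistics. As an aside (not part of this patch), the sketch below shows one way to read the new counters from user space via the standard ethtool ioctl interface that backs `ethtool -S`; the interface name "eth0" and the filter on "inner" are placeholders for illustration, and error handling is kept minimal.

/* Minimal sketch: dump the "*inner*" counters of a netdev via SIOCETHTOOL. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *ifname = "eth0";	/* placeholder interface name */
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&drvinfo;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	unsigned int n = drvinfo.n_stats;	/* number of ETH_SS_STATS entries */
	struct ethtool_gstrings *names =
		calloc(1, sizeof(*names) + (size_t)n * ETH_GSTRING_LEN);
	struct ethtool_stats *vals =
		calloc(1, sizeof(*vals) + (size_t)n * sizeof(__u64));
	if (!names || !vals)
		return 1;

	names->cmd = ETHTOOL_GSTRINGS;	/* counter names, e.g. "tx_csum_inner" */
	names->string_set = ETH_SS_STATS;
	names->len = n;
	ifr.ifr_data = (char *)names;
	ioctl(fd, SIOCETHTOOL, &ifr);

	vals->cmd = ETHTOOL_GSTATS;	/* counter values, same order as the names */
	vals->n_stats = n;
	ifr.ifr_data = (char *)vals;
	ioctl(fd, SIOCETHTOOL, &ifr);

	for (unsigned int i = 0; i < n; i++) {
		const char *name = (const char *)&names->data[i * ETH_GSTRING_LEN];

		if (strstr(name, "inner"))	/* tso_inner_*, tx_csum_inner, ... */
			printf("%-32s %llu\n", name,
			       (unsigned long long)vals->data[i]);
	}

	free(names);
	free(vals);
	close(fd);
	return 0;
}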
@@ -103,12 +103,15 @@ static const char vport_strings[][ETH_GSTRING_LEN] = {
 	/* SW counters */
 	"tso_packets",
 	"tso_bytes",
+	"tso_inner_packets",
+	"tso_inner_bytes",
 	"lro_packets",
 	"lro_bytes",
 	"rx_csum_good",
 	"rx_csum_none",
 	"rx_csum_sw",
 	"tx_csum_offload",
+	"tx_csum_inner",
 	"tx_queue_stopped",
 	"tx_queue_wake",
 	"tx_queue_dropped",
@@ -141,18 +144,21 @@ struct mlx5e_vport_stats {
 	/* SW counters */
 	u64 tso_packets;
 	u64 tso_bytes;
+	u64 tso_inner_packets;
+	u64 tso_inner_bytes;
 	u64 lro_packets;
 	u64 lro_bytes;
 	u64 rx_csum_good;
 	u64 rx_csum_none;
 	u64 rx_csum_sw;
 	u64 tx_csum_offload;
+	u64 tx_csum_inner;
 	u64 tx_queue_stopped;
 	u64 tx_queue_wake;
 	u64 tx_queue_dropped;
 	u64 rx_wqe_err;

-#define NUM_VPORT_COUNTERS 32
+#define NUM_VPORT_COUNTERS 35
 };

 static const char pport_strings[][ETH_GSTRING_LEN] = {
@@ -252,7 +258,10 @@ static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
 	"packets",
 	"tso_packets",
 	"tso_bytes",
+	"tso_inner_packets",
+	"tso_inner_bytes",
 	"csum_offload_none",
+	"csum_offload_inner",
 	"stopped",
 	"wake",
 	"dropped",
@@ -263,12 +272,15 @@ struct mlx5e_sq_stats {
 	u64 packets;
 	u64 tso_packets;
 	u64 tso_bytes;
+	u64 tso_inner_packets;
+	u64 tso_inner_bytes;
 	u64 csum_offload_none;
+	u64 csum_offload_inner;
 	u64 stopped;
 	u64 wake;
 	u64 dropped;
 	u64 nop;

-#define NUM_SQ_STATS 8
+#define NUM_SQ_STATS 11
 };

 struct mlx5e_stats {
...
@@ -145,9 +145,12 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
 	/* Collect firts the SW counters and then HW for consistency */
 	s->tso_packets = 0;
 	s->tso_bytes = 0;
+	s->tso_inner_packets = 0;
+	s->tso_inner_bytes = 0;
 	s->tx_queue_stopped = 0;
 	s->tx_queue_wake = 0;
 	s->tx_queue_dropped = 0;
+	s->tx_csum_inner = 0;
 	tx_offload_none = 0;
 	s->lro_packets = 0;
 	s->lro_bytes = 0;
@@ -168,9 +171,12 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
 			s->tso_packets += sq_stats->tso_packets;
 			s->tso_bytes += sq_stats->tso_bytes;
+			s->tso_inner_packets += sq_stats->tso_inner_packets;
+			s->tso_inner_bytes += sq_stats->tso_inner_bytes;
 			s->tx_queue_stopped += sq_stats->stopped;
 			s->tx_queue_wake += sq_stats->wake;
 			s->tx_queue_dropped += sq_stats->dropped;
+			s->tx_csum_inner += sq_stats->csum_offload_inner;
 			tx_offload_none += sq_stats->csum_offload_none;
 		}
 	}
@@ -245,7 +251,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
 			       s->tx_broadcast_bytes;

 	/* Update calculated offload counters */
-	s->tx_csum_offload = s->tx_packets - tx_offload_none;
+	s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
 	s->rx_csum_good = s->rx_packets - s->rx_csum_none -
 			  s->rx_csum_sw;
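
A quick worked example of the adjusted calculation above, with made-up numbers: if the vport has sent tx_packets = 1000 frames, of which tx_offload_none = 100 requested no checksum offload and tx_csum_inner = 50 were encapsulated frames whose inner checksums were offloaded, then tx_csum_offload = 1000 - 100 - 50 = 850. It now counts only packets that used plain outer checksum offload; encapsulated packets are reported separately under tx_csum_inner instead of being folded into the same counter.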
...
@@ -187,11 +187,13 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
-		if (skb->encapsulation)
+		if (skb->encapsulation) {
 			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
 					  MLX5_ETH_WQE_L4_INNER_CSUM;
-		else
+			sq->stats.csum_offload_inner++;
+		} else {
 			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+		}
 	} else
 		sq->stats.csum_offload_none++;
@@ -201,21 +203,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	}

 	if (skb_is_gso(skb)) {
-		u32 payload_len;
-
 		eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
 		opcode = MLX5_OPCODE_LSO;
-		if (skb->encapsulation)
+		if (skb->encapsulation) {
 			ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
-		else
+			sq->stats.tso_inner_packets++;
+			sq->stats.tso_inner_bytes += skb->len - ihs;
+		} else {
 			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+			sq->stats.tso_packets++;
+			sq->stats.tso_bytes += skb->len - ihs;
+		}

-		payload_len = skb->len - ihs;
 		wi->num_bytes = skb->len +
 				(skb_shinfo(skb)->gso_segs - 1) * ihs;
-		sq->stats.tso_packets++;
-		sq->stats.tso_bytes += payload_len;
 	} else {
 		bf = sq->bf_budget &&
 		     !skb->xmit_more &&
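
With illustrative numbers for the encapsulated branch above: take a VXLAN-style TSO skb whose headers up to and including the inner TCP header total ihs = 104 bytes (14 + 20 + 8 + 8 for outer Ethernet/IPv4/UDP/VXLAN plus 14 + 20 + 20 for inner Ethernet/IPv4/TCP, no options) and which carries 4000 bytes of inner TCP payload split into gso_segs = 4. Then skb->len = 4104, so tso_inner_bytes grows by 4104 - 104 = 4000 (payload only, just as the removed payload_len fed tso_bytes before), while the unchanged wi->num_bytes = 4104 + (4 - 1) * 104 = 4416 still accounts for the 104-byte header stack being replicated onto every wire segment (4 * 104 + 4000).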
...