Commit bfe6d8d1 authored by Gal Pressman, committed by David S. Miller

net/mlx5e: Reorganize ethtool statistics

Categorize and reorganize the ethtool statistics counters by renaming
them with "rx_*" and "tx_*" prefixes and removing redundant and
duplicated counters, so they are easier to grasp and more user friendly.
Signed-off-by: Gal Pressman <galp@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ed80ec4c
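For illustration only, not part of the commit: the renamed counters come from descriptor tables whose strings are printf-style templates, so per-queue names such as "rx0_csum_complete" are produced by a single sprintf per counter. A minimal, self-contained userspace sketch of that pattern follows; the struct and table names and the specific format strings are illustrative stand-ins, not the exact mlx5e definitions.

#include <stdio.h>

#define ETH_GSTRING_LEN 32

/* Illustrative descriptor: the ethtool string template lives in the table,
 * already carrying the rx_/tx_ prefix and a %d placeholder for the queue.
 */
struct counter_desc {
        char format[ETH_GSTRING_LEN];
};

static const struct counter_desc rq_stats_desc[] = {
        { "rx%d_packets" },
        { "rx%d_csum_complete" },
        { "rx%d_lro_packets" },
};

#define NUM_RQ_STATS ((int)(sizeof(rq_stats_desc) / sizeof(rq_stats_desc[0])))

int main(void)
{
        char data[8][ETH_GSTRING_LEN];
        int idx = 0, ch, j;

        /* Same shape as the strings callback in the diff below: one sprintf
         * per counter per channel, the channel index filling the %d.
         */
        for (ch = 0; ch < 2; ch++)
                for (j = 0; j < NUM_RQ_STATS; j++)
                        sprintf(data[idx++], rq_stats_desc[j].format, ch);

        for (j = 0; j < idx; j++)
                printf("%s\n", data[j]); /* rx0_packets, rx0_csum_complete, ... */

        return 0;
}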
@@ -213,42 +213,41 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
         /* SW counters */
         for (i = 0; i < NUM_SW_COUNTERS; i++)
-                strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name);
+                strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
         /* Q counters */
         for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
-                strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name);
+                strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
         /* VPORT counters */
         for (i = 0; i < NUM_VPORT_COUNTERS; i++)
                 strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                        vport_stats_desc[i].name);
+                        vport_stats_desc[i].format);
         /* PPORT counters */
         for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
                 strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                        pport_802_3_stats_desc[i].name);
+                        pport_802_3_stats_desc[i].format);
         for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
                 strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                        pport_2863_stats_desc[i].name);
+                        pport_2863_stats_desc[i].format);
         for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
                 strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                        pport_2819_stats_desc[i].name);
+                        pport_2819_stats_desc[i].format);
         for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
-                        sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
-                                prio,
-                                pport_per_prio_traffic_stats_desc[i].name);
+                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                                pport_per_prio_traffic_stats_desc[i].format, prio);
         }
         pfc_combined = mlx5e_query_pfc_combined(priv);
         for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
                 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
-                        sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
-                                prio, pport_per_prio_pfc_stats_desc[i].name);
+                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                                pport_per_prio_pfc_stats_desc[i].format, prio);
                 }
         }
@@ -258,16 +257,15 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
         /* per channel counters */
         for (i = 0; i < priv->params.num_channels; i++)
                 for (j = 0; j < NUM_RQ_STATS; j++)
-                        sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i,
-                                rq_stats_desc[j].name);
+                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                                rq_stats_desc[j].format, i);
         for (tc = 0; tc < priv->params.num_tc; tc++)
                 for (i = 0; i < priv->params.num_channels; i++)
                         for (j = 0; j < NUM_SQ_STATS; j++)
                                 sprintf(data + (idx++) * ETH_GSTRING_LEN,
-                                        "tx%d_%s",
-                                        priv->channeltc_to_txq_map[i][tc],
-                                        sq_stats_desc[j].name);
+                                        sq_stats_desc[j].format,
+                                        priv->channeltc_to_txq_map[i][tc]);
 }
 static void mlx5e_get_strings(struct net_device *dev,
......
@@ -105,11 +105,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
                 s->rx_packets += rq_stats->packets;
                 s->rx_bytes += rq_stats->bytes;
-                s->lro_packets += rq_stats->lro_packets;
-                s->lro_bytes += rq_stats->lro_bytes;
+                s->rx_lro_packets += rq_stats->lro_packets;
+                s->rx_lro_bytes += rq_stats->lro_bytes;
                 s->rx_csum_none += rq_stats->csum_none;
-                s->rx_csum_sw += rq_stats->csum_sw;
-                s->rx_csum_inner += rq_stats->csum_inner;
+                s->rx_csum_complete += rq_stats->csum_complete;
+                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                 s->rx_wqe_err += rq_stats->wqe_err;
                 s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
                 s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
@@ -122,24 +122,23 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
                         s->tx_packets += sq_stats->packets;
                         s->tx_bytes += sq_stats->bytes;
-                        s->tso_packets += sq_stats->tso_packets;
-                        s->tso_bytes += sq_stats->tso_bytes;
-                        s->tso_inner_packets += sq_stats->tso_inner_packets;
-                        s->tso_inner_bytes += sq_stats->tso_inner_bytes;
+                        s->tx_tso_packets += sq_stats->tso_packets;
+                        s->tx_tso_bytes += sq_stats->tso_bytes;
+                        s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+                        s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
                         s->tx_queue_stopped += sq_stats->stopped;
                         s->tx_queue_wake += sq_stats->wake;
                         s->tx_queue_dropped += sq_stats->dropped;
-                        s->tx_csum_inner += sq_stats->csum_offload_inner;
-                        tx_offload_none += sq_stats->csum_offload_none;
+                        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+                        tx_offload_none += sq_stats->csum_none;
                 }
         }
         /* Update calculated offload counters */
-        s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
-        s->rx_csum_good = s->rx_packets - s->rx_csum_none -
-                s->rx_csum_sw;
+        s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+        s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
-        s->link_down_events = MLX5_GET(ppcnt_reg,
+        s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                 priv->stats.pport.phy_counters,
                 counter_set.phys_layer_cntrs.link_down_events);
 }
......
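A standalone sketch of the derived-counter arithmetic in the hunk above (struct and field names here are illustrative, not the mlx5e definitions): counters such as rx_csum_unnecessary are not incremented in the data path at all; they are computed once per statistics update by subtracting the explicitly counted cases from the packet total.

#include <inttypes.h>
#include <stdio.h>

/* Illustrative stand-in for the aggregated software stats. */
struct sw_stats {
        uint64_t rx_packets;
        uint64_t rx_csum_none;        /* no checksum information from HW */
        uint64_t rx_csum_complete;    /* full packet checksum reported by HW */
        uint64_t rx_csum_unnecessary; /* derived, never counted per packet */
};

static void update_derived(struct sw_stats *s)
{
        /* Whatever was neither "none" nor "complete" was validated by the
         * hardware, so it is obtained by subtraction.
         */
        s->rx_csum_unnecessary =
                s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
}

int main(void)
{
        struct sw_stats s = {
                .rx_packets = 1000,
                .rx_csum_none = 50,
                .rx_csum_complete = 200,
        };

        update_derived(&s);
        printf("rx_csum_unnecessary = %" PRIu64 "\n", s.rx_csum_unnecessary);
        return 0;
}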
@@ -689,7 +689,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
         if (is_first_ethertype_ip(skb)) {
                 skb->ip_summed = CHECKSUM_COMPLETE;
                 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
-                rq->stats.csum_sw++;
+                rq->stats.csum_complete++;
                 return;
         }
@@ -699,7 +699,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                 if (cqe_is_tunneled(cqe)) {
                         skb->csum_level = 1;
                         skb->encapsulation = 1;
-                        rq->stats.csum_inner++;
+                        rq->stats.csum_unnecessary_inner++;
                 }
                 return;
         }
......
@@ -192,12 +192,12 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                 if (skb->encapsulation) {
                         eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                 MLX5_ETH_WQE_L4_INNER_CSUM;
-                        sq->stats.csum_offload_inner++;
+                        sq->stats.csum_partial_inner++;
                 } else {
                         eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                 }
         } else
-                sq->stats.csum_offload_none++;
+                sq->stats.csum_none++;
         if (sq->cc != sq->prev_cc) {
                 sq->prev_cc = sq->cc;
......