Commit cc10e84b authored by Joe Damato, committed by David S. Miller

mlx5: add support for page_pool_get_stats

This change adds support for the page_pool_get_stats API to mlx5. If the
user has enabled CONFIG_PAGE_POOL_STATS in their kernel, ethtool will
output page pool stats.
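For example, on a kernel built with CONFIG_PAGE_POOL_STATS, running ethtool -S eth0 | grep pp_ (the interface name is illustrative) should list the new counters, both as device-wide totals (rx_pp_alloc_fast, rx_pp_recycle_ring, ...) and per receive ring (rx0_pp_alloc_fast, ...).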
Signed-off-by: Joe Damato <jdamato@fastly.com>
Acked-by: Saeed Mahameed <saeed@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a3dd9828
@@ -37,6 +37,10 @@
 #include "en/ptp.h"
 #include "en/port.h"
 
+#ifdef CONFIG_PAGE_POOL_STATS
+#include <net/page_pool.h>
+#endif
+
 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
 {
         return !priv->profile->stats_grps_num ? 0 :
@@ -183,6 +187,19 @@ static const struct counter_desc sw_stats_desc[] = {
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
+#ifdef CONFIG_PAGE_POOL_STATS
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
+#endif
 #ifdef CONFIG_MLX5_EN_TLS
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
@@ -349,6 +366,19 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
         s->rx_congst_umr += rq_stats->congst_umr;
         s->rx_arfs_err += rq_stats->arfs_err;
         s->rx_recover += rq_stats->recover;
+#ifdef CONFIG_PAGE_POOL_STATS
+        s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
+        s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow;
+        s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty;
+        s->rx_pp_alloc_refill += rq_stats->pp_alloc_refill;
+        s->rx_pp_alloc_waive += rq_stats->pp_alloc_waive;
+        s->rx_pp_alloc_slow_high_order += rq_stats->pp_alloc_slow_high_order;
+        s->rx_pp_recycle_cached += rq_stats->pp_recycle_cached;
+        s->rx_pp_recycle_cache_full += rq_stats->pp_recycle_cache_full;
+        s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
+        s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
+        s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
+#endif
 #ifdef CONFIG_MLX5_EN_TLS
         s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
         s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
@@ -455,6 +485,35 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
         }
 }
 
+#ifdef CONFIG_PAGE_POOL_STATS
+static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
+{
+        struct mlx5e_rq_stats *rq_stats = c->rq.stats;
+        struct page_pool *pool = c->rq.page_pool;
+        struct page_pool_stats stats = { 0 };
+
+        if (!page_pool_get_stats(pool, &stats))
+                return;
+
+        rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
+        rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
+        rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
+        rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
+        rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
+        rq_stats->pp_alloc_refill = stats.alloc_stats.refill;
+        rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
+        rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
+        rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
+        rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
+        rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
+}
+
+#else
+static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
+{
+}
+#endif
+
 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
 {
         struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -465,8 +524,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
         for (i = 0; i < priv->stats_nch; i++) {
                 struct mlx5e_channel_stats *channel_stats =
                         priv->channel_stats[i];
+
                 int j;
 
+                mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);
+
                 mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
                 mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
                 mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
@@ -1887,6 +1949,19 @@ static const struct counter_desc rq_stats_desc[] = {
         { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
         { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
         { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
+#ifdef CONFIG_PAGE_POOL_STATS
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
+#endif
 #ifdef CONFIG_MLX5_EN_TLS
         { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
         { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
...
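For context, the update helper above follows the consumer pattern of the page_pool_get_stats() API: the caller passes a zero-initialized struct page_pool_stats, the core accumulates the pool's counters into it, and the driver then snapshots the totals with plain assignment rather than +=, since the reported values are cumulative and the refresh runs on every stats query. A minimal sketch of that pattern, with hypothetical names (snapshot_pp_stats and the two out-parameters), assuming a kernel with CONFIG_PAGE_POOL_STATS:

#ifdef CONFIG_PAGE_POOL_STATS
#include <net/page_pool.h>

/* Hypothetical example: snapshot one pool's cumulative counters into
 * driver-owned fields. Plain assignment (not +=) avoids double counting
 * when this runs on every statistics refresh.
 */
static void snapshot_pp_stats(struct page_pool *pool, u64 *alloc_fast,
                              u64 *recycle_ring)
{
        struct page_pool_stats stats = { 0 }; /* must start zeroed: the API adds into it */

        if (!page_pool_get_stats(pool, &stats))
                return; /* nothing collected; keep the previous snapshot */

        *alloc_fast = stats.alloc_stats.fast;     /* hits in the per-cpu allocation cache */
        *recycle_ring = stats.recycle_stats.ring; /* pages recycled via the pool's ptr_ring */
}
#endif /* CONFIG_PAGE_POOL_STATS */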
@@ -205,7 +205,19 @@ struct mlx5e_sw_stats {
         u64 ch_aff_change;
         u64 ch_force_irq;
         u64 ch_eq_rearm;
-
+#ifdef CONFIG_PAGE_POOL_STATS
+        u64 rx_pp_alloc_fast;
+        u64 rx_pp_alloc_slow;
+        u64 rx_pp_alloc_slow_high_order;
+        u64 rx_pp_alloc_empty;
+        u64 rx_pp_alloc_refill;
+        u64 rx_pp_alloc_waive;
+        u64 rx_pp_recycle_cached;
+        u64 rx_pp_recycle_cache_full;
+        u64 rx_pp_recycle_ring;
+        u64 rx_pp_recycle_ring_full;
+        u64 rx_pp_recycle_released_ref;
+#endif
 #ifdef CONFIG_MLX5_EN_TLS
         u64 tx_tls_encrypted_packets;
         u64 tx_tls_encrypted_bytes;
@@ -352,6 +364,19 @@ struct mlx5e_rq_stats {
         u64 congst_umr;
         u64 arfs_err;
         u64 recover;
+#ifdef CONFIG_PAGE_POOL_STATS
+        u64 pp_alloc_fast;
+        u64 pp_alloc_slow;
+        u64 pp_alloc_slow_high_order;
+        u64 pp_alloc_empty;
+        u64 pp_alloc_refill;
+        u64 pp_alloc_waive;
+        u64 pp_recycle_cached;
+        u64 pp_recycle_cache_full;
+        u64 pp_recycle_ring;
+        u64 pp_recycle_ring_full;
+        u64 pp_recycle_released_ref;
+#endif
 #ifdef CONFIG_MLX5_EN_TLS
         u64 tls_decrypted_packets;
         u64 tls_decrypted_bytes;
...
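A note on the split visible in the two structs above: the per-ring mlx5e_rq_stats fields hold the latest page pool snapshot for one RQ, while the device-wide mlx5e_sw_stats rx_pp_* fields are accumulated across the rings on each refresh (the += loop in mlx5e_stats_grp_sw_update_stats_rq_stats). No new UAPI is introduced; the counters ride the existing ethtool -S path.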