Commit 9152cff0 authored by Lorenzo Bianconi, committed by David S. Miller

veth: introduce more xdp counters

Introduce xdp_xmit counter in order to distinguish between XDP_TX and
ndo_xdp_xmit stats. Introduce the following ethtool counters:
- rx_xdp_tx
- rx_xdp_tx_errors
- tx_xdp_xmit
- tx_xdp_xmit_errors
- rx_xdp_redirect
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 66fe4a07
...@@ -44,6 +44,9 @@ struct veth_stats { ...@@ -44,6 +44,9 @@ struct veth_stats {
u64 xdp_redirect; u64 xdp_redirect;
u64 xdp_drops; u64 xdp_drops;
u64 xdp_tx; u64 xdp_tx;
u64 xdp_tx_err;
u64 xdp_xmit;
u64 xdp_xmit_err;
}; };
struct veth_rq_stats { struct veth_rq_stats {
...@@ -89,8 +92,13 @@ struct veth_q_stat_desc { ...@@ -89,8 +92,13 @@ struct veth_q_stat_desc {
static const struct veth_q_stat_desc veth_rq_stats_desc[] = { static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
{ "xdp_packets", VETH_RQ_STAT(xdp_packets) }, { "xdp_packets", VETH_RQ_STAT(xdp_packets) },
{ "xdp_bytes", VETH_RQ_STAT(xdp_bytes) }, { "xdp_bytes", VETH_RQ_STAT(xdp_bytes) },
{ "xdp_drops", VETH_RQ_STAT(xdp_drops) },
{ "rx_drops", VETH_RQ_STAT(rx_drops) }, { "rx_drops", VETH_RQ_STAT(rx_drops) },
{ "rx_xdp_redirect", VETH_RQ_STAT(xdp_redirect) },
{ "rx_xdp_drops", VETH_RQ_STAT(xdp_drops) },
{ "rx_xdp_tx", VETH_RQ_STAT(xdp_tx) },
{ "rx_xdp_tx_errors", VETH_RQ_STAT(xdp_tx_err) },
{ "tx_xdp_xmit", VETH_RQ_STAT(xdp_xmit) },
{ "tx_xdp_xmit_errors", VETH_RQ_STAT(xdp_xmit_err) },
}; };
#define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc) #define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc)
...@@ -129,7 +137,7 @@ static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf) ...@@ -129,7 +137,7 @@ static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
for (i = 0; i < dev->real_num_rx_queues; i++) { for (i = 0; i < dev->real_num_rx_queues; i++) {
for (j = 0; j < VETH_RQ_STATS_LEN; j++) { for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
snprintf(p, ETH_GSTRING_LEN, snprintf(p, ETH_GSTRING_LEN,
"rx_queue_%u_%.11s", "rx_queue_%u_%.18s",
i, veth_rq_stats_desc[j].desc); i, veth_rq_stats_desc[j].desc);
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
} }
...@@ -374,12 +382,13 @@ static int veth_select_rxq(struct net_device *dev) ...@@ -374,12 +382,13 @@ static int veth_select_rxq(struct net_device *dev)
} }
static int veth_xdp_xmit(struct net_device *dev, int n, static int veth_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags) struct xdp_frame **frames,
u32 flags, bool ndo_xmit)
{ {
struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
unsigned int qidx, max_len;
struct net_device *rcv; struct net_device *rcv;
int i, ret, drops = n; int i, ret, drops = n;
unsigned int max_len;
struct veth_rq *rq; struct veth_rq *rq;
rcu_read_lock(); rcu_read_lock();
...@@ -395,7 +404,8 @@ static int veth_xdp_xmit(struct net_device *dev, int n, ...@@ -395,7 +404,8 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
} }
rcv_priv = netdev_priv(rcv); rcv_priv = netdev_priv(rcv);
rq = &rcv_priv->rq[veth_select_rxq(rcv)]; qidx = veth_select_rxq(rcv);
rq = &rcv_priv->rq[qidx];
/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
* side. This means an XDP program is loaded on the peer and the peer * side. This means an XDP program is loaded on the peer and the peer
* device is up. * device is up.
...@@ -424,6 +434,17 @@ static int veth_xdp_xmit(struct net_device *dev, int n, ...@@ -424,6 +434,17 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
if (flags & XDP_XMIT_FLUSH) if (flags & XDP_XMIT_FLUSH)
__veth_xdp_flush(rq); __veth_xdp_flush(rq);
rq = &priv->rq[qidx];
u64_stats_update_begin(&rq->stats.syncp);
if (ndo_xmit) {
rq->stats.vs.xdp_xmit += n - drops;
rq->stats.vs.xdp_xmit_err += drops;
} else {
rq->stats.vs.xdp_tx += n - drops;
rq->stats.vs.xdp_tx_err += drops;
}
u64_stats_update_end(&rq->stats.syncp);
if (likely(!drops)) { if (likely(!drops)) {
rcu_read_unlock(); rcu_read_unlock();
return n; return n;
...@@ -437,11 +458,17 @@ static int veth_xdp_xmit(struct net_device *dev, int n, ...@@ -437,11 +458,17 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
return ret; return ret;
} }
/* .ndo_xdp_xmit callback: forwards to veth_xdp_xmit() with ndo_xmit=true
 * so the transmitted/dropped frames are accounted in the xdp_xmit /
 * xdp_xmit_err counters rather than the XDP_TX ones.
 */
static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	return veth_xdp_xmit(dev, n, frames, flags, true);
}
static void veth_xdp_flush_bq(struct net_device *dev, struct veth_xdp_tx_bq *bq) static void veth_xdp_flush_bq(struct net_device *dev, struct veth_xdp_tx_bq *bq)
{ {
int sent, i, err = 0; int sent, i, err = 0;
sent = veth_xdp_xmit(dev, bq->count, bq->q, 0); sent = veth_xdp_xmit(dev, bq->count, bq->q, 0, false);
if (sent < 0) { if (sent < 0) {
err = sent; err = sent;
sent = 0; sent = 0;
...@@ -753,6 +780,7 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget, ...@@ -753,6 +780,7 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
} }
u64_stats_update_begin(&rq->stats.syncp); u64_stats_update_begin(&rq->stats.syncp);
rq->stats.vs.xdp_redirect += stats->xdp_redirect;
rq->stats.vs.xdp_bytes += stats->xdp_bytes; rq->stats.vs.xdp_bytes += stats->xdp_bytes;
rq->stats.vs.xdp_drops += stats->xdp_drops; rq->stats.vs.xdp_drops += stats->xdp_drops;
rq->stats.vs.rx_drops += stats->rx_drops; rq->stats.vs.rx_drops += stats->rx_drops;
...@@ -1172,7 +1200,7 @@ static const struct net_device_ops veth_netdev_ops = { ...@@ -1172,7 +1200,7 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_features_check = passthru_features_check, .ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = veth_set_rx_headroom, .ndo_set_rx_headroom = veth_set_rx_headroom,
.ndo_bpf = veth_xdp, .ndo_bpf = veth_xdp,
.ndo_xdp_xmit = veth_xdp_xmit, .ndo_xdp_xmit = veth_ndo_xdp_xmit,
}; };
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \ #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment