Commit 9a4615be authored by Jakub Kicinski

Merge branch 'eth-fbnic-add-basic-stats'

Jakub Kicinski says:

====================
eth: fbnic: add basic stats

Add basic interface stats to fbnic.

v1: https://lore.kernel.org/20240807022631.1664327-1-kuba@kernel.org
====================

Link: https://patch.msgid.link/20240810054322.2766421-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents fe1f4335 8be1bd91
......@@ -4,6 +4,7 @@
#include <linux/etherdevice.h>
#include <linux/ipv6.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include "fbnic.h"
#include "fbnic_netdev.h"
......@@ -316,6 +317,74 @@ void fbnic_clear_rx_mode(struct net_device *netdev)
__dev_mc_unsync(netdev, NULL);
}
static void fbnic_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats64)
{
u64 tx_bytes, tx_packets, tx_dropped = 0;
u64 rx_bytes, rx_packets, rx_dropped = 0;
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_queue_stats *stats;
unsigned int start, i;
stats = &fbn->tx_stats;
tx_bytes = stats->bytes;
tx_packets = stats->packets;
tx_dropped = stats->dropped;
stats64->tx_bytes = tx_bytes;
stats64->tx_packets = tx_packets;
stats64->tx_dropped = tx_dropped;
for (i = 0; i < fbn->num_tx_queues; i++) {
struct fbnic_ring *txr = fbn->tx[i];
if (!txr)
continue;
stats = &txr->stats;
do {
start = u64_stats_fetch_begin(&stats->syncp);
tx_bytes = stats->bytes;
tx_packets = stats->packets;
tx_dropped = stats->dropped;
} while (u64_stats_fetch_retry(&stats->syncp, start));
stats64->tx_bytes += tx_bytes;
stats64->tx_packets += tx_packets;
stats64->tx_dropped += tx_dropped;
}
stats = &fbn->rx_stats;
rx_bytes = stats->bytes;
rx_packets = stats->packets;
rx_dropped = stats->dropped;
stats64->rx_bytes = rx_bytes;
stats64->rx_packets = rx_packets;
stats64->rx_dropped = rx_dropped;
for (i = 0; i < fbn->num_rx_queues; i++) {
struct fbnic_ring *rxr = fbn->rx[i];
if (!rxr)
continue;
stats = &rxr->stats;
do {
start = u64_stats_fetch_begin(&stats->syncp);
rx_bytes = stats->bytes;
rx_packets = stats->packets;
rx_dropped = stats->dropped;
} while (u64_stats_fetch_retry(&stats->syncp, start));
stats64->rx_bytes += rx_bytes;
stats64->rx_packets += rx_packets;
stats64->rx_dropped += rx_dropped;
}
}
static const struct net_device_ops fbnic_netdev_ops = {
.ndo_open = fbnic_open,
.ndo_stop = fbnic_stop,
......@@ -324,6 +393,72 @@ static const struct net_device_ops fbnic_netdev_ops = {
.ndo_features_check = fbnic_features_check,
.ndo_set_mac_address = fbnic_set_mac,
.ndo_set_rx_mode = fbnic_set_rx_mode,
.ndo_get_stats64 = fbnic_get_stats64,
};
/* fbnic_get_queue_stats_rx - report per-queue Rx byte/packet counts
 * @dev:	netdev the queue belongs to
 * @idx:	Rx queue index
 * @rx:		output structure to fill
 *
 * Leaves @rx untouched if the ring is not currently attached. Counters
 * are snapshotted under the u64_stats sequence so they cannot tear on
 * 32-bit platforms.
 */
static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
				     struct netdev_queue_stats_rx *rx)
{
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_ring *rxr = fbn->rx[idx];
	struct fbnic_queue_stats *qs;
	u64 nbytes, npackets;
	unsigned int seq;

	if (!rxr)
		return;

	qs = &rxr->stats;

	do {
		seq = u64_stats_fetch_begin(&qs->syncp);
		nbytes = qs->bytes;
		npackets = qs->packets;
	} while (u64_stats_fetch_retry(&qs->syncp, seq));

	rx->bytes = nbytes;
	rx->packets = npackets;
}
/* fbnic_get_queue_stats_tx - report per-queue Tx byte/packet counts
 * @dev:	netdev the queue belongs to
 * @idx:	Tx queue index
 * @tx:		output structure to fill
 *
 * Leaves @tx untouched if the ring is not currently attached. Counters
 * are snapshotted under the u64_stats sequence so they cannot tear on
 * 32-bit platforms.
 */
static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
				     struct netdev_queue_stats_tx *tx)
{
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_ring *txr = fbn->tx[idx];
	struct fbnic_queue_stats *qs;
	u64 nbytes, npackets;
	unsigned int seq;

	if (!txr)
		return;

	qs = &txr->stats;

	do {
		seq = u64_stats_fetch_begin(&qs->syncp);
		nbytes = qs->bytes;
		npackets = qs->packets;
	} while (u64_stats_fetch_retry(&qs->syncp, seq));

	tx->bytes = nbytes;
	tx->packets = npackets;
}
static void fbnic_get_base_stats(struct net_device *dev,
struct netdev_queue_stats_rx *rx,
struct netdev_queue_stats_tx *tx)
{
struct fbnic_net *fbn = netdev_priv(dev);
tx->bytes = fbn->tx_stats.bytes;
tx->packets = fbn->tx_stats.packets;
rx->bytes = fbn->rx_stats.bytes;
rx->packets = fbn->rx_stats.packets;
}
/* Callbacks for the netlink per-queue stats API (net/netdev_queues.h);
 * hooked up via netdev->stat_ops in fbnic_netdev_alloc().
 */
static const struct netdev_stat_ops fbnic_stat_ops = {
	.get_queue_stats_rx = fbnic_get_queue_stats_rx,
	.get_queue_stats_tx = fbnic_get_queue_stats_tx,
	.get_base_stats = fbnic_get_base_stats,
};
void fbnic_reset_queues(struct fbnic_net *fbn,
......@@ -384,6 +519,7 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbd->netdev = netdev;
netdev->netdev_ops = &fbnic_netdev_ops;
netdev->stat_ops = &fbnic_stat_ops;
fbn = netdev_priv(netdev);
......
......@@ -40,6 +40,9 @@ struct fbnic_net {
u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];
u32 rss_flow_hash[FBNIC_NUM_HASH_OPT];
/* Storage for stats after ring destruction */
struct fbnic_queue_stats tx_stats;
struct fbnic_queue_stats rx_stats;
u64 link_down_events;
struct list_head napis;
......
......@@ -273,6 +273,9 @@ fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
err_free:
dev_kfree_skb_any(skb);
err_count:
u64_stats_update_begin(&ring->stats.syncp);
ring->stats.dropped++;
u64_stats_update_end(&ring->stats.syncp);
return NETDEV_TX_OK;
}
......@@ -363,10 +366,19 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
txq = txring_txq(nv->napi.dev, ring);
if (unlikely(discard)) {
u64_stats_update_begin(&ring->stats.syncp);
ring->stats.dropped += total_packets;
u64_stats_update_end(&ring->stats.syncp);
netdev_tx_completed_queue(txq, total_packets, total_bytes);
return;
}
u64_stats_update_begin(&ring->stats.syncp);
ring->stats.bytes += total_bytes;
ring->stats.packets += total_packets;
u64_stats_update_end(&ring->stats.syncp);
netif_txq_completed_wake(txq, total_packets, total_bytes,
fbnic_desc_unused(ring),
FBNIC_TX_DESC_WAKEUP);
......@@ -730,12 +742,12 @@ static bool fbnic_rcd_metadata_err(u64 rcd)
static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt, int budget)
{
unsigned int packets = 0, bytes = 0, dropped = 0;
struct fbnic_ring *rcq = &qt->cmpl;
struct fbnic_pkt_buff *pkt;
s32 head0 = -1, head1 = -1;
__le64 *raw_rcd, done;
u32 head = rcq->head;
u64 packets = 0;
done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0;
raw_rcd = &rcq->desc[head & rcq->size_mask];
......@@ -780,9 +792,11 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
fbnic_populate_skb_fields(nv, rcd, skb, qt);
packets++;
bytes += skb->len;
napi_gro_receive(&nv->napi, skb);
} else {
dropped++;
fbnic_put_pkt_buff(nv, pkt, 1);
}
......@@ -799,6 +813,14 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
}
}
u64_stats_update_begin(&rcq->stats.syncp);
rcq->stats.packets += packets;
rcq->stats.bytes += bytes;
/* Re-add ethernet header length (removed in fbnic_build_skb) */
rcq->stats.bytes += ETH_HLEN * packets;
rcq->stats.dropped += dropped;
u64_stats_update_end(&rcq->stats.syncp);
/* Unmap and free processed buffers */
if (head0 >= 0)
fbnic_clean_bdq(nv, budget, &qt->sub0, head0);
......@@ -865,12 +887,36 @@ static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
/* Fold a Rx ring's counters into the netdev-level totals so they survive
 * ring destruction; called from fbnic_remove_rx_ring() before the ring
 * pointer is cleared.
 *
 * NOTE(review): the adds are not wrapped in u64_stats_update_begin/end;
 * presumably safe because the ring is quiesced at this point and the
 * netdev-level syncp is never write-locked elsewhere — confirm.
 */
static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
					     struct fbnic_ring *rxr)
{
	struct fbnic_queue_stats *stats = &rxr->stats;

	/* Capture stats from queues before disassociating them */
	fbn->rx_stats.bytes += stats->bytes;
	fbn->rx_stats.packets += stats->packets;
	fbn->rx_stats.dropped += stats->dropped;
}
/* Fold a Tx ring's counters into the netdev-level totals so they survive
 * ring destruction; called from fbnic_remove_tx_ring() before the ring
 * pointer is cleared.
 *
 * NOTE(review): the adds are not wrapped in u64_stats_update_begin/end;
 * presumably safe because the ring is quiesced at this point and the
 * netdev-level syncp is never write-locked elsewhere — confirm.
 */
static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
					     struct fbnic_ring *txr)
{
	struct fbnic_queue_stats *stats = &txr->stats;

	/* Capture stats from queues before disassociating them */
	fbn->tx_stats.bytes += stats->bytes;
	fbn->tx_stats.packets += stats->packets;
	fbn->tx_stats.dropped += stats->dropped;
}
static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
struct fbnic_ring *txr)
{
if (!(txr->flags & FBNIC_RING_F_STATS))
return;
fbnic_aggregate_ring_tx_counters(fbn, txr);
/* Remove pointer to the Tx ring */
WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr);
fbn->tx[txr->q_idx] = NULL;
......@@ -882,6 +928,8 @@ static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
if (!(rxr->flags & FBNIC_RING_F_STATS))
return;
fbnic_aggregate_ring_rx_counters(fbn, rxr);
/* Remove pointer to the Rx ring */
WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr);
fbn->rx[rxr->q_idx] = NULL;
......@@ -974,6 +1022,7 @@ static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
int q_idx, u8 flags)
{
u64_stats_init(&ring->stats.syncp);
ring->doorbell = doorbell;
ring->q_idx = q_idx;
ring->flags = flags;
......
......@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include <net/xdp.h>
struct fbnic_net;
......@@ -51,6 +52,13 @@ struct fbnic_pkt_buff {
u16 nr_frags;
};
/* Byte/packet/drop counters kept per ring (and, in struct fbnic_net, as
 * netdev-level accumulators for destroyed rings). @syncp guards readers
 * against torn 64-bit reads on 32-bit platforms; writers hold it via
 * u64_stats_update_begin/end in the datapath.
 */
struct fbnic_queue_stats {
	u64 packets;	/* frames completed (Tx) or received (Rx) */
	u64 bytes;	/* bytes for the frames counted in @packets */
	u64 dropped;	/* frames discarded (xmit errors, completion discards, bad RCDs) */
	struct u64_stats_sync syncp;
};
/* Pagecnt bias is long max to reserve the last bit to catch overflow
* cases where if we overcharge the bias it will flip over to be negative.
*/
......@@ -77,6 +85,8 @@ struct fbnic_ring {
u32 head, tail; /* Head/Tail of ring */
struct fbnic_queue_stats stats;
/* Slow path fields follow */
dma_addr_t dma; /* Phys addr of descriptor memory */
size_t size; /* Size of descriptor ring in memory */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment