Commit 5eaa0bd8 authored by Eric Dumazet, committed by David S. Miller

loopback: use u64_stats_sync infrastructure

Commit 6b10de38 (loopback: Implement 64bit stats on 32bit arches)
introduced 64bit stats in the loopback driver, using a private seqcount
and private helpers.

David suggested introducing a generic infrastructure instead, which was
added in (net: Introduce u64_stats_sync infrastructure).

This patch reimplements loopback 64bit stats using the u64_stats_sync
infrastructure.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4b4194c4
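
For context, the u64_stats_sync helpers that the diff below switches to boil down to roughly the following. This is a simplified sketch for illustration, not a verbatim copy of include/linux/u64_stats_sync.h: on 64bit kernels (and on 32bit without SMP) the calls compile to nothing, while on 32bit SMP they wrap a seqcount, just like the loopback-private helpers this patch removes.

/* Simplified sketch of the generic helpers, for illustration only. */
#include <linux/seqlock.h>
#include <linux/types.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);	/* writer enters critical section */
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);	/* writer leaves critical section */
#endif
}

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;				/* no retry needed: 64bit loads are atomic */
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}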
@@ -58,53 +58,15 @@
 #include <linux/tcp.h>
 #include <linux/percpu.h>
 #include <net/net_namespace.h>
+#include <linux/u64_stats_sync.h>

 struct pcpu_lstats {
 	u64			packets;
 	u64			bytes;
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-	seqcount_t		seq;
-#endif
+	struct u64_stats_sync	syncp;
 	unsigned long		drops;
 };

-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-static void inline lstats_update_begin(struct pcpu_lstats *lstats)
-{
-	write_seqcount_begin(&lstats->seq);
-}
-
-static void inline lstats_update_end(struct pcpu_lstats *lstats)
-{
-	write_seqcount_end(&lstats->seq);
-}
-
-static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
-{
-	u64 tpackets, tbytes;
-	unsigned int seq;
-
-	do {
-		seq = read_seqcount_begin(&lstats->seq);
-		tpackets = lstats->packets;
-		tbytes = lstats->bytes;
-	} while (read_seqcount_retry(&lstats->seq, seq));
-
-	*packets += tpackets;
-	*bytes += tbytes;
-}
-#else
-static void inline lstats_update_begin(struct pcpu_lstats *lstats)
-{
-}
-
-static void inline lstats_update_end(struct pcpu_lstats *lstats)
-{
-}
-
-static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
-{
-	*packets += lstats->packets;
-	*bytes += lstats->bytes;
-}
-#endif
-
 /*
  * The higher levels take care of making this non-reentrant (it's
  * called with bh's disabled).
@@ -126,10 +88,10 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
 	len = skb->len;
 	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
-		lstats_update_begin(lb_stats);
+		u64_stats_update_begin(&lb_stats->syncp);
 		lb_stats->bytes += len;
 		lb_stats->packets++;
-		lstats_update_end(lb_stats);
+		u64_stats_update_end(&lb_stats->syncp);
 	} else
 		lb_stats->drops++;

@@ -148,10 +110,18 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev)
 	pcpu_lstats = (void __percpu __force *)dev->ml_priv;
 	for_each_possible_cpu(i) {
 		const struct pcpu_lstats *lb_stats;
+		u64 tbytes, tpackets;
+		unsigned int start;

 		lb_stats = per_cpu_ptr(pcpu_lstats, i);
-		lstats_fetch_and_add(&packets, &bytes, lb_stats);
+		do {
+			start = u64_stats_fetch_begin(&lb_stats->syncp);
+			tbytes = lb_stats->bytes;
+			tpackets = lb_stats->packets;
+		} while (u64_stats_fetch_retry(&lb_stats->syncp, start));
 		drops += lb_stats->drops;
+		bytes += tbytes;
+		packets += tpackets;
 	}
 	stats->rx_packets = packets;
 	stats->tx_packets = packets;
...
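
As a usage note, the same writer/reader pattern generalizes to any per-CPU 64bit counters. The sketch below is hypothetical: the foo_stats structure and the foo_account/foo_fold_stats helpers are invented for illustration and are not part of this patch. u64_stats_sync only protects readers from torn 64bit loads on 32bit machines; writers must already be serialized per CPU, which holds here because loopback_xmit runs with BHs disabled.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-CPU stats, mirroring struct pcpu_lstats. */
struct foo_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

static DEFINE_PER_CPU(struct foo_stats, foo_stats);

/* Writer side: assumed to run with BHs disabled, so per-CPU updates
 * never nest; syncp only guards readers against torn 64bit reads. */
static void foo_account(unsigned int len)
{
	struct foo_stats *stats = this_cpu_ptr(&foo_stats);

	u64_stats_update_begin(&stats->syncp);
	stats->packets++;
	stats->bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/* Reader side: fold all per-CPU counters into one pair of totals,
 * retrying any CPU whose counters were being updated concurrently. */
static void foo_fold_stats(u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		const struct foo_stats *stats = per_cpu_ptr(&foo_stats, cpu);
		u64 tpackets, tbytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tpackets = stats->packets;
			tbytes = stats->bytes;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		*packets += tpackets;
		*bytes += tbytes;
	}
}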