Commit 290b895e authored by Eric Dumazet, committed by David S. Miller

tunnels: prepare percpu accounting

Tunnels are going to use percpu for their accounting.

They are going to use a new tstats field in net_device.

skb_tunnel_rx() is changed to be a wrapper around __skb_tunnel_rx().

IPTUNNEL_XMIT() is changed to be a wrapper around __IPTUNNEL_XMIT().

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent af5ef241
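
For context: with the new tstats union member added below, a tunnel driver is expected to allocate the percpu stats itself. A minimal sketch of that setup, assuming a driver-defined pcpu_tstats structure; this commit only declares the pointer, and the structure plus the hypothetical my_tunnel_init()/my_tunnel_uninit() helpers are not part of the patch:

#include <linux/netdevice.h>
#include <linux/percpu.h>

/* assumed driver-side definition; not added by this commit */
struct pcpu_tstats {
	unsigned long	rx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_packets;
	unsigned long	tx_bytes;
};

/* hypothetical ndo_init: allocate one counter set per possible CPU */
static int my_tunnel_init(struct net_device *dev)
{
	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;
	return 0;
}

/* hypothetical ndo_uninit: release the percpu counters */
static void my_tunnel_uninit(struct net_device *dev)
{
	free_percpu(dev->tstats);
}
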
@@ -1053,6 +1053,7 @@ struct net_device {
 	union {
 		void				*ml_priv;
 		struct pcpu_lstats __percpu	*lstats; /* loopback stats */
+		struct pcpu_tstats __percpu	*tstats; /* tunnel stats */
 	};
 	/* GARP */
 	struct garp_port		*garp_port;
...
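
Because the counters now live per CPU, a driver has to fold them back into a single net_device_stats when the stats are read. A hedged sketch of such a get_stats handler, reusing the pcpu_tstats assumed above (the function name is illustrative, not from this patch):

static struct net_device_stats *my_tunnel_get_stats(struct net_device *dev)
{
	struct pcpu_tstats sum = { 0 };
	int i;

	/* sum the per-CPU counters into one snapshot */
	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);

		sum.rx_packets += tstats->rx_packets;
		sum.rx_bytes   += tstats->rx_bytes;
		sum.tx_packets += tstats->tx_packets;
		sum.tx_bytes   += tstats->tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes   = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes   = sum.tx_bytes;
	return &dev->stats;
}
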
@@ -227,6 +227,23 @@ static inline void skb_dst_force(struct sk_buff *skb)
 }
 
+/**
+ * __skb_tunnel_rx - prepare skb for rx reinsert
+ * @skb: buffer
+ * @dev: tunnel device
+ *
+ * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
+ * so make some cleanups. (no accounting done)
+ */
+static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
+{
+	skb->dev = dev;
+	skb->rxhash = 0;
+	skb_set_queue_mapping(skb, 0);
+	skb_dst_drop(skb);
+	nf_reset(skb);
+}
+
 /**
  * skb_tunnel_rx - prepare skb for rx reinsert
  * @skb: buffer
@@ -234,17 +251,14 @@ static inline void skb_dst_force(struct sk_buff *skb)
  *
  * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
  * so make some cleanups, and perform accounting.
+ * Note: this accounting is not SMP safe.
  */
 static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
 {
-	skb->dev = dev;
 	/* TODO : stats should be SMP safe */
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += skb->len;
-	skb->rxhash = 0;
-	skb_set_queue_mapping(skb, 0);
-	skb_dst_drop(skb);
-	nf_reset(skb);
+	__skb_tunnel_rx(skb, dev);
 }
 
 /* Children define the path of the packet through the
...
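
The split above lets a percpu-aware tunnel driver keep the cleanup step while skipping the racy dev->stats accounting. A sketch of a receive path along those lines, again assuming the driver-defined pcpu_tstats from the earlier sketch (my_tunnel_rcv() is illustrative):

static void my_tunnel_rcv(struct sk_buff *skb, struct net_device *dev)
{
	/* runs in softirq context, so the CPU cannot change under us */
	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);

	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;

	__skb_tunnel_rx(skb, dev);	/* cleanups only, no dev->stats writes */
	netif_rx(skb);
}
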
@@ -45,7 +45,7 @@ struct ip_tunnel_prl_entry {
 	struct rcu_head			rcu_head;
 };
 
-#define IPTUNNEL_XMIT() do {						\
+#define __IPTUNNEL_XMIT(stats1, stats2) do {				\
 	int err;							\
 	int pkt_len = skb->len - skb_transport_offset(skb);		\
 									\
@@ -54,12 +54,14 @@ struct ip_tunnel_prl_entry {
 									\
 	err = ip_local_out(skb);					\
 	if (likely(net_xmit_eval(err) == 0)) {				\
-		txq->tx_bytes += pkt_len;				\
-		txq->tx_packets++;					\
+		(stats1)->tx_bytes += pkt_len;				\
+		(stats1)->tx_packets++;					\
 	} else {							\
-		stats->tx_errors++;					\
-		stats->tx_aborted_errors++;				\
+		(stats2)->tx_errors++;					\
+		(stats2)->tx_aborted_errors++;				\
 	}								\
 } while (0)
 
+#define IPTUNNEL_XMIT() __IPTUNNEL_XMIT(txq, stats)
+
 #endif
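
Existing callers keep working through the unchanged IPTUNNEL_XMIT() (which still expects txq and stats in scope); a percpu-aware transmit path could instead pass its own pointers to __IPTUNNEL_XMIT(). A rough sketch of what such a call site might look like, not taken from this patch; the macro assumes skb, iph and rt have already been set up by the encapsulation code:

static netdev_tx_t my_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pcpu_tstats *tstats;
	struct rtable *rt;	/* route to the tunnel endpoint */
	struct iphdr *iph;	/* outer header, already built */

	/* ... encapsulation elided: build iph, look up rt, attach rt->dst ... */

	tstats = this_cpu_ptr(dev->tstats);
	/* successful tx bytes/packets go to the percpu counters,
	 * errors still land in the shared dev->stats fields
	 */
	__IPTUNNEL_XMIT(tstats, &dev->stats);
	return NETDEV_TX_OK;
}
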