Commit 34fe76ab authored by David S. Miller

Merge branch 'net-sched-fast-stats'

Eric Dumazet says:

====================
net: sched: faster stats gathering

A while back, I sent one RFC patch using lockless stats gathering
on 64bit arches.

This patch series does it more cleanly, using a seqcount.

Since qdisc/class stats are written at dequeue() time,
we can ask the dequeue path to bump the seqcount, so that
stats readers can avoid taking the root qdisc lock and
instead use the typical read_seqcount_{begin|retry} guarded
loop.

This does not change fast path costs, as the seqcount
increments are no more expensive than the bit manipulation they
replace, and it allows readers to stop freezing the fast path.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 64151ae3 edb09eb1
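
Before the per-file hunks below, here is a minimal sketch of the seqcount pattern this series adopts, simplified from the sch_generic.h and gen_stats.c hunks rather than copied literally; copy_basic_stats is a hypothetical helper name used only for illustration. The dequeue path, already serialized by the qdisc spinlock, makes the sequence odd while it owns the qdisc, and lockless stats readers retry until they observe a stable, even sequence.

/* Writer side: dequeue path, serialized by the qdisc spinlock. */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
    if (qdisc_is_running(qdisc))            /* sequence is odd: another CPU owns it */
        return false;
    write_seqcount_begin(&qdisc->running);  /* sequence becomes odd */
    return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
    write_seqcount_end(&qdisc->running);    /* sequence becomes even again */
}

/* Reader side: stats dump, no qdisc lock taken (hypothetical helper). */
static void copy_basic_stats(const seqcount_t *running,
                             struct gnet_stats_basic_packed *dst,
                             const struct gnet_stats_basic_packed *src)
{
    unsigned int seq;

    do {
        seq = read_seqcount_begin(running); /* snapshot the sequence */
        dst->bytes   = src->bytes;
        dst->packets = src->packets;
    } while (read_seqcount_retry(running, seq)); /* retry if a dequeue ran */
}
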
@@ -21,7 +21,7 @@ struct mystruct {
     ...
 };

-Update statistics:
+Update statistics, in dequeue() methods only, (while owning qdisc->running)
     mystruct->tstats.packet++;
     mystruct->qstats.backlog += skb->pkt_len;
...
@@ -4610,6 +4610,7 @@ static int bond_check_params(struct bond_params *params)
 static struct lock_class_key bonding_netdev_xmit_lock_key;
 static struct lock_class_key bonding_netdev_addr_lock_key;
 static struct lock_class_key bonding_tx_busylock_key;
+static struct lock_class_key bonding_qdisc_running_key;

 static void bond_set_lockdep_class_one(struct net_device *dev,
                                        struct netdev_queue *txq,
@@ -4625,6 +4626,7 @@ static void bond_set_lockdep_class(struct net_device *dev)
                       &bonding_netdev_addr_lock_key);
     netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL);
     dev->qdisc_tx_busylock = &bonding_tx_busylock_key;
+    dev->qdisc_running_key = &bonding_qdisc_running_key;
 }

 /* Called from registration process */
...
@@ -1313,9 +1313,12 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
 }

 static struct lock_class_key ppp_tx_busylock;
+static struct lock_class_key ppp_qdisc_running_key;
+
 static int ppp_dev_init(struct net_device *dev)
 {
     dev->qdisc_tx_busylock = &ppp_tx_busylock;
+    dev->qdisc_running_key = &ppp_qdisc_running_key;
     return 0;
 }
...
@@ -1577,6 +1577,7 @@ static const struct team_option team_options[] = {
 static struct lock_class_key team_netdev_xmit_lock_key;
 static struct lock_class_key team_netdev_addr_lock_key;
 static struct lock_class_key team_tx_busylock_key;
+static struct lock_class_key team_qdisc_running_key;

 static void team_set_lockdep_class_one(struct net_device *dev,
                                        struct netdev_queue *txq,
@@ -1590,6 +1591,7 @@ static void team_set_lockdep_class(struct net_device *dev)
     lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
     netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
     dev->qdisc_tx_busylock = &team_tx_busylock_key;
+    dev->qdisc_running_key = &team_qdisc_running_key;
 }

 static int team_init(struct net_device *dev)
...
@@ -1862,6 +1862,7 @@ struct net_device {
 #endif
     struct phy_device *phydev;
     struct lock_class_key *qdisc_tx_busylock;
+    struct lock_class_key *qdisc_running_key;
     bool proto_down;
 };

 #define to_net_dev(d) container_of(d, struct net_device, dev)
...
@@ -33,10 +33,12 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
                                  spinlock_t *lock, struct gnet_dump *d,
                                  int padattr);

-int gnet_stats_copy_basic(struct gnet_dump *d,
+int gnet_stats_copy_basic(const seqcount_t *running,
+                          struct gnet_dump *d,
                           struct gnet_stats_basic_cpu __percpu *cpu,
                           struct gnet_stats_basic_packed *b);
-void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+void __gnet_stats_copy_basic(const seqcount_t *running,
+                             struct gnet_stats_basic_packed *bstats,
                              struct gnet_stats_basic_cpu __percpu *cpu,
                              struct gnet_stats_basic_packed *b);
 int gnet_stats_copy_rate_est(struct gnet_dump *d,
@@ -52,13 +54,15 @@ int gnet_stats_finish_copy(struct gnet_dump *d);
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
                       struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                       struct gnet_stats_rate_est64 *rate_est,
-                      spinlock_t *stats_lock, struct nlattr *opt);
+                      spinlock_t *stats_lock,
+                      seqcount_t *running, struct nlattr *opt);
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
                         struct gnet_stats_rate_est64 *rate_est);
 int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
                           struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                           struct gnet_stats_rate_est64 *rate_est,
-                          spinlock_t *stats_lock, struct nlattr *opt);
+                          spinlock_t *stats_lock,
+                          seqcount_t *running, struct nlattr *opt);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
                           const struct gnet_stats_rate_est64 *rate_est);
 #endif
@@ -29,13 +29,6 @@ enum qdisc_state_t {
     __QDISC_STATE_THROTTLED,
 };

-/*
- * following bits are only changed while qdisc lock is held
- */
-enum qdisc___state_t {
-    __QDISC___STATE_RUNNING = 1,
-};
-
 struct qdisc_size_table {
     struct rcu_head rcu;
     struct list_head list;
@@ -93,7 +86,7 @@ struct Qdisc {
     unsigned long state;
     struct sk_buff_head q;
     struct gnet_stats_basic_packed bstats;
-    unsigned int __state;
+    seqcount_t running;
     struct gnet_stats_queue qstats;
     struct rcu_head rcu_head;
     int padded;
@@ -104,20 +97,20 @@ struct Qdisc {
 static inline bool qdisc_is_running(const struct Qdisc *qdisc)
 {
-    return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
+    return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
 }

 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
     if (qdisc_is_running(qdisc))
         return false;
-    qdisc->__state |= __QDISC___STATE_RUNNING;
+    write_seqcount_begin(&qdisc->running);
     return true;
 }

 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
-    qdisc->__state &= ~__QDISC___STATE_RUNNING;
+    write_seqcount_end(&qdisc->running);
 }

 static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
@@ -321,6 +314,14 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
     return qdisc_lock(root);
 }

+static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
+{
+    struct Qdisc *root = qdisc_root_sleeping(qdisc);
+
+    ASSERT_RTNL();
+    return &root->running;
+}
+
 static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
 {
     return qdisc->dev_queue->dev;
...
@@ -629,6 +629,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
 static struct lock_class_key bt_tx_busylock;
 static struct lock_class_key bt_netdev_xmit_lock_key;
+static struct lock_class_key bt_qdisc_running_key;

 static void bt_set_lockdep_class_one(struct net_device *dev,
                                      struct netdev_queue *txq,
@@ -641,6 +642,7 @@ static int bt_dev_init(struct net_device *dev)
 {
     netdev_for_each_tx_queue(dev, bt_set_lockdep_class_one, NULL);
     dev->qdisc_tx_busylock = &bt_tx_busylock;
+    dev->qdisc_running_key = &bt_qdisc_running_key;

     return 0;
 }
...
@@ -3075,7 +3075,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
     /*
      * Heuristic to force contended enqueues to serialize on a
      * separate lock before trying to get qdisc main lock.
-     * This permits __QDISC___STATE_RUNNING owner to get the lock more
+     * This permits qdisc->running owner to get the lock more
      * often and dequeue packets faster.
      */
     contended = qdisc_is_running(q);
...
@@ -84,6 +84,7 @@ struct gen_estimator
     struct gnet_stats_basic_packed *bstats;
     struct gnet_stats_rate_est64 *rate_est;
     spinlock_t *stats_lock;
+    seqcount_t *running;
     int ewma_log;
     u32 last_packets;
     unsigned long avpps;
@@ -121,25 +122,27 @@ static void est_timer(unsigned long arg)
         unsigned long rate;
         u64 brate;

-        spin_lock(e->stats_lock);
+        if (e->stats_lock)
+            spin_lock(e->stats_lock);
         read_lock(&est_lock);
         if (e->bstats == NULL)
             goto skip;

-        __gnet_stats_copy_basic(&b, e->cpu_bstats, e->bstats);
+        __gnet_stats_copy_basic(e->running, &b, e->cpu_bstats, e->bstats);

         brate = (b.bytes - e->last_bytes)<<(7 - idx);
         e->last_bytes = b.bytes;
         e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
-        e->rate_est->bps = (e->avbps+0xF)>>5;
+        WRITE_ONCE(e->rate_est->bps, (e->avbps + 0xF) >> 5);

         rate = b.packets - e->last_packets;
         rate <<= (7 - idx);
         e->last_packets = b.packets;
         e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
-        e->rate_est->pps = (e->avpps + 0xF) >> 5;
+        WRITE_ONCE(e->rate_est->pps, (e->avpps + 0xF) >> 5);
 skip:
         read_unlock(&est_lock);
-        spin_unlock(e->stats_lock);
+        if (e->stats_lock)
+            spin_unlock(e->stats_lock);
     }
@@ -194,6 +197,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  * @cpu_bstats: bstats per cpu
  * @rate_est: rate estimator statistics
  * @stats_lock: statistics lock
+ * @running: qdisc running seqcount
  * @opt: rate estimator configuration TLV
  *
  * Creates a new rate estimator with &bstats as source and &rate_est
@@ -209,6 +213,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
                       struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                       struct gnet_stats_rate_est64 *rate_est,
                       spinlock_t *stats_lock,
+                      seqcount_t *running,
                       struct nlattr *opt)
 {
     struct gen_estimator *est;
@@ -226,12 +231,13 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
     if (est == NULL)
         return -ENOBUFS;

-    __gnet_stats_copy_basic(&b, cpu_bstats, bstats);
+    __gnet_stats_copy_basic(running, &b, cpu_bstats, bstats);

     idx = parm->interval + 2;
     est->bstats = bstats;
     est->rate_est = rate_est;
     est->stats_lock = stats_lock;
+    est->running = running;
     est->ewma_log = parm->ewma_log;
     est->last_bytes = b.bytes;
     est->avbps = rate_est->bps<<5;
@@ -291,6 +297,7 @@ EXPORT_SYMBOL(gen_kill_estimator);
  * @cpu_bstats: bstats per cpu
  * @rate_est: rate estimator statistics
  * @stats_lock: statistics lock
+ * @running: qdisc running seqcount (might be NULL)
  * @opt: rate estimator configuration TLV
  *
  * Replaces the configuration of a rate estimator by calling
@@ -301,10 +308,11 @@ EXPORT_SYMBOL(gen_kill_estimator);
 int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
                           struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                           struct gnet_stats_rate_est64 *rate_est,
-                          spinlock_t *stats_lock, struct nlattr *opt)
+                          spinlock_t *stats_lock,
+                          seqcount_t *running, struct nlattr *opt)
 {
     gen_kill_estimator(bstats, rate_est);
-    return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, opt);
+    return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
 }
 EXPORT_SYMBOL(gen_replace_estimator);
...
@@ -32,10 +32,11 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
     return 0;

 nla_put_failure:
+    if (d->lock)
+        spin_unlock_bh(d->lock);
     kfree(d->xstats);
     d->xstats = NULL;
     d->xstats_len = 0;
-    spin_unlock_bh(d->lock);
     return -1;
 }
@@ -65,15 +66,16 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
 {
     memset(d, 0, sizeof(*d));

-    spin_lock_bh(lock);
-    d->lock = lock;
     if (type)
         d->tail = (struct nlattr *)skb_tail_pointer(skb);
     d->skb = skb;
     d->compat_tc_stats = tc_stats_type;
     d->compat_xstats = xstats_type;
     d->padattr = padattr;
+    if (lock) {
+        d->lock = lock;
+        spin_lock_bh(lock);
+    }
     if (d->tail)
         return gnet_stats_copy(d, type, NULL, 0, padattr);
@@ -126,16 +128,23 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
 }

 void
-__gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+__gnet_stats_copy_basic(const seqcount_t *running,
+                        struct gnet_stats_basic_packed *bstats,
                         struct gnet_stats_basic_cpu __percpu *cpu,
                         struct gnet_stats_basic_packed *b)
 {
+    unsigned int seq;
+
     if (cpu) {
         __gnet_stats_copy_basic_cpu(bstats, cpu);
-    } else {
+        return;
+    }
+    do {
+        if (running)
+            seq = read_seqcount_begin(running);
         bstats->bytes = b->bytes;
         bstats->packets = b->packets;
-    }
+    } while (running && read_seqcount_retry(running, seq));
 }
 EXPORT_SYMBOL(__gnet_stats_copy_basic);
@@ -152,13 +161,14 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic);
  * if the room in the socket buffer was not sufficient.
  */
 int
-gnet_stats_copy_basic(struct gnet_dump *d,
+gnet_stats_copy_basic(const seqcount_t *running,
+                      struct gnet_dump *d,
                       struct gnet_stats_basic_cpu __percpu *cpu,
                       struct gnet_stats_basic_packed *b)
 {
     struct gnet_stats_basic_packed bstats = {0};

-    __gnet_stats_copy_basic(&bstats, cpu, b);
+    __gnet_stats_copy_basic(running, &bstats, cpu, b);

     if (d->compat_tc_stats) {
         d->tc_stats.bytes = bstats.bytes;
@@ -328,8 +338,9 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
     return 0;

 err_out:
+    if (d->lock)
+        spin_unlock_bh(d->lock);
     d->xstats_len = 0;
-    spin_unlock_bh(d->lock);
     return -1;
 }
 EXPORT_SYMBOL(gnet_stats_copy_app);
@@ -363,10 +374,11 @@ gnet_stats_finish_copy(struct gnet_dump *d)
             return -1;
     }

+    if (d->lock)
+        spin_unlock_bh(d->lock);
     kfree(d->xstats);
     d->xstats = NULL;
     d->xstats_len = 0;
-    spin_unlock_bh(d->lock);
     return 0;
 }
 EXPORT_SYMBOL(gnet_stats_finish_copy);
...
@@ -60,6 +60,7 @@ static struct header_ops lowpan_header_ops = {
 static struct lock_class_key lowpan_tx_busylock;
 static struct lock_class_key lowpan_netdev_xmit_lock_key;
+static struct lock_class_key lowpan_qdisc_running_key;

 static void lowpan_set_lockdep_class_one(struct net_device *ldev,
                                          struct netdev_queue *txq,
@@ -73,6 +74,8 @@ static int lowpan_dev_init(struct net_device *ldev)
 {
     netdev_for_each_tx_queue(ldev, lowpan_set_lockdep_class_one, NULL);
     ldev->qdisc_tx_busylock = &lowpan_tx_busylock;
+    ldev->qdisc_running_key = &lowpan_qdisc_running_key;
+
     return 0;
 }
...
@@ -68,6 +68,8 @@ static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
 }

 static struct lock_class_key l2tp_eth_tx_busylock;
+static struct lock_class_key l2tp_qdisc_running_key;
+
 static int l2tp_eth_dev_init(struct net_device *dev)
 {
     struct l2tp_eth *priv = netdev_priv(dev);
@@ -76,6 +78,8 @@ static int l2tp_eth_dev_init(struct net_device *dev)
     eth_hw_addr_random(dev);
     eth_broadcast_addr(dev->broadcast);
     dev->qdisc_tx_busylock = &l2tp_eth_tx_busylock;
+    dev->qdisc_running_key = &l2tp_qdisc_running_key;
+
     return 0;
 }
...
@@ -137,7 +137,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
     cfg.est.ewma_log = info->ewma_log;

     ret = gen_new_estimator(&est->bstats, NULL, &est->rstats,
-                            &est->lock, &cfg.opt);
+                            &est->lock, NULL, &cfg.opt);
     if (ret < 0)
         goto err2;
...
@@ -287,7 +287,7 @@ int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
     if (est) {
         err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
                                 &p->tcfc_rate_est,
-                                &p->tcfc_lock, est);
+                                &p->tcfc_lock, NULL, est);
         if (err) {
             free_percpu(p->cpu_qstats);
             goto err2;
@@ -671,7 +671,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
     if (err < 0)
         goto errout;

-    if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
+    if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
         gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
                                  &p->tcfc_rate_est) < 0 ||
         gnet_stats_copy_queue(&d, p->cpu_qstats,
...
@@ -185,7 +185,8 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
     if (est) {
         err = gen_replace_estimator(&police->tcf_bstats, NULL,
                                     &police->tcf_rate_est,
-                                    &police->tcf_lock, est);
+                                    &police->tcf_lock,
+                                    NULL, est);
         if (err)
             goto failure_unlock;
     } else if (tb[TCA_POLICE_AVRATE] &&
...
@@ -982,7 +982,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
         rcu_assign_pointer(sch->stab, stab);
     }
     if (tca[TCA_RATE]) {
-        spinlock_t *root_lock;
+        seqcount_t *running;

         err = -EOPNOTSUPP;
         if (sch->flags & TCQ_F_MQROOT)
@@ -991,14 +991,15 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
         if ((sch->parent != TC_H_ROOT) &&
             !(sch->flags & TCQ_F_INGRESS) &&
             (!p || !(p->flags & TCQ_F_MQROOT)))
-            root_lock = qdisc_root_sleeping_lock(sch);
+            running = qdisc_root_sleeping_running(sch);
         else
-            root_lock = qdisc_lock(sch);
+            running = &sch->running;

         err = gen_new_estimator(&sch->bstats,
                                 sch->cpu_bstats,
                                 &sch->rate_est,
-                                root_lock,
+                                NULL,
+                                running,
                                 tca[TCA_RATE]);
         if (err)
             goto err_out4;
@@ -1061,7 +1062,8 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
         gen_replace_estimator(&sch->bstats,
                               sch->cpu_bstats,
                               &sch->rate_est,
-                              qdisc_root_sleeping_lock(sch),
+                              NULL,
+                              qdisc_root_sleeping_running(sch),
                               tca[TCA_RATE]);
     }
 out:
@@ -1369,8 +1371,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
         goto nla_put_failure;

     if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
-                                     qdisc_root_sleeping_lock(q), &d,
-                                     TCA_PAD) < 0)
+                                     NULL, &d, TCA_PAD) < 0)
         goto nla_put_failure;

     if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1381,7 +1382,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
         cpu_qstats = q->cpu_qstats;
     }

-    if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
+    if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
+                              &d, cpu_bstats, &q->bstats) < 0 ||
         gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
         gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
         goto nla_put_failure;
@@ -1684,8 +1686,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
         goto nla_put_failure;

     if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
-                                     qdisc_root_sleeping_lock(q), &d,
-                                     TCA_PAD) < 0)
+                                     NULL, &d, TCA_PAD) < 0)
         goto nla_put_failure;

     if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
...
@@ -637,7 +637,8 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
     struct atm_flow_data *flow = (struct atm_flow_data *)arg;

-    if (gnet_stats_copy_basic(d, NULL, &flow->bstats) < 0 ||
+    if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                              d, NULL, &flow->bstats) < 0 ||
         gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
         return -1;
...
@@ -1600,7 +1600,8 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
     if (cl->undertime != PSCHED_PASTPERFECT)
         cl->xstats.undertime = cl->undertime - q->now;

-    if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+    if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                              d, NULL, &cl->bstats) < 0 ||
         gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
         gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
         return -1;
@@ -1755,7 +1756,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
     if (tca[TCA_RATE]) {
         err = gen_replace_estimator(&cl->bstats, NULL,
                                     &cl->rate_est,
-                                    qdisc_root_sleeping_lock(sch),
+                                    NULL,
+                                    qdisc_root_sleeping_running(sch),
                                     tca[TCA_RATE]);
         if (err) {
             qdisc_put_rtab(rtab);
@@ -1848,7 +1850,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
     if (tca[TCA_RATE]) {
         err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
-                                qdisc_root_sleeping_lock(sch),
+                                NULL,
+                                qdisc_root_sleeping_running(sch),
                                 tca[TCA_RATE]);
         if (err) {
             kfree(cl);
...
@@ -91,7 +91,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
     if (tca[TCA_RATE]) {
         err = gen_replace_estimator(&cl->bstats, NULL,
                                     &cl->rate_est,
-                                    qdisc_root_sleeping_lock(sch),
+                                    NULL,
+                                    qdisc_root_sleeping_running(sch),
                                     tca[TCA_RATE]);
         if (err)
             return err;
@@ -119,7 +120,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
     if (tca[TCA_RATE]) {
         err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
-                                    qdisc_root_sleeping_lock(sch),
+                                    NULL,
+                                    qdisc_root_sleeping_running(sch),
                                     tca[TCA_RATE]);
         if (err) {
             qdisc_destroy(cl->qdisc);
@@ -279,7 +281,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
     if (qlen)
         xstats.deficit = cl->deficit;

-    if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+    if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                              d, NULL, &cl->bstats) < 0 ||
         gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
         gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
         return -1;
...
@@ -566,11 +566,13 @@ static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
     st.qdisc_stats.memory_usage  = q->memory_usage;
     st.qdisc_stats.drop_overmemory = q->drop_overmemory;

+    sch_tree_lock(sch);
     list_for_each(pos, &q->new_flows)
         st.qdisc_stats.new_flows_len++;

     list_for_each(pos, &q->old_flows)
         st.qdisc_stats.old_flows_len++;
+    sch_tree_unlock(sch);

     return gnet_stats_copy_app(d, &st, sizeof(st));
 }
@@ -624,7 +626,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
     if (idx < q->flows_cnt) {
         const struct fq_codel_flow *flow = &q->flows[idx];
-        const struct sk_buff *skb = flow->head;
+        const struct sk_buff *skb;

         memset(&xstats, 0, sizeof(xstats));
         xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
@@ -642,10 +644,15 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                     codel_time_to_us(delta) :
                     -codel_time_to_us(-delta);
         }
-        while (skb) {
-            qs.qlen++;
-            skb = skb->next;
+        if (flow->head) {
+            sch_tree_lock(sch);
+            skb = flow->head;
+            while (skb) {
+                qs.qlen++;
+                skb = skb->next;
+            }
+            sch_tree_unlock(sch);
         }
         qs.backlog = q->backlogs[idx];
         qs.drops = flow->dropped;
     }
...
@@ -110,7 +110,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 /*
  * Transmit possibly several skbs, and handle the return status as
- * required. Holding the __QDISC___STATE_RUNNING bit guarantees that
+ * required. Owning running seqcount bit guarantees that
  * only one CPU can execute this function.
  *
  * Returns to the caller:
@@ -137,10 +137,10 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
         HARD_TX_UNLOCK(dev, txq);
     } else {
-        spin_lock(root_lock);
+        spin_lock_nested(root_lock, SINGLE_DEPTH_NESTING);
         return qdisc_qlen(q);
     }
-    spin_lock(root_lock);
+    spin_lock_nested(root_lock, SINGLE_DEPTH_NESTING);

     if (dev_xmit_complete(ret)) {
         /* Driver sent out skb successfully or skb was consumed */
@@ -163,7 +163,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 /*
  * NOTE: Called under qdisc_lock(q) with locally disabled BH.
  *
- * __QDISC___STATE_RUNNING guarantees only one CPU can process
+ * running seqcount guarantees only one CPU can process
  * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
  * this queue.
  *
@@ -379,6 +379,7 @@ struct Qdisc noop_qdisc = {
     .list       =   LIST_HEAD_INIT(noop_qdisc.list),
     .q.lock     =   __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
     .dev_queue  =   &noop_netdev_queue,
+    .running    =   SEQCNT_ZERO(noop_qdisc.running),
     .busylock   =   __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 };
 EXPORT_SYMBOL(noop_qdisc);
@@ -537,6 +538,7 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 EXPORT_SYMBOL(pfifo_fast_ops);

 static struct lock_class_key qdisc_tx_busylock;
+static struct lock_class_key qdisc_running_key;

 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                           const struct Qdisc_ops *ops)
@@ -570,6 +572,10 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
     lockdep_set_class(&sch->busylock,
                       dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

+    seqcount_init(&sch->running);
+    lockdep_set_class(&sch->running,
+                      dev->qdisc_running_key ?: &qdisc_running_key);
+
     sch->ops = ops;
     sch->enqueue = ops->enqueue;
     sch->dequeue = ops->dequeue;
...
@@ -1015,11 +1015,10 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
         cur_time = psched_get_time();

         if (tca[TCA_RATE]) {
-            spinlock_t *lock = qdisc_root_sleeping_lock(sch);
-
             err = gen_replace_estimator(&cl->bstats, NULL,
                                         &cl->rate_est,
-                                        lock,
+                                        NULL,
+                                        qdisc_root_sleeping_running(sch),
                                         tca[TCA_RATE]);
             if (err)
                 return err;
@@ -1068,7 +1067,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
     if (tca[TCA_RATE]) {
         err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
-                                qdisc_root_sleeping_lock(sch),
+                                NULL,
+                                qdisc_root_sleeping_running(sch),
                                 tca[TCA_RATE]);
         if (err) {
             kfree(cl);
@@ -1373,7 +1373,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
     xstats.work    = cl->cl_total;
     xstats.rtwork  = cl->cl_cumul;

-    if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+    if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
         gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
         gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
         return -1;
...
@@ -1141,7 +1141,8 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
     cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
     cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);

-    if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+    if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                              d, NULL, &cl->bstats) < 0 ||
         gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
         gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
         return -1;
@@ -1395,7 +1396,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
         if (htb_rate_est || tca[TCA_RATE]) {
             err = gen_new_estimator(&cl->bstats, NULL,
                                     &cl->rate_est,
-                                    qdisc_root_sleeping_lock(sch),
+                                    NULL,
+                                    qdisc_root_sleeping_running(sch),
                                     tca[TCA_RATE] ? : &est.nla);
             if (err) {
                 kfree(cl);
@@ -1457,11 +1459,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
             parent->children++;
     } else {
         if (tca[TCA_RATE]) {
-            spinlock_t *lock = qdisc_root_sleeping_lock(sch);
-
             err = gen_replace_estimator(&cl->bstats, NULL,
                                         &cl->rate_est,
-                                        lock,
+                                        NULL,
+                                        qdisc_root_sleeping_running(sch),
                                         tca[TCA_RATE]);
             if (err)
                 return err;
...
@@ -199,7 +199,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
     struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

     sch = dev_queue->qdisc_sleeping;
-    if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
+    if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
         gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
         return -1;
     return 0;
...
@@ -342,6 +342,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
          * hold here is the look on dev_queue->qdisc_sleeping
          * also acquired below.
          */
-        spin_unlock_bh(d->lock);
+        if (d->lock)
+            spin_unlock_bh(d->lock);

         for (i = tc.offset; i < tc.offset + tc.count; i++) {
@@ -359,15 +360,17 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
             spin_unlock_bh(qdisc_lock(qdisc));
         }
         /* Reclaim root sleeping lock before completing stats */
-        spin_lock_bh(d->lock);
+        if (d->lock)
+            spin_lock_bh(d->lock);

-        if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 ||
+        if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
             gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
             return -1;
     } else {
         struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

         sch = dev_queue->qdisc_sleeping;
-        if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
+        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                  d, NULL, &sch->bstats) < 0 ||
             gnet_stats_copy_queue(d, NULL,
                                   &sch->qstats, sch->q.qlen) < 0)
             return -1;
...
@@ -356,7 +356,8 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
     struct Qdisc *cl_q;

     cl_q = q->queues[cl - 1];
-    if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
+    if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                              d, NULL, &cl_q->bstats) < 0 ||
         gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
         return -1;
...
@@ -319,7 +319,8 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
     struct Qdisc *cl_q;

     cl_q = q->queues[cl - 1];
-    if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
+    if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                              d, NULL, &cl_q->bstats) < 0 ||
         gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
         return -1;
...
@@ -460,7 +460,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
     if (tca[TCA_RATE]) {
         err = gen_replace_estimator(&cl->bstats, NULL,
                                     &cl->rate_est,
-                                    qdisc_root_sleeping_lock(sch),
+                                    NULL,
+                                    qdisc_root_sleeping_running(sch),
                                     tca[TCA_RATE]);
         if (err)
             return err;
@@ -486,7 +487,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
     if (tca[TCA_RATE]) {
         err = gen_new_estimator(&cl->bstats, NULL,
                                 &cl->rate_est,
-                                qdisc_root_sleeping_lock(sch),
+                                NULL,
+                                qdisc_root_sleeping_running(sch),
                                 tca[TCA_RATE]);
         if (err)
             goto destroy_class;
@@ -663,7 +665,8 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
     xstats.weight = cl->agg->class_weight;
     xstats.lmax = cl->agg->lmax;

-    if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+    if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                              d, NULL, &cl->bstats) < 0 ||
         gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
         gnet_stats_copy_queue(d, NULL,
                               &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
...