Commit ca375cf3 authored by WANG Cong, committed by Greg Kroah-Hartman

net_sched: update hierarchical backlog too

[ Upstream commit 2ccccf5f ]

When the bottom qdisc decides to, for example, drop some packet,
it calls qdisc_tree_decrease_qlen() to update the queue length
for all its ancestors, we need to update the backlog too to
keep the stats on root qdisc accurate.

Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 1188e140
...@@ -162,12 +162,14 @@ struct codel_vars { ...@@ -162,12 +162,14 @@ struct codel_vars {
* struct codel_stats - contains codel shared variables and stats * struct codel_stats - contains codel shared variables and stats
* @maxpacket: largest packet we've seen so far * @maxpacket: largest packet we've seen so far
* @drop_count: temp count of dropped packets in dequeue() * @drop_count: temp count of dropped packets in dequeue()
* @drop_len: bytes of dropped packets in dequeue()
* ecn_mark: number of packets we ECN marked instead of dropping * ecn_mark: number of packets we ECN marked instead of dropping
* ce_mark: number of packets CE marked because sojourn time was above ce_threshold * ce_mark: number of packets CE marked because sojourn time was above ce_threshold
*/ */
struct codel_stats { struct codel_stats {
u32 maxpacket; u32 maxpacket;
u32 drop_count; u32 drop_count;
u32 drop_len;
u32 ecn_mark; u32 ecn_mark;
u32 ce_mark; u32 ce_mark;
}; };
...@@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, ...@@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
vars->rec_inv_sqrt); vars->rec_inv_sqrt);
goto end; goto end;
} }
stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch); qdisc_drop(skb, sch);
stats->drop_count++; stats->drop_count++;
skb = dequeue_func(vars, sch); skb = dequeue_func(vars, sch);
...@@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, ...@@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
if (params->ecn && INET_ECN_set_ce(skb)) { if (params->ecn && INET_ECN_set_ce(skb)) {
stats->ecn_mark++; stats->ecn_mark++;
} else { } else {
stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch); qdisc_drop(skb, sch);
stats->drop_count++; stats->drop_count++;
......
...@@ -396,7 +396,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, ...@@ -396,7 +396,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc); struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc); void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc); void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n); void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops); const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
...@@ -707,7 +708,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, ...@@ -707,7 +708,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
old = *pold; old = *pold;
*pold = new; *pold = new;
if (old != NULL) { if (old != NULL) {
qdisc_tree_decrease_qlen(old, old->q.qlen); qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
qdisc_reset(old); qdisc_reset(old);
} }
sch_tree_unlock(sch); sch_tree_unlock(sch);
......
...@@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev) ...@@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
return 0; return 0;
} }
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
unsigned int len)
{ {
const struct Qdisc_class_ops *cops; const struct Qdisc_class_ops *cops;
unsigned long cl; unsigned long cl;
u32 parentid; u32 parentid;
int drops; int drops;
if (n == 0) if (n == 0 && len == 0)
return; return;
drops = max_t(int, n, 0); drops = max_t(int, n, 0);
rcu_read_lock(); rcu_read_lock();
...@@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) ...@@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
cops->put(sch, cl); cops->put(sch, cl);
} }
sch->q.qlen -= n; sch->q.qlen -= n;
sch->qstats.backlog -= len;
__qdisc_qstats_drop(sch, drops); __qdisc_qstats_drop(sch, drops);
} }
rcu_read_unlock(); rcu_read_unlock();
} }
EXPORT_SYMBOL(qdisc_tree_decrease_qlen); EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
static void notify_and_destroy(struct net *net, struct sk_buff *skb, static void notify_and_destroy(struct net *net, struct sk_buff *skb,
struct nlmsghdr *n, u32 clid, struct nlmsghdr *n, u32 clid,
......
...@@ -1909,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg) ...@@ -1909,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{ {
struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg; struct cbq_class *cl = (struct cbq_class *)arg;
unsigned int qlen; unsigned int qlen, backlog;
if (cl->filters || cl->children || cl == &q->link) if (cl->filters || cl->children || cl == &q->link)
return -EBUSY; return -EBUSY;
...@@ -1917,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg) ...@@ -1917,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
sch_tree_lock(sch); sch_tree_lock(sch);
qlen = cl->q->q.qlen; qlen = cl->q->q.qlen;
backlog = cl->q->qstats.backlog;
qdisc_reset(cl->q); qdisc_reset(cl->q);
qdisc_tree_decrease_qlen(cl->q, qlen); qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
if (cl->next_alive) if (cl->next_alive)
cbq_deactivate_class(cl); cbq_deactivate_class(cl);
......
...@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx) ...@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
choke_zap_tail_holes(q); choke_zap_tail_holes(q);
qdisc_qstats_backlog_dec(sch, skb); qdisc_qstats_backlog_dec(sch, skb);
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_drop(skb, sch); qdisc_drop(skb, sch);
qdisc_tree_decrease_qlen(sch, 1);
--sch->q.qlen; --sch->q.qlen;
} }
...@@ -456,6 +456,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -456,6 +456,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
old = q->tab; old = q->tab;
if (old) { if (old) {
unsigned int oqlen = sch->q.qlen, tail = 0; unsigned int oqlen = sch->q.qlen, tail = 0;
unsigned dropped = 0;
while (q->head != q->tail) { while (q->head != q->tail) {
struct sk_buff *skb = q->tab[q->head]; struct sk_buff *skb = q->tab[q->head];
...@@ -467,11 +468,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -467,11 +468,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
ntab[tail++] = skb; ntab[tail++] = skb;
continue; continue;
} }
dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb); qdisc_qstats_backlog_dec(sch, skb);
--sch->q.qlen; --sch->q.qlen;
qdisc_drop(skb, sch); qdisc_drop(skb, sch);
} }
qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen); qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
q->head = 0; q->head = 0;
q->tail = tail; q->tail = tail;
} }
......
...@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) ...@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue); skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0, /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
* or HTB crashes. Defer it for next round. * or HTB crashes. Defer it for next round.
*/ */
if (q->stats.drop_count && sch->q.qlen) { if (q->stats.drop_count && sch->q.qlen) {
qdisc_tree_decrease_qlen(sch, q->stats.drop_count); qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
q->stats.drop_count = 0; q->stats.drop_count = 0;
q->stats.drop_len = 0;
} }
if (skb) if (skb)
qdisc_bstats_update(sch, skb); qdisc_bstats_update(sch, skb);
...@@ -116,7 +117,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -116,7 +117,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
{ {
struct codel_sched_data *q = qdisc_priv(sch); struct codel_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CODEL_MAX + 1]; struct nlattr *tb[TCA_CODEL_MAX + 1];
unsigned int qlen; unsigned int qlen, dropped = 0;
int err; int err;
if (!opt) if (!opt)
...@@ -156,10 +157,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -156,10 +157,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) { while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q); struct sk_buff *skb = __skb_dequeue(&sch->q);
dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb); qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch); qdisc_drop(skb, sch);
} }
qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
sch_tree_unlock(sch); sch_tree_unlock(sch);
return 0; return 0;
......
...@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid) ...@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
static void drr_purge_queue(struct drr_class *cl) static void drr_purge_queue(struct drr_class *cl)
{ {
unsigned int len = cl->qdisc->q.qlen; unsigned int len = cl->qdisc->q.qlen;
unsigned int backlog = cl->qdisc->qstats.backlog;
qdisc_reset(cl->qdisc); qdisc_reset(cl->qdisc);
qdisc_tree_decrease_qlen(cl->qdisc, len); qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
} }
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = { static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
......
...@@ -662,6 +662,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -662,6 +662,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
struct fq_sched_data *q = qdisc_priv(sch); struct fq_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_FQ_MAX + 1]; struct nlattr *tb[TCA_FQ_MAX + 1];
int err, drop_count = 0; int err, drop_count = 0;
unsigned drop_len = 0;
u32 fq_log; u32 fq_log;
if (!opt) if (!opt)
...@@ -736,10 +737,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -736,10 +737,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
if (!skb) if (!skb)
break; break;
drop_len += qdisc_pkt_len(skb);
kfree_skb(skb); kfree_skb(skb);
drop_count++; drop_count++;
} }
qdisc_tree_decrease_qlen(sch, drop_count); qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
sch_tree_unlock(sch); sch_tree_unlock(sch);
return err; return err;
......
...@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch) ...@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{ {
struct fq_codel_sched_data *q = qdisc_priv(sch); struct fq_codel_sched_data *q = qdisc_priv(sch);
unsigned int idx; unsigned int idx, prev_backlog;
struct fq_codel_flow *flow; struct fq_codel_flow *flow;
int uninitialized_var(ret); int uninitialized_var(ret);
...@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (++sch->q.qlen <= sch->limit) if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
prev_backlog = sch->qstats.backlog;
q->drop_overlimit++; q->drop_overlimit++;
/* Return Congestion Notification only if we dropped a packet /* Return Congestion Notification only if we dropped a packet
* from this flow. * from this flow.
...@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_CN; return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this */ /* As we dropped a packet, better let upper stack know this */
qdisc_tree_decrease_qlen(sch, 1); qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
...@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch) ...@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
struct fq_codel_flow *flow; struct fq_codel_flow *flow;
struct list_head *head; struct list_head *head;
u32 prev_drop_count, prev_ecn_mark; u32 prev_drop_count, prev_ecn_mark;
unsigned int prev_backlog;
begin: begin:
head = &q->new_flows; head = &q->new_flows;
...@@ -259,6 +261,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch) ...@@ -259,6 +261,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
prev_drop_count = q->cstats.drop_count; prev_drop_count = q->cstats.drop_count;
prev_ecn_mark = q->cstats.ecn_mark; prev_ecn_mark = q->cstats.ecn_mark;
prev_backlog = sch->qstats.backlog;
skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats, skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
dequeue); dequeue);
...@@ -276,12 +279,14 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch) ...@@ -276,12 +279,14 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
} }
qdisc_bstats_update(sch, skb); qdisc_bstats_update(sch, skb);
flow->deficit -= qdisc_pkt_len(skb); flow->deficit -= qdisc_pkt_len(skb);
/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0, /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
* or HTB crashes. Defer it for next round. * or HTB crashes. Defer it for next round.
*/ */
if (q->cstats.drop_count && sch->q.qlen) { if (q->cstats.drop_count && sch->q.qlen) {
qdisc_tree_decrease_qlen(sch, q->cstats.drop_count); qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
q->cstats.drop_len);
q->cstats.drop_count = 0; q->cstats.drop_count = 0;
q->cstats.drop_len = 0;
} }
return skb; return skb;
} }
...@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) { while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_codel_dequeue(sch); struct sk_buff *skb = fq_codel_dequeue(sch);
q->cstats.drop_len += qdisc_pkt_len(skb);
kfree_skb(skb); kfree_skb(skb);
q->cstats.drop_count++; q->cstats.drop_count++;
} }
qdisc_tree_decrease_qlen(sch, q->cstats.drop_count); qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
q->cstats.drop_count = 0; q->cstats.drop_count = 0;
q->cstats.drop_len = 0;
sch_tree_unlock(sch); sch_tree_unlock(sch);
return 0; return 0;
......
...@@ -895,9 +895,10 @@ static void ...@@ -895,9 +895,10 @@ static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl) hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{ {
unsigned int len = cl->qdisc->q.qlen; unsigned int len = cl->qdisc->q.qlen;
unsigned int backlog = cl->qdisc->qstats.backlog;
qdisc_reset(cl->qdisc); qdisc_reset(cl->qdisc);
qdisc_tree_decrease_qlen(cl->qdisc, len); qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
} }
static void static void
......
...@@ -382,6 +382,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -382,6 +382,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct hhf_sched_data *q = qdisc_priv(sch); struct hhf_sched_data *q = qdisc_priv(sch);
enum wdrr_bucket_idx idx; enum wdrr_bucket_idx idx;
struct wdrr_bucket *bucket; struct wdrr_bucket *bucket;
unsigned int prev_backlog;
idx = hhf_classify(skb, sch); idx = hhf_classify(skb, sch);
...@@ -409,6 +410,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -409,6 +410,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (++sch->q.qlen <= sch->limit) if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
prev_backlog = sch->qstats.backlog;
q->drop_overlimit++; q->drop_overlimit++;
/* Return Congestion Notification only if we dropped a packet from this /* Return Congestion Notification only if we dropped a packet from this
* bucket. * bucket.
...@@ -417,7 +419,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -417,7 +419,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_CN; return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this. */ /* As we dropped a packet, better let upper stack know this. */
qdisc_tree_decrease_qlen(sch, 1); qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
...@@ -527,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -527,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
{ {
struct hhf_sched_data *q = qdisc_priv(sch); struct hhf_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_HHF_MAX + 1]; struct nlattr *tb[TCA_HHF_MAX + 1];
unsigned int qlen; unsigned int qlen, prev_backlog;
int err; int err;
u64 non_hh_quantum; u64 non_hh_quantum;
u32 new_quantum = q->quantum; u32 new_quantum = q->quantum;
...@@ -577,12 +579,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -577,12 +579,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
} }
qlen = sch->q.qlen; qlen = sch->q.qlen;
prev_backlog = sch->qstats.backlog;
while (sch->q.qlen > sch->limit) { while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = hhf_dequeue(sch); struct sk_buff *skb = hhf_dequeue(sch);
kfree_skb(skb); kfree_skb(skb);
} }
qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
prev_backlog - sch->qstats.backlog);
sch_tree_unlock(sch); sch_tree_unlock(sch);
return 0; return 0;
......
...@@ -1265,7 +1265,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg) ...@@ -1265,7 +1265,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
{ {
struct htb_sched *q = qdisc_priv(sch); struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg; struct htb_class *cl = (struct htb_class *)arg;
unsigned int qlen;
struct Qdisc *new_q = NULL; struct Qdisc *new_q = NULL;
int last_child = 0; int last_child = 0;
...@@ -1285,9 +1284,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg) ...@@ -1285,9 +1284,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
sch_tree_lock(sch); sch_tree_lock(sch);
if (!cl->level) { if (!cl->level) {
qlen = cl->un.leaf.q->q.qlen; unsigned int qlen = cl->un.leaf.q->q.qlen;
unsigned int backlog = cl->un.leaf.q->qstats.backlog;
qdisc_reset(cl->un.leaf.q); qdisc_reset(cl->un.leaf.q);
qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen); qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
} }
/* delete from hash and active; remainder in destroy_class */ /* delete from hash and active; remainder in destroy_class */
...@@ -1421,10 +1422,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, ...@@ -1421,10 +1422,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
sch_tree_lock(sch); sch_tree_lock(sch);
if (parent && !parent->level) { if (parent && !parent->level) {
unsigned int qlen = parent->un.leaf.q->q.qlen; unsigned int qlen = parent->un.leaf.q->q.qlen;
unsigned int backlog = parent->un.leaf.q->qstats.backlog;
/* turn parent into inner node */ /* turn parent into inner node */
qdisc_reset(parent->un.leaf.q); qdisc_reset(parent->un.leaf.q);
qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen); qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
qdisc_destroy(parent->un.leaf.q); qdisc_destroy(parent->un.leaf.q);
if (parent->prio_activity) if (parent->prio_activity)
htb_deactivate(q, parent); htb_deactivate(q, parent);
......
...@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt) ...@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
if (q->queues[i] != &noop_qdisc) { if (q->queues[i] != &noop_qdisc) {
struct Qdisc *child = q->queues[i]; struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc; q->queues[i] = &noop_qdisc;
qdisc_tree_decrease_qlen(child, child->q.qlen); qdisc_tree_reduce_backlog(child, child->q.qlen,
child->qstats.backlog);
qdisc_destroy(child); qdisc_destroy(child);
} }
} }
...@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt) ...@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
q->queues[i] = child; q->queues[i] = child;
if (old != &noop_qdisc) { if (old != &noop_qdisc) {
qdisc_tree_decrease_qlen(old, qdisc_tree_reduce_backlog(old,
old->q.qlen); old->q.qlen,
old->qstats.backlog);
qdisc_destroy(old); qdisc_destroy(old);
} }
sch_tree_unlock(sch); sch_tree_unlock(sch);
......
...@@ -598,7 +598,8 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) ...@@ -598,7 +598,8 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
if (unlikely(err != NET_XMIT_SUCCESS)) { if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) { if (net_xmit_drop_count(err)) {
qdisc_qstats_drop(sch); qdisc_qstats_drop(sch);
qdisc_tree_decrease_qlen(sch, 1); qdisc_tree_reduce_backlog(sch, 1,
qdisc_pkt_len(skb));
} }
} }
goto tfifo_dequeue; goto tfifo_dequeue;
......
...@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
{ {
struct pie_sched_data *q = qdisc_priv(sch); struct pie_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_PIE_MAX + 1]; struct nlattr *tb[TCA_PIE_MAX + 1];
unsigned int qlen; unsigned int qlen, dropped = 0;
int err; int err;
if (!opt) if (!opt)
...@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) { while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q); struct sk_buff *skb = __skb_dequeue(&sch->q);
dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb); qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch); qdisc_drop(skb, sch);
} }
qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
sch_tree_unlock(sch); sch_tree_unlock(sch);
return 0; return 0;
......
...@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) ...@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
struct Qdisc *child = q->queues[i]; struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc; q->queues[i] = &noop_qdisc;
if (child != &noop_qdisc) { if (child != &noop_qdisc) {
qdisc_tree_decrease_qlen(child, child->q.qlen); qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
qdisc_destroy(child); qdisc_destroy(child);
} }
} }
...@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) ...@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
q->queues[i] = child; q->queues[i] = child;
if (old != &noop_qdisc) { if (old != &noop_qdisc) {
qdisc_tree_decrease_qlen(old, qdisc_tree_reduce_backlog(old,
old->q.qlen); old->q.qlen,
old->qstats.backlog);
qdisc_destroy(old); qdisc_destroy(old);
} }
sch_tree_unlock(sch); sch_tree_unlock(sch);
......
...@@ -220,9 +220,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid) ...@@ -220,9 +220,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
static void qfq_purge_queue(struct qfq_class *cl) static void qfq_purge_queue(struct qfq_class *cl)
{ {
unsigned int len = cl->qdisc->q.qlen; unsigned int len = cl->qdisc->q.qlen;
unsigned int backlog = cl->qdisc->qstats.backlog;
qdisc_reset(cl->qdisc); qdisc_reset(cl->qdisc);
qdisc_tree_decrease_qlen(cl->qdisc, len); qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
} }
static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = { static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
......
...@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
q->flags = ctl->flags; q->flags = ctl->flags;
q->limit = ctl->limit; q->limit = ctl->limit;
if (child) { if (child) {
qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
q->qdisc->qstats.backlog);
qdisc_destroy(q->qdisc); qdisc_destroy(q->qdisc);
q->qdisc = child; q->qdisc = child;
} }
......
...@@ -510,7 +510,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -510,7 +510,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
sch_tree_lock(sch); sch_tree_lock(sch);
qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
q->qdisc->qstats.backlog);
qdisc_destroy(q->qdisc); qdisc_destroy(q->qdisc);
q->qdisc = child; q->qdisc = child;
......
...@@ -346,7 +346,7 @@ static int ...@@ -346,7 +346,7 @@ static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{ {
struct sfq_sched_data *q = qdisc_priv(sch); struct sfq_sched_data *q = qdisc_priv(sch);
unsigned int hash; unsigned int hash, dropped;
sfq_index x, qlen; sfq_index x, qlen;
struct sfq_slot *slot; struct sfq_slot *slot;
int uninitialized_var(ret); int uninitialized_var(ret);
...@@ -461,7 +461,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -461,7 +461,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
qlen = slot->qlen; qlen = slot->qlen;
sfq_drop(sch); dropped = sfq_drop(sch);
/* Return Congestion Notification only if we dropped a packet /* Return Congestion Notification only if we dropped a packet
* from this flow. * from this flow.
*/ */
...@@ -469,7 +469,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -469,7 +469,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_CN; return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this */ /* As we dropped a packet, better let upper stack know this */
qdisc_tree_decrease_qlen(sch, 1); qdisc_tree_reduce_backlog(sch, 1, dropped);
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
...@@ -537,6 +537,7 @@ static void sfq_rehash(struct Qdisc *sch) ...@@ -537,6 +537,7 @@ static void sfq_rehash(struct Qdisc *sch)
struct sfq_slot *slot; struct sfq_slot *slot;
struct sk_buff_head list; struct sk_buff_head list;
int dropped = 0; int dropped = 0;
unsigned int drop_len = 0;
__skb_queue_head_init(&list); __skb_queue_head_init(&list);
...@@ -565,6 +566,7 @@ static void sfq_rehash(struct Qdisc *sch) ...@@ -565,6 +566,7 @@ static void sfq_rehash(struct Qdisc *sch)
if (x >= SFQ_MAX_FLOWS) { if (x >= SFQ_MAX_FLOWS) {
drop: drop:
qdisc_qstats_backlog_dec(sch, skb); qdisc_qstats_backlog_dec(sch, skb);
drop_len += qdisc_pkt_len(skb);
kfree_skb(skb); kfree_skb(skb);
dropped++; dropped++;
continue; continue;
...@@ -594,7 +596,7 @@ static void sfq_rehash(struct Qdisc *sch) ...@@ -594,7 +596,7 @@ static void sfq_rehash(struct Qdisc *sch)
} }
} }
sch->q.qlen -= dropped; sch->q.qlen -= dropped;
qdisc_tree_decrease_qlen(sch, dropped); qdisc_tree_reduce_backlog(sch, dropped, drop_len);
} }
static void sfq_perturbation(unsigned long arg) static void sfq_perturbation(unsigned long arg)
...@@ -618,7 +620,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -618,7 +620,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
struct sfq_sched_data *q = qdisc_priv(sch); struct sfq_sched_data *q = qdisc_priv(sch);
struct tc_sfq_qopt *ctl = nla_data(opt); struct tc_sfq_qopt *ctl = nla_data(opt);
struct tc_sfq_qopt_v1 *ctl_v1 = NULL; struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
unsigned int qlen; unsigned int qlen, dropped = 0;
struct red_parms *p = NULL; struct red_parms *p = NULL;
if (opt->nla_len < nla_attr_size(sizeof(*ctl))) if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
...@@ -667,8 +669,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -667,8 +669,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
qlen = sch->q.qlen; qlen = sch->q.qlen;
while (sch->q.qlen > q->limit) while (sch->q.qlen > q->limit)
sfq_drop(sch); dropped += sfq_drop(sch);
qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
del_timer(&q->perturb_timer); del_timer(&q->perturb_timer);
if (q->perturb_period) { if (q->perturb_period) {
......
...@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch) ...@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
struct tbf_sched_data *q = qdisc_priv(sch); struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *segs, *nskb; struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb); netdev_features_t features = netif_skb_features(skb);
unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
int ret, nb; int ret, nb;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
...@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch) ...@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
nskb = segs->next; nskb = segs->next;
segs->next = NULL; segs->next = NULL;
qdisc_skb_cb(segs)->pkt_len = segs->len; qdisc_skb_cb(segs)->pkt_len = segs->len;
len += segs->len;
ret = qdisc_enqueue(segs, q->qdisc); ret = qdisc_enqueue(segs, q->qdisc);
if (ret != NET_XMIT_SUCCESS) { if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret)) if (net_xmit_drop_count(ret))
...@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch) ...@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
} }
sch->q.qlen += nb; sch->q.qlen += nb;
if (nb > 1) if (nb > 1)
qdisc_tree_decrease_qlen(sch, 1 - nb); qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
consume_skb(skb); consume_skb(skb);
return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP; return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
} }
...@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
sch_tree_lock(sch); sch_tree_lock(sch);
if (child) { if (child) {
qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
q->qdisc->qstats.backlog);
qdisc_destroy(q->qdisc); qdisc_destroy(q->qdisc);
q->qdisc = child; q->qdisc = child;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment