Commit e3ba9650 authored by Thomas Graf, committed by David S. Miller

[PKT_SCHED]: Replace tc_stats with new gnet_stats in struct Qdisc

Replaces the single tc_stats block in struct Qdisc with the new gnet_stats
structures (gnet_stats_basic, gnet_stats_queue and gnet_stats_rate_est)
and adapts all qdiscs to use them.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a63563e2
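
For orientation, a minimal sketch of the accounting split this patch makes. The stand-in layouts below are inferred from the counters touched in the diff, not the exact definitions in <net/gen_stats.h>:

        /* Sketch only: field sets inferred from this diff; the real
         * structures are declared in <net/gen_stats.h>. */
        struct gnet_stats_basic_sketch {
                unsigned long long bytes;       /* was sch->stats.bytes */
                unsigned int packets;           /* was sch->stats.packets */
        };

        struct gnet_stats_queue_sketch {
                unsigned int backlog;           /* was sch->stats.backlog */
                unsigned int drops;             /* was sch->stats.drops */
                unsigned int overlimits;        /* was sch->stats.overlimits */
        };

Throughout the diff, traffic counters (bytes, packets) move to sch->bstats and queue-state counters (backlog, drops, overlimits) move to sch->qstats; the per-class cl->stats counters are not touched by this patch.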
@@ -9,6 +9,7 @@
 #include <net/pkt_cls.h>
 #include <linux/module.h>
 #include <linux/rtnetlink.h>
+#include <net/gen_stats.h>
 struct rtattr;
 struct Qdisc;
@@ -86,7 +87,9 @@ struct Qdisc
         struct net_device       *dev;
         struct list_head        list;
-        struct tc_stats         stats;
+        struct gnet_stats_basic bstats;
+        struct gnet_stats_queue qstats;
+        struct gnet_stats_rate_est      rate_est;
         spinlock_t              *stats_lock;
         struct rcu_head         q_rcu;
         int                     (*reshape_fail)(struct sk_buff *skb, struct Qdisc *q);
......
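
The new fields line up with the dump helpers that ship with <net/gen_stats.h>. A hedged sketch of how a qdisc dump path can export them (helper names from net/core/gen_stats.c of the same patch series; exact signatures vary across kernel versions):

        /* Inside a stats dump handler, after gnet_stats_start_copy() has
         * prepared the gnet_dump cursor 'd'; error handling elided. */
        gnet_stats_copy_basic(d, &sch->bstats);         /* bytes / packets */
        gnet_stats_copy_rate_est(d, &sch->rate_est);    /* estimated bps / pps */
        gnet_stats_copy_queue(d, &sch->qstats);         /* backlog, drops, overlimits */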
@@ -433,8 +433,8 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #endif
         if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
                 sch->q.qlen++;
-                sch->stats.packets++;
-                sch->stats.bytes+=len;
+                sch->bstats.packets++;
+                sch->bstats.bytes+=len;
                 cbq_mark_toplevel(q, cl);
                 if (!cl->next_alive)
                         cbq_activate_class(cl);
@@ -443,7 +443,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         }
 #ifndef CONFIG_NET_CLS_ACT
-        sch->stats.drops++;
+        sch->qstats.drops++;
         if (cl == NULL)
                 kfree_skb(skb);
         else {
@@ -452,7 +452,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         }
 #else
         if ( NET_XMIT_DROP == ret) {
-                sch->stats.drops++;
+                sch->qstats.drops++;
         }
         if (cl != NULL) {
@@ -472,7 +472,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
         if ((cl = q->tx_class) == NULL) {
                 kfree_skb(skb);
-                sch->stats.drops++;
+                sch->qstats.drops++;
                 return NET_XMIT_CN;
         }
         q->tx_class = NULL;
@@ -489,7 +489,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
                         cbq_activate_class(cl);
                 return 0;
         }
-        sch->stats.drops++;
+        sch->qstats.drops++;
         cl->stats.drops++;
         return ret;
 }
@@ -729,17 +729,17 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
                 if (cl->q->enqueue(skb, cl->q) == 0) {
                         sch->q.qlen++;
-                        sch->stats.packets++;
-                        sch->stats.bytes+=len;
+                        sch->bstats.packets++;
+                        sch->bstats.bytes+=len;
                         if (!cl->next_alive)
                                 cbq_activate_class(cl);
                         return 0;
                 }
-                sch->stats.drops++;
+                sch->qstats.drops++;
                 return 0;
         }
-        sch->stats.drops++;
+        sch->qstats.drops++;
         return -1;
 }
 #endif
@@ -1090,7 +1090,7 @@ cbq_dequeue(struct Qdisc *sch)
            Sigh... start watchdog timer in the last case. */
         if (sch->q.qlen) {
-                sch->stats.overlimits++;
+                sch->qstats.overlimits++;
                 if (q->wd_expires) {
                         long delay = PSCHED_US2JIFFIE(q->wd_expires);
                         if (delay <= 0)
......
@@ -241,11 +241,11 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
 #endif
             ((ret = p->q->enqueue(skb,p->q)) != 0)) {
-                sch->stats.drops++;
+                sch->qstats.drops++;
                 return ret;
         }
-        sch->stats.bytes += skb->len;
-        sch->stats.packets++;
+        sch->bstats.bytes += skb->len;
+        sch->bstats.packets++;
         sch->q.qlen++;
         return ret;
 }
@@ -299,7 +299,7 @@ static int dsmark_requeue(struct sk_buff *skb,struct Qdisc *sch)
                 sch->q.qlen++;
                 return 0;
         }
-        sch->stats.drops++;
+        sch->qstats.drops++;
         return ret;
 }
......
@@ -47,14 +47,14 @@ bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
         struct fifo_sched_data *q = qdisc_priv(sch);
-        if (sch->stats.backlog + skb->len <= q->limit) {
+        if (sch->qstats.backlog + skb->len <= q->limit) {
                 __skb_queue_tail(&sch->q, skb);
-                sch->stats.backlog += skb->len;
-                sch->stats.bytes += skb->len;
-                sch->stats.packets++;
+                sch->qstats.backlog += skb->len;
+                sch->bstats.bytes += skb->len;
+                sch->bstats.packets++;
                 return 0;
         }
-        sch->stats.drops++;
+        sch->qstats.drops++;
 #ifdef CONFIG_NET_CLS_POLICE
         if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
 #endif
@@ -66,7 +66,7 @@ static int
 bfifo_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
         __skb_queue_head(&sch->q, skb);
-        sch->stats.backlog += skb->len;
+        sch->qstats.backlog += skb->len;
         return 0;
 }
@@ -77,7 +77,7 @@ bfifo_dequeue(struct Qdisc* sch)
         skb = __skb_dequeue(&sch->q);
         if (skb)
-                sch->stats.backlog -= skb->len;
+                sch->qstats.backlog -= skb->len;
         return skb;
 }
@@ -89,7 +89,7 @@ fifo_drop(struct Qdisc* sch)
         skb = __skb_dequeue_tail(&sch->q);
         if (skb) {
                 unsigned int len = skb->len;
-                sch->stats.backlog -= len;
+                sch->qstats.backlog -= len;
                 kfree_skb(skb);
                 return len;
         }
@@ -100,7 +100,7 @@ static void
 fifo_reset(struct Qdisc* sch)
 {
         skb_queue_purge(&sch->q);
-        sch->stats.backlog = 0;
+        sch->qstats.backlog = 0;
 }
 static int
@@ -110,11 +110,11 @@ pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
         if (sch->q.qlen < q->limit) {
                 __skb_queue_tail(&sch->q, skb);
-                sch->stats.bytes += skb->len;
-                sch->stats.packets++;
+                sch->bstats.bytes += skb->len;
+                sch->bstats.packets++;
                 return 0;
         }
-        sch->stats.drops++;
+        sch->qstats.drops++;
 #ifdef CONFIG_NET_CLS_POLICE
         if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
 #endif
......
@@ -318,11 +318,11 @@ pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
         if (list->qlen < qdisc->dev->tx_queue_len) {
                 __skb_queue_tail(list, skb);
                 qdisc->q.qlen++;
-                qdisc->stats.bytes += skb->len;
-                qdisc->stats.packets++;
+                qdisc->bstats.bytes += skb->len;
+                qdisc->bstats.packets++;
                 return 0;
         }
-        qdisc->stats.drops++;
+        qdisc->qstats.drops++;
         kfree_skb(skb);
         return NET_XMIT_DROP;
 }
......
@@ -130,7 +130,7 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
         D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
                 "general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog,
-                sch->stats.backlog);
+                sch->qstats.backlog);
         /* sum up all the qaves of prios <= to ours to get the new qave*/
         if (!t->eqp && t->grio) {
                 for (i=0;i<t->DPs;i++) {
@@ -161,7 +161,7 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                         q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF];
         } else {
                 if (t->eqp) {
-                        q->qave += sch->stats.backlog - (q->qave >> q->Wlog);
+                        q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
                 } else {
                         q->qave += q->backlog - (q->qave >> q->Wlog);
                 }
@@ -179,9 +179,9 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                         q->backlog += skb->len;
 do_enqueue:
                         __skb_queue_tail(&sch->q, skb);
-                        sch->stats.backlog += skb->len;
-                        sch->stats.bytes += skb->len;
-                        sch->stats.packets++;
+                        sch->qstats.backlog += skb->len;
+                        sch->bstats.bytes += skb->len;
+                        sch->bstats.packets++;
                         return 0;
                 } else {
                         q->pdrop++;
@@ -189,12 +189,12 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 drop:
         kfree_skb(skb);
-        sch->stats.drops++;
+        sch->qstats.drops++;
         return NET_XMIT_DROP;
         }
         if ((q->qave+qave) >= q->qth_max) {
                 q->qcount = -1;
-                sch->stats.overlimits++;
+                sch->qstats.overlimits++;
                 q->forced++;
                 goto drop;
         }
@@ -203,7 +203,7 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                         goto enqueue;
                 q->qcount = 0;
                 q->qR = net_random()&q->Rmask;
-                sch->stats.overlimits++;
+                sch->qstats.overlimits++;
                 q->early++;
                 goto drop;
         }
@@ -221,7 +221,7 @@ gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
         PSCHED_SET_PASTPERFECT(q->qidlestart);
         __skb_queue_head(&sch->q, skb);
-        sch->stats.backlog += skb->len;
+        sch->qstats.backlog += skb->len;
         q->backlog += skb->len;
         return 0;
 }
@@ -235,7 +235,7 @@ gred_dequeue(struct Qdisc* sch)
         skb = __skb_dequeue(&sch->q);
         if (skb) {
-                sch->stats.backlog -= skb->len;
+                sch->qstats.backlog -= skb->len;
                 q= t->tab[(skb->tc_index&0xf)];
                 if (q) {
                         q->backlog -= skb->len;
@@ -269,8 +269,8 @@ static unsigned int gred_drop(struct Qdisc* sch)
         skb = __skb_dequeue_tail(&sch->q);
         if (skb) {
                 unsigned int len = skb->len;
-                sch->stats.backlog -= len;
-                sch->stats.drops++;
+                sch->qstats.backlog -= len;
+                sch->qstats.drops++;
                 q= t->tab[(skb->tc_index&0xf)];
                 if (q) {
                         q->backlog -= len;
@@ -304,7 +304,7 @@ static void gred_reset(struct Qdisc* sch)
         __skb_queue_purge(&sch->q);
-        sch->stats.backlog = 0;
+        sch->qstats.backlog = 0;
         for (i=0;i<t->DPs;i++) {
                 q= t->tab[i];
......
@@ -1677,14 +1677,14 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
         if (cl == NULL) {
                 if (NET_XMIT_DROP == ret) {
-                        sch->stats.drops++;
+                        sch->qstats.drops++;
                 }
                 return ret;
         }
 #else
         if (cl == NULL) {
                 kfree_skb(skb);
-                sch->stats.drops++;
+                sch->qstats.drops++;
                 return NET_XMIT_DROP;
         }
 #endif
@@ -1692,7 +1692,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         err = cl->qdisc->enqueue(skb, cl->qdisc);
         if (unlikely(err != NET_XMIT_SUCCESS)) {
                 cl->stats.drops++;
-                sch->stats.drops++;
+                sch->qstats.drops++;
                 return err;
         }
@@ -1701,8 +1701,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         cl->stats.packets++;
         cl->stats.bytes += len;
-        sch->stats.packets++;
-        sch->stats.bytes += len;
+        sch->bstats.packets++;
+        sch->bstats.bytes += len;
         sch->q.qlen++;
         return NET_XMIT_SUCCESS;
@@ -1739,7 +1739,7 @@ hfsc_dequeue(struct Qdisc *sch)
                  */
                 cl = vttree_get_minvt(&q->root, cur_time);
                 if (cl == NULL) {
-                        sch->stats.overlimits++;
+                        sch->qstats.overlimits++;
                         hfsc_schedule_watchdog(sch, cur_time);
                         return NULL;
                 }
@@ -1804,7 +1804,7 @@ hfsc_drop(struct Qdisc *sch)
                         list_move_tail(&cl->dlist, &q->droplist);
                 }
                 cl->stats.drops++;
-                sch->stats.drops++;
+                sch->qstats.drops++;
                 sch->q.qlen--;
                 return len;
         }
......
@@ -735,7 +735,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 }
         } else if (!cl) {
                 if (NET_XMIT_DROP == ret) {
-                        sch->stats.drops++;
+                        sch->qstats.drops++;
                 }
                 return ret;
         }
@@ -747,13 +747,13 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                         q->direct_pkts++;
                 } else {
                         kfree_skb (skb);
-                        sch->stats.drops++;
+                        sch->qstats.drops++;
                         return NET_XMIT_DROP;
                 }
         }
 #endif
         else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-                sch->stats.drops++;
+                sch->qstats.drops++;
                 cl->stats.drops++;
                 return NET_XMIT_DROP;
         } else {
@@ -762,7 +762,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         }
         sch->q.qlen++;
-        sch->stats.packets++; sch->stats.bytes += skb->len;
+        sch->bstats.packets++; sch->bstats.bytes += skb->len;
         HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
         return NET_XMIT_SUCCESS;
 }
@@ -783,11 +783,11 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
                         __skb_queue_head(&q->direct_queue, skb);
                         tskb = __skb_dequeue_tail(&q->direct_queue);
                         kfree_skb (tskb);
-                        sch->stats.drops++;
+                        sch->qstats.drops++;
                         return NET_XMIT_CN;
                 }
         } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-                sch->stats.drops++;
+                sch->qstats.drops++;
                 cl->stats.drops++;
                 return NET_XMIT_DROP;
         } else
@@ -1117,7 +1117,7 @@ static void htb_delay_by(struct Qdisc *sch,long delay)
         /* why don't use jiffies here ? because expires can be in past */
         mod_timer(&q->timer, q->jiffies + delay);
         sch->flags |= TCQ_F_THROTTLED;
-        sch->stats.overlimits++;
+        sch->qstats.overlimits++;
         HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
 }
......
@@ -151,12 +151,12 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
          * firewall FW_* code.
          */
 #ifdef CONFIG_NET_CLS_ACT
-        sch->stats.packets++;
-        sch->stats.bytes += skb->len;
+        sch->bstats.packets++;
+        sch->bstats.bytes += skb->len;
         switch (result) {
                 case TC_ACT_SHOT:
                         result = TC_ACT_SHOT;
-                        sch->stats.drops++;
+                        sch->qstats.drops++;
                         break;
                 case TC_ACT_STOLEN:
                 case TC_ACT_QUEUED:
@@ -176,14 +176,14 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
         switch (result) {
                 case TC_POLICE_SHOT:
                         result = NF_DROP;
-                        sch->stats.drops++;
+                        sch->qstats.drops++;
                         break;
                 case TC_POLICE_RECLASSIFY: /* DSCP remarking here ? */
                 case TC_POLICE_OK:
                 case TC_POLICE_UNSPEC:
                 default:
-                        sch->stats.packets++;
-                        sch->stats.bytes += skb->len;
+                        sch->bstats.packets++;
+                        sch->bstats.bytes += skb->len;
                         result = NF_ACCEPT;
                         break;
         };
@@ -191,8 +191,8 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
 #else
         D2PRINTK("Overriding result to ACCEPT\n");
         result = NF_ACCEPT;
-        sch->stats.packets++;
-        sch->stats.bytes += skb->len;
+        sch->bstats.packets++;
+        sch->bstats.bytes += skb->len;
 #endif
 #endif
......
@@ -153,12 +153,12 @@ static int delay_skb(struct Qdisc *sch, struct sk_buff *skb)
         if (likely(q->delayed.qlen < q->limit)) {
                 __skb_queue_tail(&q->delayed, skb);
                 sch->q.qlen++;
-                sch->stats.bytes += skb->len;
-                sch->stats.packets++;
+                sch->bstats.bytes += skb->len;
+                sch->bstats.packets++;
                 return NET_XMIT_SUCCESS;
         }
-        sch->stats.drops++;
+        sch->qstats.drops++;
         kfree_skb(skb);
         return NET_XMIT_DROP;
 }
@@ -172,7 +172,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         /* Random packet drop 0 => none, ~0 => all */
         if (q->loss && q->loss >= get_crandom(&q->loss_cor)) {
                 pr_debug("netem_enqueue: random loss\n");
-                sch->stats.drops++;
+                sch->qstats.drops++;
                 return 0;       /* lie about loss so TCP doesn't know */
         }
@@ -196,7 +196,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 ++q->counter;
                 ret = q->qdisc->enqueue(skb, q->qdisc);
                 if (ret)
-                        sch->stats.drops++;
+                        sch->qstats.drops++;
         return ret;
 }
@@ -224,7 +224,7 @@ static unsigned int netem_drop(struct Qdisc* sch)
         if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
                 sch->q.qlen--;
-                sch->stats.drops++;
+                sch->qstats.drops++;
         }
         return len;
 }
@@ -256,7 +256,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
                 __skb_unlink(skb, &q->delayed);
                 if (q->qdisc->enqueue(skb, q->qdisc))
-                        sch->stats.drops++;
+                        sch->qstats.drops++;
         }
         skb = q->qdisc->dequeue(q->qdisc);
......
@@ -107,8 +107,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                 goto dropped;
         if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) {
-                sch->stats.bytes += skb->len;
-                sch->stats.packets++;
+                sch->bstats.bytes += skb->len;
+                sch->bstats.packets++;
                 sch->q.qlen++;
                 return NET_XMIT_SUCCESS;
         }
@@ -117,10 +117,10 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 #ifdef CONFIG_NET_CLS_ACT
         if (NET_XMIT_DROP == ret) {
 #endif
-                sch->stats.drops++;
+                sch->qstats.drops++;
 #ifdef CONFIG_NET_CLS_ACT
         } else {
-                sch->stats.overlimits++; /* abuse, but noone uses it */
+                sch->qstats.overlimits++; /* abuse, but noone uses it */
         }
 #endif
         return ret;
@@ -142,7 +142,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
                 return 0;
         }
 dropped:
-        sch->stats.drops++;
+        sch->qstats.drops++;
         return NET_XMIT_DROP;
 }
......
@@ -228,13 +228,13 @@ red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                         q->qave >>= 1;
                 }
         } else {
-                q->qave += sch->stats.backlog - (q->qave >> q->Wlog);
+                q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
                 /* NOTE:
                    q->qave is fixed point number with point at Wlog.
                    The formulae above is equvalent to floating point
                    version:
-                   qave = qave*(1-W) + sch->stats.backlog*W;
+                   qave = qave*(1-W) + sch->qstats.backlog*W;
                    --ANK (980924)
                  */
         }
@@ -242,22 +242,22 @@ red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
         if (q->qave < q->qth_min) {
                 q->qcount = -1;
 enqueue:
-                if (sch->stats.backlog + skb->len <= q->limit) {
+                if (sch->qstats.backlog + skb->len <= q->limit) {
                         __skb_queue_tail(&sch->q, skb);
-                        sch->stats.backlog += skb->len;
-                        sch->stats.bytes += skb->len;
-                        sch->stats.packets++;
+                        sch->qstats.backlog += skb->len;
+                        sch->bstats.bytes += skb->len;
+                        sch->bstats.packets++;
                         return NET_XMIT_SUCCESS;
                 } else {
                         q->st.pdrop++;
                 }
                 kfree_skb(skb);
-                sch->stats.drops++;
+                sch->qstats.drops++;
                 return NET_XMIT_DROP;
         }
         if (q->qave >= q->qth_max) {
                 q->qcount = -1;
-                sch->stats.overlimits++;
+                sch->qstats.overlimits++;
 mark:
                 if (!(q->flags&TC_RED_ECN) || !red_ecn_mark(skb)) {
                         q->st.early++;
@@ -288,7 +288,7 @@ red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                         goto enqueue;
                 q->qcount = 0;
                 q->qR = net_random()&q->Rmask;
-                sch->stats.overlimits++;
+                sch->qstats.overlimits++;
                 goto mark;
         }
         q->qR = net_random()&q->Rmask;
@@ -296,7 +296,7 @@ red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 drop:
         kfree_skb(skb);
-        sch->stats.drops++;
+        sch->qstats.drops++;
         return NET_XMIT_CN;
 }
@@ -308,7 +308,7 @@ red_requeue(struct sk_buff *skb, struct Qdisc* sch)
         PSCHED_SET_PASTPERFECT(q->qidlestart);
         __skb_queue_head(&sch->q, skb);
-        sch->stats.backlog += skb->len;
+        sch->qstats.backlog += skb->len;
         return 0;
 }
@@ -320,7 +320,7 @@ red_dequeue(struct Qdisc* sch)
         skb = __skb_dequeue(&sch->q);
         if (skb) {
-                sch->stats.backlog -= skb->len;
+                sch->qstats.backlog -= skb->len;
                 return skb;
         }
         PSCHED_GET_TIME(q->qidlestart);
@@ -335,8 +335,8 @@ static unsigned int red_drop(struct Qdisc* sch)
         skb = __skb_dequeue_tail(&sch->q);
         if (skb) {
                 unsigned int len = skb->len;
-                sch->stats.backlog -= len;
-                sch->stats.drops++;
+                sch->qstats.backlog -= len;
+                sch->qstats.drops++;
                 q->st.other++;
                 kfree_skb(skb);
                 return len;
@@ -350,7 +350,7 @@ static void red_reset(struct Qdisc* sch)
         struct red_sched_data *q = qdisc_priv(sch);
         __skb_queue_purge(&sch->q);
-        sch->stats.backlog = 0;
+        sch->qstats.backlog = 0;
         PSCHED_SET_PASTPERFECT(q->qidlestart);
         q->qave = 0;
         q->qcount = -1;
......
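
The NOTE in red_enqueue above describes the fixed-point EWMA behind q->qave. A standalone user-space sketch of the same arithmetic (Wlog and the backlog samples are arbitrary example values, not kernel defaults):

        #include <stdio.h>

        /* qave holds the average scaled by 2^Wlog, so
         *   qave += backlog - (qave >> Wlog);
         * is the fixed-point form of qave = qave*(1-W) + backlog*W
         * with W = 2^-Wlog. */
        int main(void)
        {
                const unsigned int Wlog = 9;            /* example: W = 1/512 */
                long qave = 0;                          /* scaled average */
                const long backlog[] = { 1500, 1500, 0, 3000 };
                int i;

                for (i = 0; i < 4; i++) {
                        qave += backlog[i] - (qave >> Wlog);
                        printf("sample %d: avg ~= %ld bytes\n", i, qave >> Wlog);
                }
                return 0;
        }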
@@ -227,7 +227,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
                 kfree_skb(skb);
                 sfq_dec(q, x);
                 sch->q.qlen--;
-                sch->stats.drops++;
+                sch->qstats.drops++;
                 return len;
         }
@@ -243,7 +243,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
                 sfq_dec(q, d);
                 sch->q.qlen--;
                 q->ht[q->hash[d]] = SFQ_DEPTH;
-                sch->stats.drops++;
+                sch->qstats.drops++;
                 return len;
         }
@@ -276,8 +276,8 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                 }
         }
         if (++sch->q.qlen < q->limit-1) {
-                sch->stats.bytes += skb->len;
-                sch->stats.packets++;
+                sch->bstats.bytes += skb->len;
+                sch->bstats.packets++;
                 return 0;
         }
@@ -313,7 +313,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
         if (++sch->q.qlen < q->limit - 1)
                 return 0;
-        sch->stats.drops++;
+        sch->qstats.drops++;
         sfq_drop(sch);
         return NET_XMIT_CN;
 }
......
@@ -141,7 +141,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
         int ret;
         if (skb->len > q->max_size) {
-                sch->stats.drops++;
+                sch->qstats.drops++;
 #ifdef CONFIG_NET_CLS_POLICE
                 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
 #endif
@@ -151,13 +151,13 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
         }
         if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
-                sch->stats.drops++;
+                sch->qstats.drops++;
                 return ret;
         }
         sch->q.qlen++;
-        sch->stats.bytes += skb->len;
-        sch->stats.packets++;
+        sch->bstats.bytes += skb->len;
+        sch->bstats.packets++;
         return 0;
 }
@@ -179,7 +179,7 @@ static unsigned int tbf_drop(struct Qdisc* sch)
         if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
                 sch->q.qlen--;
-                sch->stats.drops++;
+                sch->qstats.drops++;
         }
         return len;
 }
@@ -250,11 +250,11 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
                 if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
                         /* When requeue fails skb is dropped */
                         sch->q.qlen--;
-                        sch->stats.drops++;
+                        sch->qstats.drops++;
                 }
                 sch->flags |= TCQ_F_THROTTLED;
-                sch->stats.overlimits++;
+                sch->qstats.overlimits++;
         }
         return NULL;
 }
......
@@ -96,14 +96,14 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
         __skb_queue_tail(&q->q, skb);
         if (q->q.qlen <= dev->tx_queue_len) {
-                sch->stats.bytes += skb->len;
-                sch->stats.packets++;
+                sch->bstats.bytes += skb->len;
+                sch->bstats.packets++;
                 return 0;
         }
         __skb_unlink(skb, &q->q);
         kfree_skb(skb);
-        sch->stats.drops++;
+        sch->qstats.drops++;
         return NET_XMIT_DROP;
 }
......