Commit d2b21b58 authored by Jamal Hadi Salim's avatar Jamal Hadi Salim Committed by David S. Miller

[PKT_SCHED]: Pass NET_XMIT_* status properly back through classifiers.

parent 07f0a148
...@@ -238,7 +238,7 @@ cbq_reclassify(struct sk_buff *skb, struct cbq_class *this) ...@@ -238,7 +238,7 @@ cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
*/ */
static struct cbq_class * static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch) cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{ {
struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data; struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
struct cbq_class *head = &q->link; struct cbq_class *head = &q->link;
...@@ -256,7 +256,9 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch) ...@@ -256,7 +256,9 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch)
for (;;) { for (;;) {
int result = 0; int result = 0;
#ifdef CONFIG_NET_CLS_ACT
int terminal = 0;
#endif
defmap = head->defaults; defmap = head->defaults;
/* /*
...@@ -275,6 +277,28 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch) ...@@ -275,6 +277,28 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch)
goto fallback; goto fallback;
} }
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_SHOT: /* Stop and kfree */
*qres = NET_XMIT_DROP;
terminal = 1;
break;
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
terminal = 1;
break;
case TC_ACT_RECLASSIFY: /* Things look good */
case TC_ACT_OK:
case TC_ACT_UNSPEC:
default:
break;
}
if (terminal) {
kfree_skb(skb);
return NULL;
}
#else
#ifdef CONFIG_NET_CLS_POLICE #ifdef CONFIG_NET_CLS_POLICE
switch (result) { switch (result) {
case TC_POLICE_RECLASSIFY: case TC_POLICE_RECLASSIFY:
...@@ -284,6 +308,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch) ...@@ -284,6 +308,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch)
default: default:
break; break;
} }
#endif
#endif #endif
if (cl->level == 0) if (cl->level == 0)
return cl; return cl;
...@@ -394,9 +419,9 @@ static int ...@@ -394,9 +419,9 @@ static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{ {
struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data; struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = cbq_classify(skb, sch);
int len = skb->len; int len = skb->len;
int ret = NET_XMIT_POLICED; int ret = NET_XMIT_SUCCESS;
struct cbq_class *cl = cbq_classify(skb, sch,&ret);
#ifdef CONFIG_NET_CLS_POLICE #ifdef CONFIG_NET_CLS_POLICE
q->rx_class = cl; q->rx_class = cl;
...@@ -405,17 +430,18 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -405,17 +430,18 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
#ifdef CONFIG_NET_CLS_POLICE #ifdef CONFIG_NET_CLS_POLICE
cl->q->__parent = sch; cl->q->__parent = sch;
#endif #endif
if ((ret = cl->q->enqueue(skb, cl->q)) == 0) { if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
sch->q.qlen++; sch->q.qlen++;
sch->stats.packets++; sch->stats.packets++;
sch->stats.bytes+=len; sch->stats.bytes+=len;
cbq_mark_toplevel(q, cl); cbq_mark_toplevel(q, cl);
if (!cl->next_alive) if (!cl->next_alive)
cbq_activate_class(cl); cbq_activate_class(cl);
return 0; return ret;
} }
} }
#ifndef CONFIG_NET_CLS_ACT
sch->stats.drops++; sch->stats.drops++;
if (cl == NULL) if (cl == NULL)
kfree_skb(skb); kfree_skb(skb);
...@@ -423,6 +449,16 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -423,6 +449,16 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cbq_mark_toplevel(q, cl); cbq_mark_toplevel(q, cl);
cl->stats.drops++; cl->stats.drops++;
} }
#else
if ( NET_XMIT_DROP == ret) {
sch->stats.drops++;
}
if (cl != NULL) {
cbq_mark_toplevel(q, cl);
cl->stats.drops++;
}
#endif
return ret; return ret;
} }
......
...@@ -1235,7 +1235,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg) ...@@ -1235,7 +1235,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
} }
static struct hfsc_class * static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch) hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{ {
struct hfsc_sched *q = (struct hfsc_sched *)sch->data; struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl; struct hfsc_class *cl;
...@@ -1250,9 +1250,33 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch) ...@@ -1250,9 +1250,33 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch)
tcf = q->root.filter_list; tcf = q->root.filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
int terminal = 0;
switch (result) {
case TC_ACT_SHOT:
*qres = NET_XMIT_DROP;
terminal = 1;
break;
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
terminal = 1;
break;
case TC_ACT_RECLASSIFY:
case TC_ACT_OK:
case TC_ACT_UNSPEC:
default:
break;
}
if (terminal) {
kfree_skb(skb);
return NULL;
}
#else
#ifdef CONFIG_NET_CLS_POLICE #ifdef CONFIG_NET_CLS_POLICE
if (result == TC_POLICE_SHOT) if (result == TC_POLICE_SHOT)
return NULL; return NULL;
#endif
#endif #endif
if ((cl = (struct hfsc_class *)res.class) == NULL) { if ((cl = (struct hfsc_class *)res.class) == NULL) {
if ((cl = hfsc_find_class(res.classid, sch)) == NULL) if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
...@@ -1660,15 +1684,26 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) ...@@ -1660,15 +1684,26 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
static int static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{ {
struct hfsc_class *cl = hfsc_classify(skb, sch); int ret = NET_XMIT_SUCCESS;
struct hfsc_class *cl = hfsc_classify(skb, sch, &ret);
unsigned int len = skb->len; unsigned int len = skb->len;
int err; int err;
#ifdef CONFIG_NET_CLS_ACT
if (cl == NULL) {
if (NET_XMIT_DROP == ret) {
sch->stats.drops++;
}
return ret;
}
#else
if (cl == NULL) { if (cl == NULL) {
kfree_skb(skb); kfree_skb(skb);
sch->stats.drops++; sch->stats.drops++;
return NET_XMIT_DROP; return NET_XMIT_DROP;
} }
#endif
err = cl->qdisc->enqueue(skb, cl->qdisc); err = cl->qdisc->enqueue(skb, cl->qdisc);
if (unlikely(err != NET_XMIT_SUCCESS)) { if (unlikely(err != NET_XMIT_SUCCESS)) {
...@@ -1763,6 +1798,9 @@ hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -1763,6 +1798,9 @@ hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{ {
struct hfsc_sched *q = (struct hfsc_sched *)sch->data; struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
#ifdef CONFIG_NET_CLS_ACT
sch->stats.reqs++;
#endif
__skb_queue_head(&q->requeue, skb); __skb_queue_head(&q->requeue, skb);
sch->q.qlen++; sch->q.qlen++;
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
......
...@@ -297,7 +297,7 @@ static inline u32 htb_classid(struct htb_class *cl) ...@@ -297,7 +297,7 @@ static inline u32 htb_classid(struct htb_class *cl)
return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC; return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
} }
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch) static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{ {
struct htb_sched *q = (struct htb_sched *)sch->data; struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl; struct htb_class *cl;
...@@ -315,9 +315,33 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch) ...@@ -315,9 +315,33 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
tcf = q->filter_list; tcf = q->filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
int terminal = 0;
switch (result) {
case TC_ACT_SHOT: /* Stop and kfree */
*qres = NET_XMIT_DROP;
terminal = 1;
break;
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
terminal = 1;
break;
case TC_ACT_RECLASSIFY: /* Things look good */
case TC_ACT_OK:
case TC_ACT_UNSPEC:
default:
break;
}
if (terminal) {
kfree_skb(skb);
return NULL;
}
#else
#ifdef CONFIG_NET_CLS_POLICE #ifdef CONFIG_NET_CLS_POLICE
if (result == TC_POLICE_SHOT) if (result == TC_POLICE_SHOT)
return NULL; return NULL;
#endif
#endif #endif
if ((cl = (void*)res.class) == NULL) { if ((cl = (void*)res.class) == NULL) {
if (res.classid == sch->handle) if (res.classid == sch->handle)
...@@ -686,9 +710,24 @@ htb_deactivate(struct htb_sched *q,struct htb_class *cl) ...@@ -686,9 +710,24 @@ htb_deactivate(struct htb_sched *q,struct htb_class *cl)
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{ {
int ret = NET_XMIT_SUCCESS;
struct htb_sched *q = (struct htb_sched *)sch->data; struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = htb_classify(skb,sch); struct htb_class *cl = htb_classify(skb,sch,&ret);
#ifdef CONFIG_NET_CLS_ACT
if (cl == HTB_DIRECT ) {
if (q->direct_queue.qlen < q->direct_qlen ) {
__skb_queue_tail(&q->direct_queue, skb);
q->direct_pkts++;
}
} else if (!cl) {
if (NET_XMIT_DROP == ret) {
sch->stats.drops++;
}
return ret;
}
#else
if (cl == HTB_DIRECT || !cl) { if (cl == HTB_DIRECT || !cl) {
/* enqueue to helper queue */ /* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen && cl) { if (q->direct_queue.qlen < q->direct_qlen && cl) {
...@@ -699,7 +738,9 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -699,7 +738,9 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->stats.drops++; sch->stats.drops++;
return NET_XMIT_DROP; return NET_XMIT_DROP;
} }
} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) { }
#endif
else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
sch->stats.drops++; sch->stats.drops++;
cl->stats.drops++; cl->stats.drops++;
return NET_XMIT_DROP; return NET_XMIT_DROP;
...@@ -718,9 +759,13 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -718,9 +759,13 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch) static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{ {
struct htb_sched *q = (struct htb_sched *)sch->data; struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = htb_classify(skb,sch); int ret = NET_XMIT_SUCCESS;
struct htb_class *cl = htb_classify(skb,sch, &ret);
struct sk_buff *tskb; struct sk_buff *tskb;
#ifdef CONFIG_NET_CLS_ACT
sch->stats.reqs++;
#endif
if (cl == HTB_DIRECT || !cl) { if (cl == HTB_DIRECT || !cl) {
/* enqueue to helper queue */ /* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen && cl) { if (q->direct_queue.qlen < q->direct_qlen && cl) {
......
...@@ -50,28 +50,33 @@ struct prio_sched_data ...@@ -50,28 +50,33 @@ struct prio_sched_data
struct Qdisc *prio_classify(struct sk_buff *skb, struct Qdisc *sch,int *r) struct Qdisc *prio_classify(struct sk_buff *skb, struct Qdisc *sch,int *r)
{ {
struct prio_sched_data *q = (struct prio_sched_data *)sch->data; struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
u32 band = skb->priority;
struct tcf_result res; struct tcf_result res;
u32 band;
int result = 0;
band = skb->priority;
if (TC_H_MAJ(skb->priority) != sch->handle) { if (TC_H_MAJ(skb->priority) != sch->handle) {
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
*r = result = tc_classify(skb, q->filter_list, &res); int result = 0, terminal = 0;
result = tc_classify(skb, q->filter_list, &res);
switch (result) { switch (result) {
case TC_ACT_SHOT: case TC_ACT_SHOT:
*r = NET_XMIT_DROP;
terminal = 1;
break;
case TC_ACT_STOLEN: case TC_ACT_STOLEN:
case TC_ACT_QUEUED: case TC_ACT_QUEUED:
kfree_skb(skb); terminal = 1;
return NULL; break;
case TC_ACT_RECLASSIFY: case TC_ACT_RECLASSIFY:
case TC_ACT_OK: case TC_ACT_OK:
case TC_ACT_UNSPEC: case TC_ACT_UNSPEC:
default: default:
break; break;
}; };
if (terminal) {
kfree_skb(skb);
return NULL;
}
if (!q->filter_list ) { if (!q->filter_list ) {
#else #else
...@@ -94,35 +99,31 @@ static int ...@@ -94,35 +99,31 @@ static int
prio_enqueue(struct sk_buff *skb, struct Qdisc* sch) prio_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{ {
struct Qdisc *qdisc; struct Qdisc *qdisc;
int ret; int ret = NET_XMIT_SUCCESS;
/* moving these up here changes things from before
* packets counted now include everything that was ever
* seen
*/
sch->stats.bytes += skb->len;
sch->stats.packets++;
qdisc = prio_classify(skb, sch, &ret); qdisc = prio_classify(skb, sch, &ret);
if (NULL == qdisc) if (NULL == qdisc)
goto dropped; goto dropped;
if ((ret = qdisc->enqueue(skb, qdisc)) == 0) { if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) {
sch->stats.bytes += skb->len;
sch->stats.packets++;
sch->q.qlen++; sch->q.qlen++;
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
dropped: dropped:
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
if (TC_ACT_SHOT == ret || NET_XMIT_DROP == ret) { if (NET_XMIT_DROP == ret) {
#endif #endif
sch->stats.drops++; sch->stats.drops++;
return NET_XMIT_DROP;
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
} else { } else {
sch->stats.overlimits++; /* abuse, but noone uses it */ sch->stats.overlimits++; /* abuse, but noone uses it */
return NET_XMIT_BYPASS; /* we dont want to confuse TCP */
} }
#endif #endif
return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment