Commit d2b21b58 authored by Jamal Hadi Salim's avatar Jamal Hadi Salim Committed by David S. Miller

[PKT_SCHED]: Pass NET_XMIT_* status properly back through classifiers.

parent 07f0a148
......@@ -238,7 +238,7 @@ cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
*/
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch)
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
struct cbq_class *head = &q->link;
......@@ -256,7 +256,9 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch)
for (;;) {
int result = 0;
#ifdef CONFIG_NET_CLS_ACT
int terminal = 0;
#endif
defmap = head->defaults;
/*
......@@ -275,6 +277,28 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch)
goto fallback;
}
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_SHOT: /* Stop and kfree */
*qres = NET_XMIT_DROP;
terminal = 1;
break;
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
terminal = 1;
break;
case TC_ACT_RECLASSIFY: /* Things look good */
case TC_ACT_OK:
case TC_ACT_UNSPEC:
default:
break;
}
if (terminal) {
kfree_skb(skb);
return NULL;
}
#else
#ifdef CONFIG_NET_CLS_POLICE
switch (result) {
case TC_POLICE_RECLASSIFY:
......@@ -284,6 +308,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch)
default:
break;
}
#endif
#endif
if (cl->level == 0)
return cl;
......@@ -394,9 +419,9 @@ static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = cbq_classify(skb, sch);
int len = skb->len;
int ret = NET_XMIT_POLICED;
int ret = NET_XMIT_SUCCESS;
struct cbq_class *cl = cbq_classify(skb, sch,&ret);
#ifdef CONFIG_NET_CLS_POLICE
q->rx_class = cl;
......@@ -405,17 +430,18 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
#ifdef CONFIG_NET_CLS_POLICE
cl->q->__parent = sch;
#endif
if ((ret = cl->q->enqueue(skb, cl->q)) == 0) {
if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
sch->q.qlen++;
sch->stats.packets++;
sch->stats.bytes+=len;
cbq_mark_toplevel(q, cl);
if (!cl->next_alive)
cbq_activate_class(cl);
return 0;
return ret;
}
}
#ifndef CONFIG_NET_CLS_ACT
sch->stats.drops++;
if (cl == NULL)
kfree_skb(skb);
......@@ -423,6 +449,16 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cbq_mark_toplevel(q, cl);
cl->stats.drops++;
}
#else
if ( NET_XMIT_DROP == ret) {
sch->stats.drops++;
}
if (cl != NULL) {
cbq_mark_toplevel(q, cl);
cl->stats.drops++;
}
#endif
return ret;
}
......
......@@ -1235,7 +1235,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
}
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch)
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
......@@ -1250,9 +1250,33 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch)
tcf = q->root.filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
int terminal = 0;
switch (result) {
case TC_ACT_SHOT:
*qres = NET_XMIT_DROP;
terminal = 1;
break;
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
terminal = 1;
break;
case TC_ACT_RECLASSIFY:
case TC_ACT_OK:
case TC_ACT_UNSPEC:
default:
break;
}
if (terminal) {
kfree_skb(skb);
return NULL;
}
#else
#ifdef CONFIG_NET_CLS_POLICE
if (result == TC_POLICE_SHOT)
return NULL;
#endif
#endif
if ((cl = (struct hfsc_class *)res.class) == NULL) {
if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
......@@ -1660,15 +1684,26 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct hfsc_class *cl = hfsc_classify(skb, sch);
int ret = NET_XMIT_SUCCESS;
struct hfsc_class *cl = hfsc_classify(skb, sch, &ret);
unsigned int len = skb->len;
int err;
#ifdef CONFIG_NET_CLS_ACT
if (cl == NULL) {
if (NET_XMIT_DROP == ret) {
sch->stats.drops++;
}
return ret;
}
#else
if (cl == NULL) {
kfree_skb(skb);
sch->stats.drops++;
return NET_XMIT_DROP;
}
#endif
err = cl->qdisc->enqueue(skb, cl->qdisc);
if (unlikely(err != NET_XMIT_SUCCESS)) {
......@@ -1763,6 +1798,9 @@ hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
#ifdef CONFIG_NET_CLS_ACT
sch->stats.reqs++;
#endif
__skb_queue_head(&q->requeue, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
......
......@@ -297,7 +297,7 @@ static inline u32 htb_classid(struct htb_class *cl)
return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl;
......@@ -315,9 +315,33 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
tcf = q->filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
int terminal = 0;
switch (result) {
case TC_ACT_SHOT: /* Stop and kfree */
*qres = NET_XMIT_DROP;
terminal = 1;
break;
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
terminal = 1;
break;
case TC_ACT_RECLASSIFY: /* Things look good */
case TC_ACT_OK:
case TC_ACT_UNSPEC:
default:
break;
}
if (terminal) {
kfree_skb(skb);
return NULL;
}
#else
#ifdef CONFIG_NET_CLS_POLICE
if (result == TC_POLICE_SHOT)
return NULL;
#endif
#endif
if ((cl = (void*)res.class) == NULL) {
if (res.classid == sch->handle)
......@@ -686,9 +710,24 @@ htb_deactivate(struct htb_sched *q,struct htb_class *cl)
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
int ret = NET_XMIT_SUCCESS;
struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = htb_classify(skb,sch);
struct htb_class *cl = htb_classify(skb,sch,&ret);
#ifdef CONFIG_NET_CLS_ACT
if (cl == HTB_DIRECT ) {
if (q->direct_queue.qlen < q->direct_qlen ) {
__skb_queue_tail(&q->direct_queue, skb);
q->direct_pkts++;
}
} else if (!cl) {
if (NET_XMIT_DROP == ret) {
sch->stats.drops++;
}
return ret;
}
#else
if (cl == HTB_DIRECT || !cl) {
/* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen && cl) {
......@@ -699,7 +738,9 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->stats.drops++;
return NET_XMIT_DROP;
}
} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
}
#endif
else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
sch->stats.drops++;
cl->stats.drops++;
return NET_XMIT_DROP;
......@@ -718,9 +759,13 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = htb_classify(skb,sch);
int ret = NET_XMIT_SUCCESS;
struct htb_class *cl = htb_classify(skb,sch, &ret);
struct sk_buff *tskb;
#ifdef CONFIG_NET_CLS_ACT
sch->stats.reqs++;
#endif
if (cl == HTB_DIRECT || !cl) {
/* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen && cl) {
......
......@@ -50,28 +50,33 @@ struct prio_sched_data
struct Qdisc *prio_classify(struct sk_buff *skb, struct Qdisc *sch,int *r)
{
struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
u32 band = skb->priority;
struct tcf_result res;
u32 band;
int result = 0;
band = skb->priority;
if (TC_H_MAJ(skb->priority) != sch->handle) {
#ifdef CONFIG_NET_CLS_ACT
*r = result = tc_classify(skb, q->filter_list, &res);
int result = 0, terminal = 0;
result = tc_classify(skb, q->filter_list, &res);
switch (result) {
case TC_ACT_SHOT:
*r = NET_XMIT_DROP;
terminal = 1;
break;
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
kfree_skb(skb);
return NULL;
terminal = 1;
break;
case TC_ACT_RECLASSIFY:
case TC_ACT_OK:
case TC_ACT_UNSPEC:
default:
break;
};
if (terminal) {
kfree_skb(skb);
return NULL;
}
if (!q->filter_list ) {
#else
......@@ -94,35 +99,31 @@ static int
prio_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
struct Qdisc *qdisc;
int ret;
/* moving these up here changes things from before
* packets counted now include everything that was ever
* seen
*/
sch->stats.bytes += skb->len;
sch->stats.packets++;
int ret = NET_XMIT_SUCCESS;
qdisc = prio_classify(skb, sch, &ret);
if (NULL == qdisc)
goto dropped;
if ((ret = qdisc->enqueue(skb, qdisc)) == 0) {
if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) {
sch->stats.bytes += skb->len;
sch->stats.packets++;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
dropped:
#ifdef CONFIG_NET_CLS_ACT
if (TC_ACT_SHOT == ret || NET_XMIT_DROP == ret) {
if (NET_XMIT_DROP == ret) {
#endif
sch->stats.drops++;
return NET_XMIT_DROP;
#ifdef CONFIG_NET_CLS_ACT
} else {
sch->stats.overlimits++; /* abuse, but noone uses it */
return NET_XMIT_BYPASS; /* we dont want to confuse TCP */
}
#endif
return ret;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment