Commit 7ca60095 authored by Patrick McHardy

[PKT_SCHED]: fix CONFIG_NET_CLS_ACT skb leaks in HFSC/CBQ

Both HFSC and CBQ leak unclassified skbs with CONFIG_NET_CLS_ACT.
Move the freeing to enqueue, where it belongs. The same change is made
in HTB/prio; they just don't leak because they don't have unclassified packets.
Signed-off-by: Patrick McHardy <kaber@trash.net>
parent b8df0350
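
The ownership pattern the patch moves all four schedulers to can be illustrated outside the kernel. Below is a minimal user-space sketch, not kernel code: the names toy_skb, toy_classify and toy_enqueue are invented for illustration. The classifier only reports its verdict through *qerr and returns NULL on terminal results; the single kfree_skb() equivalent and the drop accounting live in the enqueue path, so an unclassified packet can no longer leak.

/*
 * Toy user-space model of the pattern this patch converges on
 * (names are invented, this is not kernel API): the classifier
 * reports its verdict through *qerr and returns NULL, and the
 * enqueue path owns the one and only free plus the drop count.
 */
#include <stdio.h>
#include <stdlib.h>

#define NET_XMIT_SUCCESS 0
#define NET_XMIT_DROP    1

enum tc_action { TC_ACT_OK, TC_ACT_SHOT, TC_ACT_STOLEN, TC_ACT_QUEUED };

struct toy_skb   { int len; };
struct toy_class { const char *name; };

static struct toy_class leaf = { "leaf" };
static unsigned int drops;

/* Stand-in for tc_classify(); the verdict is passed in by the caller. */
static struct toy_class *toy_classify(struct toy_skb *skb,
				      enum tc_action verdict, int *qerr)
{
	(void)skb;			/* a real classifier would inspect the packet */
	*qerr = NET_XMIT_DROP;		/* default: unclassified or SHOT means drop */
	switch (verdict) {
	case TC_ACT_QUEUED:
	case TC_ACT_STOLEN:
		*qerr = NET_XMIT_SUCCESS;
		/* fall through */
	case TC_ACT_SHOT:
		return NULL;		/* no kfree_skb() here any more */
	default:
		return &leaf;
	}
}

static int toy_enqueue(struct toy_skb *skb, enum tc_action verdict)
{
	int ret;
	struct toy_class *cl = toy_classify(skb, verdict, &ret);

	if (cl == NULL) {
		if (ret == NET_XMIT_DROP)
			drops++;	/* sch->qstats.drops++ in the patch */
		free(skb);		/* the single kfree_skb() lives here */
		return ret;
	}
	printf("enqueued %d bytes to class %s\n", skb->len, cl->name);
	free(skb);			/* a real qdisc would queue it instead */
	return NET_XMIT_SUCCESS;
}

int main(void)
{
	struct toy_skb *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

	if (!a || !b)
		return 1;
	a->len = 100;
	b->len = 200;
	toy_enqueue(a, TC_ACT_OK);	/* classified normally */
	toy_enqueue(b, TC_ACT_SHOT);	/* dropped and freed, not leaked */
	printf("drops = %u\n", drops);
	return 0;
}
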
net/sched/sch_cbq.c

@@ -241,7 +241,7 @@ cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
  */
 
 static struct cbq_class *
-cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
+cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *head = &q->link;
@@ -257,11 +257,9 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
 	    (cl = cbq_class_lookup(q, prio)) != NULL)
 		return cl;
 
+	*qerr = NET_XMIT_DROP;
 	for (;;) {
 		int result = 0;
-#ifdef CONFIG_NET_CLS_ACT
-		int terminal = 0;
-#endif
 		defmap = head->defaults;
 
 		/*
@@ -282,27 +280,13 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
 
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
-		case TC_ACT_SHOT: /* Stop and kfree */
-			*qres = NET_XMIT_DROP;
-			terminal = 1;
-			break;
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			terminal = 1;
-			break;
-		case TC_ACT_RECLASSIFY: /* Things look good */
-		case TC_ACT_OK:
-		case TC_ACT_UNSPEC:
-		default:
-			break;
-		}
-
-		if (terminal) {
-			kfree_skb(skb);
+			*qerr = NET_XMIT_SUCCESS;
+		case TC_ACT_SHOT:
 			return NULL;
 		}
-#else
-#ifdef CONFIG_NET_CLS_POLICE
+#elif defined(CONFIG_NET_CLS_POLICE)
 		switch (result) {
 		case TC_POLICE_RECLASSIFY:
 			return cbq_reclassify(skb, cl);
@@ -311,7 +295,6 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
 		default:
 			break;
 		}
-#endif
 #endif
 		if (cl->level == 0)
 			return cl;
@@ -423,13 +406,19 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	int len = skb->len;
-	int ret = NET_XMIT_SUCCESS;
-	struct cbq_class *cl = cbq_classify(skb, sch,&ret);
+	int ret;
+	struct cbq_class *cl = cbq_classify(skb, sch, &ret);
 
 #ifdef CONFIG_NET_CLS_POLICE
 	q->rx_class = cl;
 #endif
-	if (cl) {
+	if (cl == NULL) {
+		if (ret == NET_XMIT_DROP)
+			sch->qstats.drops++;
+		kfree_skb(skb);
+		return ret;
+	}
+
 #ifdef CONFIG_NET_CLS_POLICE
 	cl->q->__parent = sch;
 #endif
@@ -442,26 +431,10 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			cbq_activate_class(cl);
 		return ret;
 	}
-	}
 
-#ifndef CONFIG_NET_CLS_ACT
 	sch->qstats.drops++;
-	if (cl == NULL)
-		kfree_skb(skb);
-	else {
 	cbq_mark_toplevel(q, cl);
 	cl->qstats.drops++;
-	}
-#else
-	if ( NET_XMIT_DROP == ret) {
-		sch->qstats.drops++;
-	}
-
-	if (cl != NULL) {
-		cbq_mark_toplevel(q, cl);
-		cl->qstats.drops++;
-	}
-#endif
 	return ret;
 }

net/sched/sch_hfsc.c

@@ -1214,7 +1214,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
 }
 
 static struct hfsc_class *
-hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
+hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct hfsc_class *cl;
@@ -1227,35 +1227,20 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
 		if (cl->level == 0)
 			return cl;
 
+	*qerr = NET_XMIT_DROP;
 	tcf = q->root.filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
-		int terminal = 0;
 		switch (result) {
-		case TC_ACT_SHOT:
-			*qres = NET_XMIT_DROP;
-			terminal = 1;
-			break;
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			terminal = 1;
-			break;
-		case TC_ACT_RECLASSIFY:
-		case TC_ACT_OK:
-		case TC_ACT_UNSPEC:
-		default:
-			break;
-		}
-
-		if (terminal) {
-			kfree_skb(skb);
+			*qerr = NET_XMIT_SUCCESS;
+		case TC_ACT_SHOT:
 			return NULL;
 		}
-#else
-#ifdef CONFIG_NET_CLS_POLICE
+#elif defined(CONFIG_NET_CLS_POLICE)
 		if (result == TC_POLICE_SHOT)
 			return NULL;
-#endif
 #endif
 		if ((cl = (struct hfsc_class *)res.class) == NULL) {
 			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
@@ -1652,27 +1637,19 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 static int
 hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-	int ret = NET_XMIT_SUCCESS;
-	struct hfsc_class *cl = hfsc_classify(skb, sch, &ret);
-	unsigned int len = skb->len;
+	struct hfsc_class *cl;
+	unsigned int len;
 	int err;
 
-#ifdef CONFIG_NET_CLS_ACT
+	cl = hfsc_classify(skb, sch, &err);
 	if (cl == NULL) {
-		if (NET_XMIT_DROP == ret) {
+		if (err == NET_XMIT_DROP)
 			sch->qstats.drops++;
-		}
-		return ret;
-	}
-#else
-	if (cl == NULL) {
 		kfree_skb(skb);
-		sch->qstats.drops++;
-		return NET_XMIT_DROP;
+		return err;
 	}
-#endif
 
+	len = skb->len;
 	err = cl->qdisc->enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		cl->qstats.drops++;

net/sched/sch_htb.c

@@ -305,7 +305,7 @@ static inline u32 htb_classid(struct htb_class *cl)
 	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
 }
 
-static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
+static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl;
@@ -321,35 +321,20 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, in
 	if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
 		return cl;
 
+	*qerr = NET_XMIT_DROP;
 	tcf = q->filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
-		int terminal = 0;
 		switch (result) {
-		case TC_ACT_SHOT: /* Stop and kfree */
-			*qres = NET_XMIT_DROP;
-			terminal = 1;
-			break;
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			terminal = 1;
-			break;
-		case TC_ACT_RECLASSIFY: /* Things look good */
-		case TC_ACT_OK:
-		case TC_ACT_UNSPEC:
-		default:
-			break;
-		}
-
-		if (terminal) {
-			kfree_skb(skb);
+			*qerr = NET_XMIT_SUCCESS;
+		case TC_ACT_SHOT:
 			return NULL;
 		}
-#else
-#ifdef CONFIG_NET_CLS_POLICE
+#elif defined(CONFIG_NET_CLS_POLICE)
 		if (result == TC_POLICE_SHOT)
-			return NULL;
-#endif
+			return HTB_DIRECT;
 #endif
 		if ((cl = (void*)res.class) == NULL) {
 			if (res.classid == sch->handle)
@@ -723,37 +708,24 @@ htb_deactivate(struct htb_sched *q,struct htb_class *cl)
 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-	int ret = NET_XMIT_SUCCESS;
+	int ret;
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = htb_classify(skb,sch,&ret);
 
-#ifdef CONFIG_NET_CLS_ACT
-	if (cl == HTB_DIRECT ) {
-		if (q->direct_queue.qlen < q->direct_qlen ) {
+	if (cl == HTB_DIRECT) {
+		/* enqueue to helper queue */
+		if (q->direct_queue.qlen < q->direct_qlen) {
 			__skb_queue_tail(&q->direct_queue, skb);
 			q->direct_pkts++;
 		}
+#ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (NET_XMIT_DROP == ret) {
+		if (ret == NET_XMIT_DROP)
 			sch->qstats.drops++;
-		}
-		return ret;
-	}
-#else
-	if (cl == HTB_DIRECT || !cl) {
-		/* enqueue to helper queue */
-		if (q->direct_queue.qlen < q->direct_qlen && cl) {
-			__skb_queue_tail(&q->direct_queue, skb);
-			q->direct_pkts++;
-		} else {
 		kfree_skb (skb);
-			sch->qstats.drops++;
-			return NET_XMIT_DROP;
-		}
-	}
+		return ret;
 #endif
-	else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
+	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
 		sch->qstats.drops++;
 		cl->qstats.drops++;
 		return NET_XMIT_DROP;

net/sched/sch_prio.c

@@ -47,37 +47,23 @@ struct prio_sched_data
 };
 
-static struct Qdisc *prio_classify(struct sk_buff *skb,
-				   struct Qdisc *sch, int *r)
+static struct Qdisc *
+prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 	u32 band = skb->priority;
 	struct tcf_result res;
 
+	*qerr = NET_XMIT_DROP;
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 #ifdef CONFIG_NET_CLS_ACT
-		int result = 0, terminal = 0;
-
-		result = tc_classify(skb, q->filter_list, &res);
-
-		switch (result) {
-		case TC_ACT_SHOT:
-			*r = NET_XMIT_DROP;
-			terminal = 1;
-			break;
+		switch (tc_classify(skb, q->filter_list, &res)) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
-			terminal = 1;
-			break;
-		case TC_ACT_RECLASSIFY:
-		case TC_ACT_OK:
-		case TC_ACT_UNSPEC:
-		default:
-			break;
-		};
-
-		if (terminal) {
-			kfree_skb(skb);
+			*qerr = NET_XMIT_SUCCESS;
+		case TC_ACT_SHOT:
 			return NULL;
-		}
+		};
 
 		if (!q->filter_list ) {
 #else
@@ -97,15 +83,20 @@ static struct Qdisc *prio_classify(struct sk_buff *skb,
 }
 
 static int
-prio_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct Qdisc *qdisc;
-	int ret = NET_XMIT_SUCCESS;
+	int ret;
 
 	qdisc = prio_classify(skb, sch, &ret);
-	if (NULL == qdisc)
-		goto dropped;
+#ifdef CONFIG_NET_CLS_ACT
+	if (qdisc == NULL) {
+		if (ret == NET_XMIT_DROP)
+			sch->qstats.drops++;
+		kfree_skb(skb);
+		return ret;
+	}
+#endif
 
 	if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) {
 		sch->bstats.bytes += skb->len;
@@ -113,17 +104,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
-dropped:
-#ifdef CONFIG_NET_CLS_ACT
-	if (NET_XMIT_DROP == ret) {
-#endif
 	sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_ACT
-	} else {
-		sch->qstats.overlimits++; /* abuse, but noone uses it */
-	}
-#endif
 	return ret;
 }
@@ -132,18 +113,23 @@ static int
 prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct Qdisc *qdisc;
-	int ret = NET_XMIT_DROP;
+	int ret;
 
 	qdisc = prio_classify(skb, sch, &ret);
-	if (qdisc == NULL)
-		goto dropped;
+#ifdef CONFIG_NET_CLS_ACT
+	if (qdisc == NULL) {
+		if (ret == NET_XMIT_DROP)
+			sch->qstats.drops++;
+		kfree_skb(skb);
+		return ret;
+	}
+#endif
 
-	if ((ret = qdisc->ops->requeue(skb, qdisc)) == 0) {
+	if ((ret = qdisc->ops->requeue(skb, qdisc)) == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
 		return 0;
 	}
-dropped:
 	sch->qstats.drops++;
 	return NET_XMIT_DROP;
 }