Commit c27f339a authored by Jarek Poplawski, committed by David S. Miller

net_sched: Add qdisc __NET_XMIT_BYPASS flag

Patrick McHardy <kaber@trash.net> noticed that it would be nice to
replace NET_XMIT_BYPASS with NET_XMIT_SUCCESS plus an internal qdisc
flag, __NET_XMIT_BYPASS, and to remove the mapping from
dev_queue_xmit().

David Miller <davem@davemloft.net> spotted a serious bug in the first
version of this patch.
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 378a2f09
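The encoding is simple: the real NET_XMIT_* return codes live in the low 16
bits (below NET_XMIT_MASK), while qdisc-internal flags such as
__NET_XMIT_STOLEN and the new __NET_XMIT_BYPASS occupy bits above them. A
flag can therefore be OR-ed onto an ordinary return code inside the qdisc
layer and stripped off before the value escapes to dev_queue_xmit(). A
minimal standalone sketch of the scheme (the constant values mirror
linux/netdevice.h and net/sch_generic.h as of this series; the program
itself is illustrative, not part of the patch):

	#include <stdio.h>

	#define NET_XMIT_SUCCESS   0x00
	#define NET_XMIT_MASK      0xffff	/* low bits: real return codes */
	#define __NET_XMIT_STOLEN  0x00010000	/* qdisc-internal flags above */
	#define __NET_XMIT_BYPASS  0x00020000

	int main(void)
	{
		int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

		/* Inside the qdisc layer the flag is still visible, so a
		 * parent qdisc can decide to bump its drop counter. */
		if (ret & __NET_XMIT_BYPASS)
			printf("parent counts a drop\n");

		/* At the qdisc boundary the flag bits are masked off, so
		 * the caller simply sees NET_XMIT_SUCCESS. */
		printf("caller sees %#x\n", ret & NET_XMIT_MASK);
		return 0;
	}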
include/net/sch_generic.h
@@ -343,14 +343,14 @@ static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
 	return qdisc_skb_cb(skb)->pkt_len;
 }
 
-#ifdef CONFIG_NET_CLS_ACT
-/* additional qdisc xmit flags */
+/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
 enum net_xmit_qdisc_t {
 	__NET_XMIT_STOLEN = 0x00010000,
+	__NET_XMIT_BYPASS = 0x00020000,
 };
 
+#ifdef CONFIG_NET_CLS_ACT
 #define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
 #else
 #define net_xmit_drop_count(e)	(1)
 #endif
net/core/dev.c
@@ -1805,7 +1805,6 @@ int dev_queue_xmit(struct sk_buff *skb)
 		spin_unlock(root_lock);
 
-		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
 		goto out;
 	}
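The single deleted line above is the point of the whole exercise:
dev_queue_xmit() no longer has to translate NET_XMIT_BYPASS back into
NET_XMIT_SUCCESS, because a bypassing qdisc now returns NET_XMIT_SUCCESS in
the low bits and the internal flag bit is stripped before the value reaches
this function. The stripping happens at the qdisc boundary, roughly via the
helper introduced by the parent commit 378a2f09 (a sketch of
include/net/sch_generic.h from that commit, quoted for context, not part of
this patch):

	static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
	{
		return sch->enqueue(skb, sch);
	}

	static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
	{
		qdisc_skb_cb(skb)->pkt_len = skb->len;
		/* NET_XMIT_MASK keeps only the real return code; the
		 * __NET_XMIT_* flag bits never leave the qdisc layer. */
		return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
	}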
net/sched/sch_atm.c
@@ -457,7 +457,7 @@ drop: __maybe_unused
 		return 0;
 	}
 	tasklet_schedule(&p->task);
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 /*
net/sched/sch_cbq.c
@@ -230,7 +230,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	    (cl = cbq_class_lookup(q, prio)) != NULL)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	for (;;) {
 		int result = 0;
 		defmap = head->defaults;
@@ -377,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	q->rx_class = cl;
 #endif
 	if (cl == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
net/sched/sch_dsmark.c
@@ -268,7 +268,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 drop:
 	kfree_skb(skb);
 	sch->qstats.drops++;
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
net/sched/sch_hfsc.c
@@ -1159,7 +1159,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		if (cl->level == 0)
 			return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->root.filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -1578,7 +1578,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cl = hfsc_classify(skb, sch, &err);
 	if (cl == NULL) {
-		if (err == NET_XMIT_BYPASS)
+		if (err & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return err;
net/sched/sch_htb.c
@@ -214,7 +214,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -567,7 +567,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -612,7 +612,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
net/sched/sch_netem.c
@@ -176,7 +176,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (count == 0) {
 		sch->qstats.drops++;
 		kfree_skb(skb);
-		return NET_XMIT_BYPASS;
+		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	}
 
 	skb_orphan(skb);
net/sched/sch_prio.c
@@ -38,7 +38,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	struct tcf_result res;
 	int err;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 		err = tc_classify(skb, q->filter_list, &res);
 #ifdef CONFIG_NET_CLS_ACT
@@ -74,7 +74,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -103,7 +103,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	qdisc = prio_classify(skb, sch, &ret);
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
net/sched/sch_sfq.c
@@ -171,7 +171,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if (!q->filter_list)
 		return sfq_hash(q, skb) + 1;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	result = tc_classify(skb, q->filter_list, &res);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -285,7 +285,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -339,7 +339,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
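Across all the scheduler hunks above the conversion is uniform: each
classify helper now seeds *qerr with NET_XMIT_SUCCESS | __NET_XMIT_BYPASS
instead of NET_XMIT_BYPASS, and each enqueue/requeue path tests the flag
bit (ret & __NET_XMIT_BYPASS) rather than comparing the whole value, since
the flag is OR-ed onto a normal return code. The return value still carries
the flag upward so nested qdiscs can account the drop; it is masked off
only once, at the boundary shown in the sketch after the net/core/dev.c
hunk.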