Commit eb6aafe3 authored by David S. Miller

pkt_sched: Make qdisc_run take a netdev_queue.

This allows us to use this calling convention all the way down into
qdisc_restart().
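
In other words: callers now resolve the struct netdev_queue once and hand it down, instead of passing the struct net_device and letting each level re-derive the queue. A minimal before/after sketch of the caller side (dev->tx_queue is the single per-device TX queue at this point in the series; locking and error handling elided):

/* Before this patch: keyed off the device, each helper re-derived
 * the TX queue from it internally.
 */
qdisc_run(dev);                            /* struct net_device * */

/* After this patch: resolve the queue once, then pass it all the way
 * down through qdisc_run() -> __qdisc_run() -> qdisc_restart().
 */
struct netdev_queue *txq = &dev->tx_queue;
qdisc_run(txq);                            /* struct netdev_queue * */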
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 86d804e1
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -84,13 +84,15 @@ extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
 					       struct nlattr *tab);
 extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
 
-extern void __qdisc_run(struct net_device *dev);
+extern void __qdisc_run(struct netdev_queue *txq);
 
-static inline void qdisc_run(struct net_device *dev)
+static inline void qdisc_run(struct netdev_queue *txq)
 {
+	struct net_device *dev = txq->dev;
+
 	if (!netif_queue_stopped(dev) &&
 	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
-		__qdisc_run(dev);
+		__qdisc_run(txq);
 }
 
 extern int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
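
Note that qdisc_run() still reaches back to txq->dev: at this stage of the series both the stopped flag and the __LINK_STATE_QDISC_RUNNING bit live in dev->state, not in the queue. The RUNNING bit is what guarantees a single CPU drains a given qdisc at a time. A user-space analogue of that guard in C11 atomics (qdisc_running, drain_queue(), and qdisc_run_model() are illustrative names, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool qdisc_running;      /* models __LINK_STATE_QDISC_RUNNING */

static void drain_queue(void)          /* stands in for __qdisc_run(txq) */
{
	/* ... dequeue and transmit until empty or throttled ... */
}

static void qdisc_run_model(void)
{
	/* atomic_exchange() mirrors test_and_set_bit(): only the caller
	 * that flips the flag from false to true becomes the drainer.
	 * Everyone else returns immediately, knowing the current owner
	 * will also transmit whatever they just enqueued.
	 */
	if (!atomic_exchange(&qdisc_running, true)) {
		drain_queue();
		atomic_store(&qdisc_running, false);
	}
}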
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1734,7 +1734,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 			/* reset queue_mapping to zero */
 			skb_set_queue_mapping(skb, 0);
 			rc = q->enqueue(skb, q);
-			qdisc_run(dev);
+			qdisc_run(txq);
 			spin_unlock(&txq->lock);
 
 			rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
@@ -1930,7 +1930,7 @@ static void net_tx_action(struct softirq_action *h)
 			clear_bit(__LINK_STATE_SCHED, &dev->state);
 
 			if (spin_trylock(&txq->lock)) {
-				qdisc_run(dev);
+				qdisc_run(txq);
 				spin_unlock(&txq->lock);
 			} else {
 				netif_schedule_queue(txq);
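
The net_tx_action() hunk above shows the softirq side of the same convention. Softirq context must not sit spinning on a queue lock held elsewhere, so the lock is taken opportunistically; on contention the work is pushed back via netif_schedule_queue(txq) for a later pass. The general trylock-or-defer shape, sketched with pthreads (struct deferred_queue and the callback names are illustrative, not kernel API):

#include <pthread.h>

struct deferred_queue {
	pthread_mutex_t lock;
	void (*run)(struct deferred_queue *);         /* qdisc_run(txq) */
	void (*reschedule)(struct deferred_queue *);  /* netif_schedule_queue(txq) */
};

static void service(struct deferred_queue *q)
{
	if (pthread_mutex_trylock(&q->lock) == 0) {
		/* Got the lock cheaply: do the work now. */
		q->run(q);
		pthread_mutex_unlock(&q->lock);
	} else {
		/* Contended: defer instead of spinning. */
		q->reschedule(q);
	}
}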
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -75,9 +75,8 @@ static inline int dev_requeue_skb(struct sk_buff *skb,
 	return 0;
 }
 
-static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
-					      struct netdev_queue *dev_queue,
-					      struct Qdisc *q)
+static inline struct sk_buff *dequeue_skb(struct netdev_queue *dev_queue,
+					  struct Qdisc *q)
 {
 	struct sk_buff *skb;
 
@@ -90,10 +89,10 @@ static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
 }
 
 static inline int handle_dev_cpu_collision(struct sk_buff *skb,
-					   struct net_device *dev,
 					   struct netdev_queue *dev_queue,
 					   struct Qdisc *q)
 {
+	struct net_device *dev = dev_queue->dev;
 	int ret;
 
 	if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
@@ -139,21 +138,23 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
  *				>0 - queue is not empty.
  *
  */
-static inline int qdisc_restart(struct net_device *dev)
+static inline int qdisc_restart(struct netdev_queue *txq)
 {
-	struct netdev_queue *txq = &dev->tx_queue;
 	struct Qdisc *q = txq->qdisc;
-	struct sk_buff *skb;
 	int ret = NETDEV_TX_BUSY;
+	struct net_device *dev;
+	struct sk_buff *skb;
 
 	/* Dequeue packet */
-	if (unlikely((skb = dev_dequeue_skb(dev, txq, q)) == NULL))
+	if (unlikely((skb = dequeue_skb(txq, q)) == NULL))
 		return 0;
 
 	/* And release queue */
 	spin_unlock(&txq->lock);
 
+	dev = txq->dev;
+
 	HARD_TX_LOCK(dev, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
 		ret = dev_hard_start_xmit(skb, dev);
 	HARD_TX_UNLOCK(dev);
@@ -170,7 +171,7 @@ static inline int qdisc_restart(struct net_device *dev)
 
 	case NETDEV_TX_LOCKED:
 		/* Driver try lock failed */
-		ret = handle_dev_cpu_collision(skb, dev, txq, q);
+		ret = handle_dev_cpu_collision(skb, txq, q);
 		break;
 
 	default:
@@ -186,11 +187,12 @@ static inline int qdisc_restart(struct net_device *dev)
 	return ret;
 }
 
-void __qdisc_run(struct net_device *dev)
+void __qdisc_run(struct netdev_queue *txq)
 {
+	struct net_device *dev = txq->dev;
 	unsigned long start_time = jiffies;
 
-	while (qdisc_restart(dev)) {
+	while (qdisc_restart(txq)) {
 		if (netif_queue_stopped(dev))
 			break;
 
@@ -200,7 +202,7 @@ void __qdisc_run(struct net_device *dev)
 		 * 2. we've been doing it for too long.
 		 */
 		if (need_resched() || jiffies != start_time) {
-			netif_schedule_queue(&dev->tx_queue);
+			netif_schedule_queue(txq);
 			break;
 		}
 	}
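
For completeness, the driver-verdict handling in qdisc_restart() sits between the two hunks above; after this patch it no longer threads dev through to handle_dev_cpu_collision(). A condensed paraphrase, not a verbatim excerpt (the dev_requeue_skb() argument list belongs to the pre-existing helper, which this commit leaves unchanged):

switch (ret) {
case NETDEV_TX_OK:
	/* Driver accepted the skb: report the remaining backlog so
	 * __qdisc_run() knows whether to keep looping.
	 */
	ret = qdisc_qlen(q);
	break;

case NETDEV_TX_LOCKED:
	/* Another CPU holds this driver's TX lock. */
	ret = handle_dev_cpu_collision(skb, txq, q);
	break;

default:
	/* NETDEV_TX_BUSY: give the skb back to the qdisc and let a
	 * later qdisc_restart() retry it.
	 */
	ret = dev_requeue_skb(skb, dev, txq, q);
	break;
}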