Commit 5a0d2268 authored by Eric Dumazet, committed by David S. Miller

net: add netif_tx_queue_frozen_or_stopped

When testing struct netdev_queue state against the FROZEN bit, we also test the XOFF bit. We can test both bits at once and save some cycles.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d3c15cab
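For context, here is a minimal standalone C sketch (illustration only, not kernel code; the QSTATE_* names are hypothetical stand-ins for __QUEUE_STATE_XOFF and __QUEUE_STATE_FROZEN) of why a single AND against a combined mask can replace two separate bit tests:

/*
 * Illustration only -- userspace sketch, not kernel code.
 * Shows the idea behind QUEUE_STATE_XOFF_OR_FROZEN: one AND against a
 * combined mask instead of two separate bit tests.
 */
#include <stdio.h>

enum queue_state_bits {
	QSTATE_XOFF,	/* hypothetical stand-in for __QUEUE_STATE_XOFF */
	QSTATE_FROZEN,	/* hypothetical stand-in for __QUEUE_STATE_FROZEN */
};

#define QSTATE_XOFF_OR_FROZEN \
	((1UL << QSTATE_XOFF) | (1UL << QSTATE_FROZEN))

/* Before: two separate bit tests, potentially two branches. */
static int frozen_or_stopped_old(unsigned long state)
{
	return (state & (1UL << QSTATE_XOFF)) ||
	       (state & (1UL << QSTATE_FROZEN));
}

/* After: a single AND against the combined mask. */
static int frozen_or_stopped_new(unsigned long state)
{
	return state & QSTATE_XOFF_OR_FROZEN;
}

int main(void)
{
	unsigned long state = 1UL << QSTATE_FROZEN;

	printf("old: %d  new: %d\n",
	       frozen_or_stopped_old(state),
	       !!frozen_or_stopped_new(state));
	return 0;
}

Both versions agree when treated as booleans; the callers changed below only check the result for zero/non-zero, so returning the raw masked value is sufficient.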
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -493,6 +493,8 @@ static inline void napi_synchronize(const struct napi_struct *n)
 enum netdev_queue_state_t {
 	__QUEUE_STATE_XOFF,
 	__QUEUE_STATE_FROZEN,
+#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
+				    (1 << __QUEUE_STATE_FROZEN))
 };
 
 struct netdev_queue {
@@ -1629,9 +1631,9 @@ static inline int netif_queue_stopped(const struct net_device *dev)
 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
 {
-	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+	return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
 }
 
 /**
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -76,8 +76,7 @@ static void queue_process(struct work_struct *work)
 
 		local_irq_save(flags);
 		__netif_tx_lock(txq, smp_processor_id());
-		if (netif_tx_queue_stopped(txq) ||
-		    netif_tx_queue_frozen(txq) ||
+		if (netif_tx_queue_frozen_or_stopped(txq) ||
 		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			__netif_tx_unlock(txq);
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3527,7 +3527,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	__netif_tx_lock_bh(txq);
 
-	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) {
+	if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
 		ret = NETDEV_TX_BUSY;
 		pkt_dev->last_ok = 0;
 		goto unlock;
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
 		/* check the reason of requeuing without tx lock first */
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-		if (!netif_tx_queue_stopped(txq) &&
-		    !netif_tx_queue_frozen(txq)) {
+		if (!netif_tx_queue_frozen_or_stopped(txq)) {
 			q->gso_skb = NULL;
 			q->q.qlen--;
 		} else
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	spin_unlock(root_lock);
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+	if (!netif_tx_queue_frozen_or_stopped(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		ret = dev_requeue_skb(skb, q);
 	}
 
-	if (ret && (netif_tx_queue_stopped(txq) ||
-		    netif_tx_queue_frozen(txq)))
+	if (ret && netif_tx_queue_frozen_or_stopped(txq))
 		ret = 0;
 
 	return ret;
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -309,8 +309,7 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (__netif_tx_trylock(slave_txq)) {
 			unsigned int length = qdisc_pkt_len(skb);
 
-			if (!netif_tx_queue_stopped(slave_txq) &&
-			    !netif_tx_queue_frozen(slave_txq) &&
+			if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
 			    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
 				txq_trans_update(slave_txq);
 				__netif_tx_unlock(slave_txq);