Commit d0de7309 authored by Jitendra Kalsaria, committed by David S. Miller

qlge: Cleanup atomic queue threshold check.

Signed-off-by: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 41812db8
...@@ -1397,7 +1397,6 @@ struct tx_ring { ...@@ -1397,7 +1397,6 @@ struct tx_ring {
struct tx_ring_desc *q; /* descriptor list for the queue */ struct tx_ring_desc *q; /* descriptor list for the queue */
spinlock_t lock; spinlock_t lock;
atomic_t tx_count; /* counts down for every outstanding IO */ atomic_t tx_count; /* counts down for every outstanding IO */
atomic_t queue_stopped; /* Turns queue off when full. */
struct delayed_work tx_work; struct delayed_work tx_work;
struct ql_adapter *qdev; struct ql_adapter *qdev;
u64 tx_packets; u64 tx_packets;
......
...@@ -2171,8 +2171,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) ...@@ -2171,8 +2171,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
ql_write_cq_idx(rx_ring); ql_write_cq_idx(rx_ring);
tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
if (atomic_read(&tx_ring->queue_stopped) && if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
(atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
/* /*
* The queue got stopped because the tx_ring was full. * The queue got stopped because the tx_ring was full.
* Wake it up, because it's now at least 25% empty. * Wake it up, because it's now at least 25% empty.
...@@ -2559,7 +2558,6 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) ...@@ -2559,7 +2558,6 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
"%s: BUG! shutting down tx queue %d due to lack of resources.\n", "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
__func__, tx_ring_idx); __func__, tx_ring_idx);
netif_stop_subqueue(ndev, tx_ring->wq_id); netif_stop_subqueue(ndev, tx_ring->wq_id);
atomic_inc(&tx_ring->queue_stopped);
tx_ring->tx_errors++; tx_ring->tx_errors++;
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
...@@ -2688,7 +2686,6 @@ static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) ...@@ -2688,7 +2686,6 @@ static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
tx_ring_desc++; tx_ring_desc++;
} }
atomic_set(&tx_ring->tx_count, tx_ring->wq_len); atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
atomic_set(&tx_ring->queue_stopped, 0);
} }
static void ql_free_tx_resources(struct ql_adapter *qdev, static void ql_free_tx_resources(struct ql_adapter *qdev,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.