Commit 5e0b8928 authored by Íñigo Huguet, committed by David S. Miller

net:cxgb3: replace tasklets with works

OFLD and CTRL TX queues can be stopped when there is no room left in
their DMA rings. When this happens, a restart is attempted later,
once some room has been freed in the corresponding ring.

Restarting these queues used to be triggered with tasklets; they can
be replaced with workqueue works, moving the restart out of softirq
context.

These queue stops/restarts probably don't happen often, and the
restarts can be quite lengthy because they try to send all pending
skbs. Moreover, since the ring is probably not empty yet and the DMA
engine still has work to do, there is no need for the low latency of
tasklets/softirq over running in a thread.
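For readers not familiar with the two deferred-work APIs, the conversion
follows the usual tasklet-to-workqueue pattern. The sketch below is a
minimal, generic illustration of that pattern, not code from the driver;
the names my_txq, my_restart and my_txq_* are made up for the example:

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical queue state; in cxgb3 this role is played by struct sge_txq,
 * whose tasklet_struct member becomes a work_struct.
 */
struct my_txq {
	struct work_struct qresume_work;	/* was: struct tasklet_struct */
	/* ... ring state, list of backpressured skbs, ... */
};

/* Work handler: runs in process context on a kworker thread (or on a
 * driver-owned workqueue), so a lengthy restart no longer runs in softirq.
 */
static void my_restart(struct work_struct *work)
{
	struct my_txq *q = container_of(work, struct my_txq, qresume_work);

	/* drain backpressured skbs, ring the doorbell, etc. */
}

static void my_txq_init(struct my_txq *q)
{
	INIT_WORK(&q->qresume_work, my_restart);	/* was: tasklet_setup() */
}

static void my_txq_resume(struct my_txq *q)
{
	schedule_work(&q->qresume_work);		/* was: tasklet_schedule() */
}

static void my_txq_teardown(struct my_txq *q)
{
	cancel_work_sync(&q->qresume_work);		/* was: tasklet_kill() */
}

The patch itself queues onto the driver's own cxgb3_wq via queue_work()
rather than schedule_work(), as the hunks below show.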
Signed-off-by: Íñigo Huguet <ihuguet@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a29cb691
@@ -178,7 +178,7 @@ struct sge_txq {		/* state for an SGE Tx queue */
 	unsigned int token;	/* WR token */
 	dma_addr_t phys_addr;	/* physical address of the ring */
 	struct sk_buff_head sendq; /* List of backpressured offload packets */
-	struct tasklet_struct qresume_tsk;	/* restarts the queue */
+	struct work_struct qresume_task;	/* restarts the queue */
 	unsigned int cntxt_id;	/* SGE context id for the Tx q */
 	unsigned long stops;	/* # of times q has been stopped */
 	unsigned long restarts;	/* # of queue restarts */
@@ -770,4 +770,6 @@ int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
 		       int phy_addr, const struct mdio_ops *mdio_ops);
 int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter,
 		       int phy_addr, const struct mdio_ops *mdio_ops);
+
+extern struct workqueue_struct *cxgb3_wq;
 #endif				/* __CHELSIO_COMMON_H */
@@ -1518,14 +1518,15 @@ static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
 
 /**
  *	restart_ctrlq - restart a suspended control queue
- *	@t: pointer to the tasklet associated with this handler
+ *	@w: pointer to the work associated with this handler
  *
  *	Resumes transmission on a suspended Tx control queue.
  */
-static void restart_ctrlq(struct tasklet_struct *t)
+static void restart_ctrlq(struct work_struct *w)
 {
 	struct sk_buff *skb;
-	struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_CTRL].qresume_tsk);
+	struct sge_qset *qs = container_of(w, struct sge_qset,
+					   txq[TXQ_CTRL].qresume_task);
 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
 
 	spin_lock(&q->lock);
@@ -1736,14 +1737,15 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 
 /**
  *	restart_offloadq - restart a suspended offload queue
- *	@t: pointer to the tasklet associated with this handler
+ *	@w: pointer to the work associated with this handler
  *
  *	Resumes transmission on a suspended Tx offload queue.
  */
-static void restart_offloadq(struct tasklet_struct *t)
+static void restart_offloadq(struct work_struct *w)
 {
 	struct sk_buff *skb;
-	struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_OFLD].qresume_tsk);
+	struct sge_qset *qs = container_of(w, struct sge_qset,
+					   txq[TXQ_OFLD].qresume_task);
 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
 	const struct port_info *pi = netdev_priv(qs->netdev);
 	struct adapter *adap = pi->adapter;
@@ -1998,13 +2000,17 @@ static void restart_tx(struct sge_qset *qs)
 	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
 	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
 		qs->txq[TXQ_OFLD].restarts++;
-		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
+
+		/* The work can be quite lengthy so we use driver's own queue */
+		queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task);
 	}
 
 	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
 	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
 	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
 		qs->txq[TXQ_CTRL].restarts++;
-		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
+
+		/* The work can be quite lengthy so we use driver's own queue */
+		queue_work(cxgb3_wq, &qs->txq[TXQ_CTRL].qresume_task);
 	}
 }
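A side note on the comment above: cxgb3_wq is the driver's own workqueue,
and only its extern declaration appears in this patch. The sketch below is
a hypothetical illustration of how a driver typically allocates and tears
down such a dedicated queue; the name example_wq and the WQ_MEM_RECLAIM
flag are assumptions for the example, not taken from cxgb3:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* plays the role of cxgb3_wq */

static int __init example_init(void)
{
	/* A driver-owned queue keeps potentially lengthy restart work off the
	 * shared system workqueue; WQ_MEM_RECLAIM guarantees forward progress
	 * under memory pressure (an assumed, common choice for net drivers).
	 */
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	/* flushes and frees the queue; pending works complete first */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");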
@@ -3085,8 +3091,8 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 		skb_queue_head_init(&q->txq[i].sendq);
 	}
 
-	tasklet_setup(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq);
-	tasklet_setup(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq);
+	INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq);
+	INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq);
 
 	q->fl[0].gen = q->fl[1].gen = 1;
 	q->fl[0].size = p->fl_size;
@@ -3276,11 +3282,11 @@ void t3_sge_start(struct adapter *adap)
  *
  *	Can be invoked from interrupt context e.g. error handler.
  *
- *	Note that this function cannot disable the restart of tasklets as
+ *	Note that this function cannot disable the restart of works as
  *	it cannot wait if called from interrupt context, however the
- *	tasklets will have no effect since the doorbells are disabled. The
+ *	works will have no effect since the doorbells are disabled. The
  *	driver will call tg3_sge_stop() later from process context, at
- *	which time the tasklets will be stopped if they are still running.
+ *	which time the works will be stopped if they are still running.
  */
 void t3_sge_stop_dma(struct adapter *adap)
 {
@@ -3292,7 +3298,7 @@ void t3_sge_stop_dma(struct adapter *adap)
  *	@adap: the adapter
  *
  *	Called from process context. Disables the DMA engine and any
- *	pending queue restart tasklets.
+ *	pending queue restart works.
  */
 void t3_sge_stop(struct adapter *adap)
 {
@@ -3303,8 +3309,8 @@ void t3_sge_stop(struct adapter *adap)
 	for (i = 0; i < SGE_QSETS; ++i) {
 		struct sge_qset *qs = &adap->sge.qs[i];
 
-		tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
-		tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
+		cancel_work_sync(&qs->txq[TXQ_OFLD].qresume_task);
+		cancel_work_sync(&qs->txq[TXQ_CTRL].qresume_task);
 	}
 }