Commit 9bce8b2c authored by Ursula Braun, committed by Martin Schwidefsky

s390/qdio: avoid reschedule of outbound tasklet once killed

During qdio_shutdown the queue tasklets are killed for all inbound and
outbound queues. The queue structures might be freed after qdio_shutdown,
so it must be guaranteed that the queue tasklets are not rescheduled
afterwards. In addition, the outbound queue timers are deleted, and it
must be guaranteed that these timers are not restarted after
qdio_shutdown processing. Timer deletion should use del_timer_sync() to
make sure qdio_outbound_timer() has also finished on other CPUs. Queue
tasklets should be scheduled only in state QDIO_IRQ_STATE_ACTIVE.
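
For illustration only, a minimal sketch of the pattern this change relies
on: scheduling is refused once the state leaves ACTIVE, and teardown uses
the synchronous delete/kill variants so that handlers already running on
other CPUs are waited for. All names below (example_queue,
example_tasklet_schedule, example_shutdown, EXAMPLE_ACTIVE,
EXAMPLE_STOPPED) are hypothetical and not part of the qdio code.

  #include <linux/errno.h>
  #include <linux/interrupt.h>
  #include <linux/timer.h>

  enum example_state { EXAMPLE_ACTIVE, EXAMPLE_STOPPED };

  struct example_queue {
  	enum example_state state;
  	struct timer_list timer;
  	struct tasklet_struct tasklet;
  };

  /* Refuse to (re)schedule once the queue is no longer active. */
  static int example_tasklet_schedule(struct example_queue *q)
  {
  	if (q->state == EXAMPLE_ACTIVE) {
  		tasklet_schedule(&q->tasklet);
  		return 0;
  	}
  	return -EPERM;
  }

  /* Teardown: leave the active state first, then wait for work in flight. */
  static void example_shutdown(struct example_queue *q)
  {
  	q->state = EXAMPLE_STOPPED;
  	del_timer_sync(&q->timer);	/* waits for a running timer handler */
  	tasklet_kill(&q->tasklet);	/* waits for a running tasklet */
  }

Without the synchronous variants, a handler still running on another CPU
could re-arm the timer or reschedule the tasklet after the queue
structures have been freed; the state check in qdio_tasklet_schedule()
below, together with del_timer_sync() and tasklet_kill(), prevents this.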
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Reviewed-by: Benjamin Block <bblock@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 6e30c549
@@ -686,6 +686,15 @@ static void qdio_kick_handler(struct qdio_q *q)
 	q->qdio_error = 0;
 }
 
+static inline int qdio_tasklet_schedule(struct qdio_q *q)
+{
+	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
+		tasklet_schedule(&q->tasklet);
+		return 0;
+	}
+	return -EPERM;
+}
+
 static void __qdio_inbound_processing(struct qdio_q *q)
 {
 	qperf_inc(q, tasklet_inbound);
@@ -698,11 +707,9 @@ static void __qdio_inbound_processing(struct qdio_q *q)
 	if (!qdio_inbound_q_done(q)) {
 		/* means poll time is not yet over */
 		qperf_inc(q, tasklet_inbound_resched);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
-			tasklet_schedule(&q->tasklet);
+		if (!qdio_tasklet_schedule(q))
 			return;
-		}
 	}
 
 	qdio_stop_polling(q);
 	/*
@@ -711,8 +718,7 @@ static void __qdio_inbound_processing(struct qdio_q *q)
 	 */
 	if (!qdio_inbound_q_done(q)) {
 		qperf_inc(q, tasklet_inbound_resched2);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
-			tasklet_schedule(&q->tasklet);
+		qdio_tasklet_schedule(q);
 	}
 }
 
@@ -869,16 +875,15 @@ static void __qdio_outbound_processing(struct qdio_q *q)
 	 * is noticed and outbound_handler is called after some time.
 	 */
 	if (qdio_outbound_q_done(q))
-		del_timer(&q->u.out.timer);
+		del_timer_sync(&q->u.out.timer);
 	else
-		if (!timer_pending(&q->u.out.timer))
+		if (!timer_pending(&q->u.out.timer) &&
+		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
 			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
 	return;
 
 sched:
-	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
-		return;
-	tasklet_schedule(&q->tasklet);
+	qdio_tasklet_schedule(q);
 }
 
 /* outbound tasklet */
@@ -892,9 +897,7 @@ void qdio_outbound_timer(unsigned long data)
 {
 	struct qdio_q *q = (struct qdio_q *)data;
 
-	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
-		return;
-	tasklet_schedule(&q->tasklet);
+	qdio_tasklet_schedule(q);
 }
 
 static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
@@ -907,7 +910,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
 
 	for_each_output_queue(q->irq_ptr, out, i)
 		if (!qdio_outbound_q_done(out))
-			tasklet_schedule(&out->tasklet);
+			qdio_tasklet_schedule(out);
 }
 
 static void __tiqdio_inbound_processing(struct qdio_q *q)
@@ -929,11 +932,9 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
 
 	if (!qdio_inbound_q_done(q)) {
 		qperf_inc(q, tasklet_inbound_resched);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
-			tasklet_schedule(&q->tasklet);
+		if (!qdio_tasklet_schedule(q))
 			return;
-		}
 	}
 
 	qdio_stop_polling(q);
 	/*
@@ -942,8 +943,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
 	 */
 	if (!qdio_inbound_q_done(q)) {
 		qperf_inc(q, tasklet_inbound_resched2);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
-			tasklet_schedule(&q->tasklet);
+		qdio_tasklet_schedule(q);
 	}
 }
 
@@ -977,7 +977,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 	int i;
 	struct qdio_q *q;
 
-	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return;
 
 	for_each_input_queue(irq_ptr, q, i) {
@@ -1003,7 +1003,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 			continue;
 		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
 			qdio_siga_sync_q(q);
-		tasklet_schedule(&q->tasklet);
+		qdio_tasklet_schedule(q);
 	}
 }
 
@@ -1145,7 +1145,7 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
 		tasklet_kill(&q->tasklet);
 
 	for_each_output_queue(irq_ptr, q, i) {
-		del_timer(&q->u.out.timer);
+		del_timer_sync(&q->u.out.timer);
 		tasklet_kill(&q->tasklet);
 	}
 }
@@ -1585,10 +1585,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 
 	/* in case of SIGA errors we must process the error immediately */
 	if (used >= q->u.out.scan_threshold || rc)
-		tasklet_schedule(&q->tasklet);
+		qdio_tasklet_schedule(q);
 	else
 		/* free the SBALs in case of no further traffic */
-		if (!timer_pending(&q->u.out.timer))
+		if (!timer_pending(&q->u.out.timer) &&
+		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
 			mod_timer(&q->u.out.timer, jiffies + HZ);
 	return rc;
 }