Commit 70460571 authored by Bart Van Assche, committed by Jens Axboe

block: Avoid scheduling delayed work on a dead queue

Running a queue must continue after it has been marked dying until
it has been marked dead. So the function blk_run_queue_async() must
not schedule delayed work after blk_cleanup_queue() has marked a queue
dead. Hence add a test for that queue state in blk_run_queue_async()
and make sure that queue_unplugged() invokes that function with the
queue lock held. This prevents the queue state from changing after
it has been tested and before mod_delayed_work() is invoked. Drop
the queue dying test in queue_unplugged() since it is now
superfluous: __blk_run_queue() already tests whether or not the
queue is dead.
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c246e80d
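
The race described above can be illustrated outside the kernel. The sketch
below is a minimal userspace C analogue, not kernel code: worker_ctx,
schedule_work_locked() and shutdown_ctx() are hypothetical stand-ins, with a
pthread mutex playing the role of q->queue_lock and two booleans standing in
for the queue's dead flag and its delayed work item. It demonstrates the rule
the patch enforces: the dead test and the (re)scheduling happen under the same
lock that the cleanup path takes, so work can never be re-armed after shutdown.

/* Userspace sketch only; none of these names are kernel APIs. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct worker_ctx {
	pthread_mutex_t lock;	/* plays the role of q->queue_lock */
	bool dead;		/* plays the role of the queue's dead flag */
	bool work_pending;	/* plays the role of q->delay_work */
};

/* Caller must hold ctx->lock, mirroring the new contract of
 * blk_run_queue_async(): testing "dead" and arming the work are
 * atomic with respect to shutdown_ctx(). */
static void schedule_work_locked(struct worker_ctx *ctx)
{
	if (!ctx->dead)
		ctx->work_pending = true;
}

/* Cleanup path: mark the context dead and cancel pending work under
 * the same lock, as blk_cleanup_queue() marks the queue dead before
 * the delayed work is finally canceled. */
static void shutdown_ctx(struct worker_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->dead = true;
	ctx->work_pending = false;
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	struct worker_ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&ctx.lock);
	schedule_work_locked(&ctx);	/* armed: context still alive */
	pthread_mutex_unlock(&ctx.lock);

	shutdown_ctx(&ctx);

	pthread_mutex_lock(&ctx.lock);
	schedule_work_locked(&ctx);	/* no-op: context is dead */
	pthread_mutex_unlock(&ctx.lock);

	printf("work_pending after shutdown: %d\n", ctx.work_pending);
	return 0;
}

Compile with e.g. cc -pthread sketch.c. Without the shared lock, shutdown_ctx()
could run between the dead test and the scheduling, which is exactly the window
the patch closes by requiring blk_run_queue_async() to be called with the queue
lock held.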
@@ -219,12 +219,13 @@ static void blk_delay_work(struct work_struct *work)
  * Description:
  *   Sometimes queueing needs to be postponed for a little while, to allow
  *   resources to come back. This function will make sure that queueing is
- *   restarted around the specified time.
+ *   restarted around the specified time. Queue lock must be held.
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-	queue_delayed_work(kblockd_workqueue, &q->delay_work,
-				msecs_to_jiffies(msecs));
+	if (likely(!blk_queue_dead(q)))
+		queue_delayed_work(kblockd_workqueue, &q->delay_work,
+				   msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);

@@ -334,11 +335,11 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- *    of us.
+ *    of us. The caller must hold the queue lock.
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-	if (likely(!blk_queue_stopped(q)))
+	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
 		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);

@@ -2913,27 +2914,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 {
 	trace_block_unplug(q, depth, !from_schedule);

-	/*
-	 * Don't mess with a dying queue.
-	 */
-	if (unlikely(blk_queue_dying(q))) {
-		spin_unlock(q->queue_lock);
-		return;
-	}
-
-	/*
-	 * If we are punting this to kblockd, then we can safely drop
-	 * the queue_lock before waking kblockd (which needs to take
-	 * this lock).
-	 */
-	if (from_schedule) {
-		spin_unlock(q->queue_lock);
+	if (from_schedule)
 		blk_run_queue_async(q);
-	} else {
+	else
 		__blk_run_queue(q);
-		spin_unlock(q->queue_lock);
-	}
+	spin_unlock(q->queue_lock);
 }

 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)