Commit d15bb3a6 authored by Bart Van Assche's avatar Bart Van Assche Committed by Mike Snitzer

dm rq: fix a race condition in rq_completed()

The queue lock must be held when calling blk_run_queue_async(), to
avoid triggering a race between blk_run_queue_async() and
blk_cleanup_queue().

Cc: stable@vger.kernel.org
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 2e8ed711
@@ -226,6 +226,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
+	struct request_queue *q = md->queue;
+	unsigned long flags;
+
 	atomic_dec(&md->pending[rw]);
 
 	/* nudge anyone waiting on suspend queue */
@@ -238,8 +241,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	 * back into ->request_fn() could deadlock attempting to grab the
 	 * queue lock again.
 	 */
-	if (!md->queue->mq_ops && run_queue)
-		blk_run_queue_async(md->queue);
+	if (!q->mq_ops && run_queue) {
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_run_queue_async(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
 
 	/*
 	 * dm_put() must be at the end of this function. See the comment above
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment