Commit 43a5e4e2 authored by Ming Lei, committed by Jens Axboe

block: blk-mq: support draining mq queue

blk_mq_drain_queue() is introduced so that we can drain
the mq queue inside blk_cleanup_queue().

Also, don't accept new requests any more once the queue is
marked as dying.

Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b28bc9b3
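
Editorial note: the commit pairs two mechanisms, a polling drain loop over the
queue's usage counter and an entry gate that fails with -ENODEV once the queue
is dying. The user-space sketch below mimics that pattern so the flow is easy
to follow; every name is a stand-in (a plain C11 atomic replaces the kernel's
percpu_counter, usleep() replaces msleep(), and the kernel performs the enter
check under q->queue_lock, which the sketch omits).

#include <assert.h>
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_long usage_counter;       /* ~ q->mq_usage_counter */
static atomic_bool queue_dying;         /* ~ QUEUE_FLAG_DYING    */

/* ~ blk_mq_queue_enter(): refuse new requests once the queue is dying,
 * otherwise take a usage reference. */
static int queue_enter(void)
{
        if (atomic_load(&queue_dying))
                return -ENODEV;
        atomic_fetch_add(&usage_counter, 1);
        return 0;
}

/* ~ blk_mq_queue_exit(): drop the usage reference when a request completes. */
static void queue_exit(void)
{
        atomic_fetch_sub(&usage_counter, 1);
}

/* ~ __blk_mq_drain_queue(): poll until no request is in flight; the kernel
 * additionally kicks the hardware queues via blk_mq_run_queues(q, false)
 * on every pass. */
static void drain_queue(void)
{
        while (atomic_load(&usage_counter) != 0)
                usleep(10 * 1000);      /* mirrors msleep(10) */
}

int main(void)
{
        assert(queue_enter() == 0);         /* admitted while queue is live    */
        atomic_store(&queue_dying, true);   /* teardown marks the queue dying  */
        assert(queue_enter() == -ENODEV);   /* new requests are now rejected   */
        queue_exit();                       /* the in-flight request finishes  */
        drain_queue();                      /* returns once the count is zero  */
        puts("drained");
        return 0;
}
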
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -38,6 +38,7 @@
 
 #include "blk.h"
 #include "blk-cgroup.h"
+#include "blk-mq.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -497,8 +498,13 @@ void blk_cleanup_queue(struct request_queue *q)
 	 * Drain all requests queued before DYING marking. Set DEAD flag to
 	 * prevent that q->request_fn() gets invoked after draining finished.
 	 */
-	spin_lock_irq(lock);
-	__blk_drain_queue(q, true);
+	if (q->mq_ops) {
+		blk_mq_drain_queue(q);
+		spin_lock_irq(lock);
+	} else {
+		spin_lock_irq(lock);
+		__blk_drain_queue(q, true);
+	}
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);
 
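(Editorial note, not part of the commit message: the ordering in the mq branch
above matters. blk_mq_drain_queue() may sleep, since the drain loop uses
msleep(), so it has to run before spin_lock_irq(lock) is taken; the legacy
__blk_drain_queue() is instead written to be entered with the queue lock held.
The mq path therefore drains first and takes the lock only to set
QUEUE_FLAG_DEAD.)
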
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -60,6 +60,10 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
 
+	/*
+	 * don't check dying flag for MQ because the request won't
+	 * be reused after dying flag is set
+	 */
 	if (q->mq_ops) {
 		blk_mq_insert_request(q, rq, true);
 		return;
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -106,10 +106,13 @@ static int blk_mq_queue_enter(struct request_queue *q)
 
 	spin_lock_irq(q->queue_lock);
 	ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
-		!blk_queue_bypass(q), *q->queue_lock);
+		!blk_queue_bypass(q) || blk_queue_dying(q),
+		*q->queue_lock);
 	/* inc usage with lock hold to avoid freeze_queue runs here */
-	if (!ret)
+	if (!ret && !blk_queue_dying(q))
 		__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+	else if (blk_queue_dying(q))
+		ret = -ENODEV;
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
@@ -120,6 +123,22 @@ static void blk_mq_queue_exit(struct request_queue *q)
 	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
 }
 
+static void __blk_mq_drain_queue(struct request_queue *q)
+{
+	while (true) {
+		s64 count;
+
+		spin_lock_irq(q->queue_lock);
+		count = percpu_counter_sum(&q->mq_usage_counter);
+		spin_unlock_irq(q->queue_lock);
+
+		if (count == 0)
+			break;
+		blk_mq_run_queues(q, false);
+		msleep(10);
+	}
+}
+
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
@@ -133,21 +152,13 @@ static void blk_mq_freeze_queue(struct request_queue *q)
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (!drain)
-		return;
-
-	while (true) {
-		s64 count;
-
-		spin_lock_irq(q->queue_lock);
-		count = percpu_counter_sum(&q->mq_usage_counter);
-		spin_unlock_irq(q->queue_lock);
-
-		if (count == 0)
-			break;
-		blk_mq_run_queues(q, false);
-		msleep(10);
-	}
+	if (drain)
+		__blk_mq_drain_queue(q);
+}
+
+void blk_mq_drain_queue(struct request_queue *q)
+{
+	__blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -27,6 +27,7 @@ void blk_mq_complete_request(struct request *rq, int error);
 void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
+void blk_mq_drain_queue(struct request_queue *q);
 
 /*
  * CPU hotplug helpers
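
(Editorial note: the new #include "blk-mq.h" in blk-core.c exists so that
blk_cleanup_queue() sees the blk_mq_drain_queue() prototype declared here;
blk_cleanup_queue() is the only caller the patch introduces. The
blk_mq_queue_enter() change means mq submitters now get -ENODEV instead of
blocking on mq_freeze_wq once the queue is dying, which prevents new usage
references from being taken while the queue is being torn down.)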