Commit 776687bc authored by Tejun Heo, committed by Jens Axboe

block, blk-mq: draining can't be skipped even if bypass_depth was non-zero

Currently, both blk_queue_bypass_start() and blk_mq_freeze_queue()
skip queue draining if bypass_depth was already above zero.  The
assumption is that the one which bumped the bypass_depth should have
performed draining already; however, there's nothing which prevents a
new instance of bypassing/freezing from starting before the previous
one finishes draining.  The current code may allow later
bypassing/freezing instances to complete while there are still
in-flight requests which haven't finished draining.

Fix it by draining regardless of bypass_depth.  We still skip draining
from blk_queue_bypass_start() while the queue is initializing to avoid
introducing excessive delays during boot.  INIT_DONE setting is moved
above the initial blk_queue_bypass_end() so that bypassing attempts
can't slip in between.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Nicholas A. Bellinger <nab@linux-iscsi.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 531ed626
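
For readers unfamiliar with the bypass mechanism, here is a minimal userspace sketch of the pre-patch skip-if-nonzero logic described above. It is not kernel code: the helper old_bypass_start(), the in_flight counter, and the instance names are purely illustrative. The second, overlapping instance sees bypass_depth already above zero and returns without draining.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the pre-patch behaviour: only the caller that takes
 * bypass_depth from 0 to 1 drains, so a second, overlapping instance
 * returns immediately even though requests are still in flight.
 */
static int bypass_depth;
static int in_flight = 3;      /* requests issued before bypassing began */

static void old_bypass_start(const char *who)
{
        bool drain = !bypass_depth++;  /* true only for the first instance */

        if (drain)
                printf("%s: starts draining (in_flight=%d, not done yet)\n",
                       who, in_flight);
        else
                printf("%s: skips draining although in_flight=%d\n",
                       who, in_flight);
}

int main(void)
{
        old_bypass_start("instance A");  /* bypass_depth 0 -> 1, drains */
        old_bypass_start("instance B");  /* bypass_depth 1 -> 2, skips  */
        return 0;
}

With this patch both instances drain unconditionally; blk_queue_bypass_start() only keeps skipping the drain until blk_queue_init_done(q) becomes true, which avoids lengthy delays while queues are still being set up during boot.
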
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -438,14 +438,17 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
-	bool drain;
-
 	spin_lock_irq(q->queue_lock);
-	drain = !q->bypass_depth++;
+	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (drain) {
+	/*
+	 * Queues start drained.  Skip actual draining till init is
+	 * complete.  This avoids lenghty delays during queue init which
+	 * can happen many times during boot.
+	 */
+	if (blk_queue_init_done(q)) {
 		spin_lock_irq(q->queue_lock);
 		__blk_drain_queue(q, false);
 		spin_unlock_irq(q->queue_lock);
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -131,14 +131,11 @@ void blk_mq_drain_queue(struct request_queue *q)
  */
 static void blk_mq_freeze_queue(struct request_queue *q)
 {
-	bool drain;
-
 	spin_lock_irq(q->queue_lock);
-	drain = !q->bypass_depth++;
+	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
-
-	if (drain)
-		blk_mq_drain_queue(q);
+	blk_mq_drain_queue(q);
 }
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -554,8 +554,8 @@ int blk_register_queue(struct gendisk *disk)
 	 * Initialization must be complete by now.  Finish the initial
 	 * bypass from queue allocation.
 	 */
-	blk_queue_bypass_end(q);
 	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+	blk_queue_bypass_end(q);
 
 	ret = blk_trace_init_sysfs(dev);
 	if (ret)