Commit 1671d522 authored by Ming Lei, committed by Jens Axboe

block: rename blk_mq_freeze_queue_start()

Since .q_usage_counter is used by both the legacy and
blk-mq paths, we need to block new I/O in
blk_queue_enter() once a queue becomes dead.

Rename blk_mq_freeze_queue_start() to
blk_freeze_queue_start() so that the function can be
used in both paths.
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 5ed61d3f
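For context on the first hunk below: blk_queue_enter() is the gate that every new request takes on both paths. The following is a simplified sketch of its wait loop, reconstructed around the hunk that follows; it is not the verbatim kernel source (signal handling, among other details, is omitted):

#include <linux/blkdev.h>
#include <linux/percpu-refcount.h>
#include <linux/wait.h>

/*
 * Simplified sketch of the entry gate discussed above; the real
 * blk_queue_enter() also handles pending signals.
 */
int blk_queue_enter(struct request_queue *q, bool nowait)
{
	while (true) {
		/* Fast path: queue is live, take a reference and go. */
		if (percpu_ref_tryget_live(&q->q_usage_counter))
			return 0;

		if (nowait)
			return -EBUSY;

		/*
		 * Pairs with the barrier in blk_freeze_queue_start():
		 * order reading the __PERCPU_REF_DEAD flag of
		 * .q_usage_counter against reading .mq_freeze_depth,
		 * or the wait below could miss its wakeup.
		 */
		smp_rmb();

		/* Sleep until the freeze is lifted or the queue dies. */
		wait_event(q->mq_freeze_wq,
			   !atomic_read(&q->mq_freeze_depth) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}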
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -670,7 +670,7 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
 			return -EBUSY;
 
 		/*
-		 * read pair of barrier in blk_mq_freeze_queue_start(),
+		 * read pair of barrier in blk_freeze_queue_start(),
 		 * we need to order reading __PERCPU_REF_DEAD flag of
 		 * .q_usage_counter and reading .mq_freeze_depth,
 		 * otherwise the following wait may never return if the
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -68,7 +68,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
 }
 
-void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_freeze_queue_start(struct request_queue *q)
 {
 	int freeze_depth;
 
@@ -78,7 +78,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
 		blk_mq_run_hw_queues(q, false);
 	}
 }
-EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
+EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
 
 void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
@@ -108,7 +108,7 @@ void blk_freeze_queue(struct request_queue *q)
 	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
 	 * exported to drivers as the only user for unfreeze is blk_mq.
 	 */
-	blk_mq_freeze_queue_start(q);
+	blk_freeze_queue_start(q);
 	blk_mq_freeze_queue_wait(q);
 }
@@ -746,7 +746,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
 	 * percpu_ref_tryget directly, because we need to be able to
 	 * obtain a reference even in the short window between the queue
 	 * starting to freeze, by dropping the first reference in
-	 * blk_mq_freeze_queue_start, and the moment the last request is
+	 * blk_freeze_queue_start, and the moment the last request is
 	 * consumed, marked by the instant q_usage_counter reaches
 	 * zero.
 	 */
@@ -2376,7 +2376,7 @@ static void blk_mq_queue_reinit_work(void)
 	 * take place in parallel.
 	 */
 	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_freeze_queue_start(q);
+		blk_freeze_queue_start(q);
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_freeze_queue_wait(q);
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4162,7 +4162,7 @@ static int mtip_block_remove(struct driver_data *dd)
 		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
 			dd->disk->disk_name);
 
-	blk_mq_freeze_queue_start(dd->queue);
+	blk_freeze_queue_start(dd->queue);
 	blk_mq_stop_hw_queues(dd->queue);
 	blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2386,7 +2386,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
 	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry(ns, &ctrl->namespaces, list)
-		blk_mq_freeze_queue_start(ns->queue);
+		blk_freeze_queue_start(ns->queue);
 	mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_start_freeze);
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -243,7 +243,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
-void blk_mq_freeze_queue_start(struct request_queue *q);
+void blk_freeze_queue_start(struct request_queue *q);
 void blk_mq_freeze_queue_wait(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
 		unsigned long timeout);
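All of the call-site hunks above follow the same two-step pattern that blk_freeze_queue() itself uses: start the freeze on every queue first so the drains can overlap, then wait for each queue separately. A hypothetical driver-side sketch (my_dev, my_dev_freeze_all, and the field names are illustrative, not part of this commit):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

struct my_dev {				/* hypothetical driver state */
	struct request_queue **queues;
	int nr_queues;
};

static void my_dev_freeze_all(struct my_dev *dev)
{
	int i;

	/* Kick off the freeze on every queue without blocking... */
	for (i = 0; i < dev->nr_queues; i++)
		blk_freeze_queue_start(dev->queues[i]);

	/* ...then wait for outstanding requests on each to drain. */
	for (i = 0; i < dev->nr_queues; i++)
		blk_mq_freeze_queue_wait(dev->queues[i]);
}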