Commit 4e2f62e5 authored by Jens Axboe

Revert "blk-mq: put driver tag when this request is completed"

This reverts the following commits:

	37f4a24c
	723bf178
	36a3df5a

The last one is the culprit, but we have to go a bit deeper to get this
to revert cleanly. There have been reports that this breaks some MMC
setups [1] and also causes an issue with swap [2]. Until this can be
figured out, revert the offending commits.

[1] https://lore.kernel.org/linux-block/57fb09b1-54ba-f3aa-f82c-d709b0e6b281@samsung.com/
[2] https://lore.kernel.org/linux-block/20200702043721.GA1087@lca.pw/

Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reported-by: Qian Cai <cai@lca.pw>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b53ac8b8
@@ -236,10 +236,13 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	error = fq->rq_status;
 
 	hctx = flush_rq->mq_hctx;
-	if (!q->elevator)
-		flush_rq->tag = BLK_MQ_NO_TAG;
-	else
-		flush_rq->internal_tag = BLK_MQ_NO_TAG;
+	if (!q->elevator) {
+		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
+		flush_rq->tag = -1;
+	} else {
+		blk_mq_put_driver_tag(flush_rq);
+		flush_rq->internal_tag = -1;
+	}
 
 	running = &fq->flush_queue[fq->flush_running_idx];
 	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);
@@ -313,10 +316,13 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	flush_rq->mq_ctx = first_rq->mq_ctx;
 	flush_rq->mq_hctx = first_rq->mq_hctx;
 
-	if (!q->elevator)
+	if (!q->elevator) {
+		fq->orig_rq = first_rq;
 		flush_rq->tag = first_rq->tag;
-	else
+		blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
+	} else {
 		flush_rq->internal_tag = first_rq->internal_tag;
+	}
 
 	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
 	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
@@ -335,6 +341,11 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 	unsigned long flags;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
 
+	if (q->elevator) {
+		WARN_ON(rq->tag < 0);
+		blk_mq_put_driver_tag(rq);
+	}
+
 	/*
 	 * After populating an empty queue, kick it to avoid stall. Read
 	 * the comment in flush_end_io().
......
@@ -101,6 +101,18 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	return atomic_read(&hctx->nr_active) < depth;
 }
 
+/*
+ * This helper should only be used for flush request to share tag
+ * with the request cloned from, and both the two requests can't be
+ * in flight at the same time. The caller has to make sure the tag
+ * can't be freed.
+ */
+static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
+		unsigned int tag, struct request *rq)
+{
+	hctx->tags->rqs[tag] = rq;
+}
+
 static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
 					  unsigned int tag)
 {
......
@@ -277,20 +277,26 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 {
 	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
 	struct request *rq = tags->static_rqs[tag];
+	req_flags_t rq_flags = 0;
 
 	if (data->q->elevator) {
 		rq->tag = BLK_MQ_NO_TAG;
 		rq->internal_tag = tag;
 	} else {
+		if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
+			rq_flags = RQF_MQ_INFLIGHT;
+			atomic_inc(&data->hctx->nr_active);
+		}
 		rq->tag = tag;
 		rq->internal_tag = BLK_MQ_NO_TAG;
+		data->hctx->tags->rqs[rq->tag] = rq;
 	}
 
 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = data->q;
 	rq->mq_ctx = data->ctx;
 	rq->mq_hctx = data->hctx;
-	rq->rq_flags = 0;
+	rq->rq_flags = rq_flags;
 	rq->cmd_flags = data->cmd_flags;
 	if (data->flags & BLK_MQ_REQ_PREEMPT)
 		rq->rq_flags |= RQF_PREEMPT;
@@ -660,32 +666,10 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 	return cpu_online(rq->mq_ctx->cpu);
 }
 
-static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
-		struct request *rq)
-{
-	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
-	rq->tag = BLK_MQ_NO_TAG;
-
-	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
-		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
-		atomic_dec(&hctx->nr_active);
-	}
-}
-
-static inline void blk_mq_put_driver_tag(struct request *rq)
-{
-	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
-		return;
-
-	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
-}
-
 bool blk_mq_complete_request_remote(struct request *rq)
 {
 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
 
-	blk_mq_put_driver_tag(rq);
-
 	/*
 	 * For a polled request, always complete locallly, it's pointless
 	 * to redirect the completion.
@@ -1121,10 +1105,9 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
 {
 	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
 	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
+	bool shared = blk_mq_tag_busy(rq->mq_hctx);
 	int tag;
 
-	blk_mq_tag_busy(rq->mq_hctx);
-
 	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
 		bt = &rq->mq_hctx->tags->breserved_tags;
 		tag_offset = 0;
@@ -1137,22 +1120,19 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
 		return false;
 
 	rq->tag = tag + tag_offset;
+	if (shared) {
+		rq->rq_flags |= RQF_MQ_INFLIGHT;
+		atomic_inc(&rq->mq_hctx->nr_active);
+	}
+	rq->mq_hctx->tags->rqs[rq->tag] = rq;
 	return true;
 }
 
 static bool blk_mq_get_driver_tag(struct request *rq)
 {
-	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-
-	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
-		return false;
-
-	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
-		rq->rq_flags |= RQF_MQ_INFLIGHT;
-		atomic_inc(&hctx->nr_active);
-	}
-	hctx->tags->rqs[rq->tag] = rq;
-	return true;
+	if (rq->tag != BLK_MQ_NO_TAG)
+		return true;
+	return __blk_mq_get_driver_tag(rq);
 }
 
 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
......
@@ -193,6 +193,26 @@ static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
 	return true;
 }
 
+static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
+					   struct request *rq)
+{
+	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
+	rq->tag = BLK_MQ_NO_TAG;
+
+	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
+		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
+		atomic_dec(&hctx->nr_active);
+	}
+}
+
+static inline void blk_mq_put_driver_tag(struct request *rq)
+{
+	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
+		return;
+
+	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
+}
+
 static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
 {
 	int cpu;
......
@@ -25,6 +25,11 @@ struct blk_flush_queue {
 	struct list_head	flush_data_in_flight;
 	struct request		*flush_rq;
 
+	/*
+	 * flush_rq shares tag with this rq, both can't be active
+	 * at the same time
+	 */
+	struct request		*orig_rq;
 	struct lock_class_key	key;
 	spinlock_t		mq_flush_lock;
 };
......