Commit b175c867 authored by Chengming Zhou, committed by Jens Axboe

blk-flush: count inflight flush_data requests

The flush state machine uses a doubly linked list to track all inflight
flush_data requests, so that it can avoid issuing a separate post-flush
for flush_data requests that share a PREFLUSH.

That is why rq->queuelist can't be reused and rq->flush.list is needed.

In preparation for the next patch, which reuses rq->queuelist for the
flush state machine, change the doubly linked list to an unsigned long
counter of all inflight flush_data requests.

This is fine since we only need to know whether there is any inflight
flush_data request, so a plain counter is sufficient.
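
For illustration only, a minimal user-space sketch of the idea (the names,
types and the pthread lock are assumptions for this sketch, not the kernel
code): once the only question the flush logic asks is "is any flush_data
request still inflight?", a counter updated under the existing lock can
replace the per-request list, so rq->flush.list is no longer needed.

#include <pthread.h>
#include <stdbool.h>

/* Sketch of a flush queue that only counts inflight data requests. */
struct flush_queue {
	pthread_mutex_t lock;		/* stands in for fq->mq_flush_lock */
	unsigned long data_in_flight;	/* replaces the doubly linked list */
};

/* A flush_data request has been issued: account for it. */
static void flush_data_issue(struct flush_queue *fq)
{
	pthread_mutex_lock(&fq->lock);
	fq->data_in_flight++;
	pthread_mutex_unlock(&fq->lock);
}

/* A flush_data request has completed: drop it from the count. */
static void flush_data_complete(struct flush_queue *fq)
{
	pthread_mutex_lock(&fq->lock);
	fq->data_in_flight--;
	pthread_mutex_unlock(&fq->lock);
}

/* The only query needed: is anything still inflight? */
static bool flush_data_pending(struct flush_queue *fq)
{
	pthread_mutex_lock(&fq->lock);
	bool pending = fq->data_in_flight != 0;
	pthread_mutex_unlock(&fq->lock);
	return pending;
}

The patch below does the same bookkeeping under fq->mq_flush_lock: the
counter is incremented when a flush_data request is issued
(blk_flush_complete_seq() and blk_insert_flush()) and decremented when it
completes (mq_flush_data_end_io()).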
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230717040058.3993930-4-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 28b24123
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -187,7 +187,8 @@ static void blk_flush_complete_seq(struct request *rq,
 		break;
 
 	case REQ_FSEQ_DATA:
-		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
+		list_del_init(&rq->flush.list);
+		fq->flush_data_in_flight++;
 		spin_lock(&q->requeue_lock);
 		list_add(&rq->queuelist, &q->requeue_list);
 		spin_unlock(&q->requeue_lock);
@@ -299,7 +300,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 		return;
 
 	/* C2 and C3 */
-	if (!list_empty(&fq->flush_data_in_flight) &&
+	if (fq->flush_data_in_flight &&
 	    time_before(jiffies,
 			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
 		return;
@@ -374,6 +375,7 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
 	 * the comment in flush_end_io().
 	 */
 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
+	fq->flush_data_in_flight--;
 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 
@@ -445,7 +447,7 @@ bool blk_insert_flush(struct request *rq)
 		blk_rq_init_flush(rq);
 		rq->flush.seq |= REQ_FSEQ_PREFLUSH;
 		spin_lock_irq(&fq->mq_flush_lock);
-		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
+		fq->flush_data_in_flight++;
 		spin_unlock_irq(&fq->mq_flush_lock);
 		return false;
 	default:
@@ -496,7 +498,6 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
 	INIT_LIST_HEAD(&fq->flush_queue[0]);
 	INIT_LIST_HEAD(&fq->flush_queue[1]);
-	INIT_LIST_HEAD(&fq->flush_data_in_flight);
 
 	return fq;
--- a/block/blk.h
+++ b/block/blk.h
@@ -15,15 +15,14 @@ struct elevator_type;
 extern struct dentry *blk_debugfs_root;
 
 struct blk_flush_queue {
+	spinlock_t		mq_flush_lock;
 	unsigned int		flush_pending_idx:1;
 	unsigned int		flush_running_idx:1;
 	blk_status_t		rq_status;
 	unsigned long		flush_pending_since;
 	struct list_head	flush_queue[2];
-	struct list_head	flush_data_in_flight;
+	unsigned long		flush_data_in_flight;
 	struct request		*flush_rq;
-
-	spinlock_t		mq_flush_lock;
 };
 
 bool is_flush_rq(struct request *req);