Commit 660e802c authored by Chengming Zhou, committed by Jens Axboe

blk-mq: use percpu csd to remote complete instead of per-rq csd

If a request needs to be completed remotely, we insert it into the percpu llist,
and call smp_call_function_single_async() if the llist was previously empty.

We don't need a per-rq csd; a percpu csd is enough, and it shrinks
struct request by 24 bytes.

This way is cleaner and looks correct: the block softirq is guaranteed to be
scheduled to consume the list whenever a new request is added to this percpu
list, whether smp_call_function_single_async() returns -EBUSY or 0.
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230717040058.3993930-2-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 43c9835b
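
To make the handoff concrete, here is a minimal user-space sketch of the pattern (not kernel code; all names below are illustrative, not kernel APIs): producers push onto a lock-free list and send exactly one notification per empty-to-non-empty transition, and the consumer detaches and drains the whole list at once.

/*
 * User-space sketch of the percpu-llist completion pattern.
 * list_add() mirrors llist_add() returning true, which is what
 * triggers smp_call_function_single_async() in the kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

static _Atomic(struct node *) list_head;

/* Push one node; return true only on the empty->non-empty
 * transition, i.e. when the caller must kick the consumer. */
static bool list_add(struct node *n)
{
	struct node *first = atomic_load(&list_head);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&list_head, &first, n));

	return first == NULL;
}

/* Detach the whole list at once, like llist_del_all(). */
static struct node *list_del_all(void)
{
	return atomic_exchange(&list_head, (struct node *)NULL);
}

/* Stand-in for the block softirq handler: drain everything queued. */
static void consume_all(void)
{
	struct node *n = list_del_all();

	while (n) {
		struct node *next = n->next;

		printf("completed request %d\n", n->payload);
		free(n);
		n = next;
	}
}

int main(void)
{
	int kicks = 0;

	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));

		n->payload = i;
		if (list_add(n))
			kicks++;	/* the kernel would send one IPI here */
	}
	consume_all();
	printf("notifications sent: %d\n", kicks);	/* prints 1 */
	return 0;
}

One notification drains everything queued since the last drain, which is why a single percpu csd can replace the per-request one: the csd callback only needs to raise the block softirq on the target CPU, and the softirq handler consumes the entire percpu list (hence the NULL data argument to INIT_CSD in the diff below).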
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -43,6 +43,7 @@
 #include "blk-ioprio.h"
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
+static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
 
 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
 static void blk_mq_request_bypass_insert(struct request *rq,
@@ -1157,15 +1158,11 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 
 static void blk_mq_complete_send_ipi(struct request *rq)
 {
-	struct llist_head *list;
 	unsigned int cpu;
 
 	cpu = rq->mq_ctx->cpu;
-	list = &per_cpu(blk_cpu_done, cpu);
-	if (llist_add(&rq->ipi_list, list)) {
-		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
-		smp_call_function_single_async(cpu, &rq->csd);
-	}
+	if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
+		smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
 }
 
 static void blk_mq_raise_softirq(struct request *rq)
@@ -4829,6 +4826,9 @@ static int __init blk_mq_init(void)
 	for_each_possible_cpu(i)
 		init_llist_head(&per_cpu(blk_cpu_done, i));
+	for_each_possible_cpu(i)
+		INIT_CSD(&per_cpu(blk_cpu_csd, i),
+			 __blk_mq_complete_request_remote, NULL);
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
 
 	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -182,10 +182,7 @@ struct request {
 		rq_end_io_fn		*saved_end_io;
 	} flush;
 
-	union {
-		struct __call_single_data csd;
-		u64 fifo_time;
-	};
+	u64 fifo_time;
 
 	/*
 	 * completion callback.