Commit d391a7a3 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: merge the softirq vs non-softirq IPI logic

Both the softirq path for single queue devices and the multi-queue
completion handler share the same logic to figure out if we need an
IPI for the completion and eventually issue it.  Merge the two
versions into a single unified code path.
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d6cc464c
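
For orientation, the sketch below condenses the unified completion path that results from this change. It is pieced together from the hunks that follow and elides the rest of block/blk-mq.c; struct request, blk_mq_trigger_softirq() and the driver's mq_ops->complete() callback are the kernel's own definitions and are not restated here.

/*
 * Condensed sketch of the merged path (see the diff below for the
 * authoritative version).  Single queue devices are completed from
 * BLOCK_SOFTIRQ context to avoid long irqs-off sections; multi-queue
 * devices call the driver's ->complete() handler directly.
 */
static void __blk_mq_complete_request(struct request *rq)
{
	if (rq->q->nr_hw_queues == 1)
		blk_mq_trigger_softirq(rq);
	else
		rq->q->mq_ops->complete(rq);
}

/* The remote IPI callback now funnels into the same helper. */
static void __blk_mq_complete_request_remote(void *data)
{
	__blk_mq_complete_request(data);
}

Both the local completion in blk_mq_force_complete_rq() and the remote IPI target end up in __blk_mq_complete_request(), which is the single unified code path the commit message refers to.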
block/blk-mq.c
@@ -600,8 +600,11 @@ static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 
 static void blk_mq_trigger_softirq(struct request *rq)
 {
-	struct list_head *list = this_cpu_ptr(&blk_cpu_done);
+	struct list_head *list;
+	unsigned long flags;
 
+	local_irq_save(flags);
+	list = this_cpu_ptr(&blk_cpu_done);
 	list_add_tail(&rq->ipi_list, list);
 
 	/*
@@ -611,11 +614,7 @@ static void blk_mq_trigger_softirq(struct request *rq)
 	 */
 	if (list->next == &rq->ipi_list)
 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
-}
-
-static void trigger_softirq(void *data)
-{
-	blk_mq_trigger_softirq(data);
+	local_irq_restore(flags);
 }
 
 static int blk_softirq_cpu_dead(unsigned int cpu)
@@ -633,56 +632,26 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
 	return 0;
 }
 
-static void __blk_complete_request(struct request *req)
+static void __blk_mq_complete_request(struct request *rq)
 {
-	struct request_queue *q = req->q;
-	int cpu, ccpu = req->mq_ctx->cpu;
-	unsigned long flags;
-	bool shared = false;
-
-	BUG_ON(!q->mq_ops->complete);
-
-	local_irq_save(flags);
-	cpu = smp_processor_id();
-
 	/*
-	 * Select completion CPU
-	 */
-	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) {
-		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
-			shared = cpus_share_cache(cpu, ccpu);
-	} else
-		ccpu = cpu;
-
-	/*
-	 * If current CPU and requested CPU share a cache, run the softirq on
-	 * the current CPU. One might concern this is just like
-	 * QUEUE_FLAG_SAME_FORCE, but actually not. blk_complete_request() is
-	 * running in interrupt handler, and currently I/O controller doesn't
-	 * support multiple interrupts, so current CPU is unique actually. This
-	 * avoids IPI sending from current CPU to the first CPU of a group.
+	 * For most of single queue controllers, there is only one irq vector
+	 * for handling I/O completion, and the only irq's affinity is set
+	 * to all possible CPUs.  On most of ARCHs, this affinity means the irq
+	 * is handled on one specific CPU.
+	 *
+	 * So complete I/O requests in softirq context in case of single queue
+	 * devices to avoid degrading I/O performance due to irqsoff latency.
 	 */
-	if (IS_ENABLED(CONFIG_SMP) &&
-	    ccpu != cpu && !shared && cpu_online(ccpu)) {
-		call_single_data_t *data = &req->csd;
-
-		data->func = trigger_softirq;
-		data->info = req;
-		data->flags = 0;
-		smp_call_function_single_async(cpu, data);
-	} else {
-		blk_mq_trigger_softirq(req);
-	}
-
-	local_irq_restore(flags);
+	if (rq->q->nr_hw_queues == 1)
+		blk_mq_trigger_softirq(rq);
+	else
+		rq->q->mq_ops->complete(rq);
 }
 
 static void __blk_mq_complete_request_remote(void *data)
 {
-	struct request *rq = data;
-	struct request_queue *q = rq->q;
-
-	q->mq_ops->complete(rq);
+	__blk_mq_complete_request(data);
 }
 
 /**
@@ -713,23 +682,9 @@ void blk_mq_force_complete_rq(struct request *rq)
 		return;
 	}
 
-	/*
-	 * Most of single queue controllers, there is only one irq vector
-	 * for handling IO completion, and the only irq's affinity is set
-	 * as all possible CPUs.  On most of ARCHs, this affinity means the
-	 * irq is handled on one specific CPU.
-	 *
-	 * So complete IO reqeust in softirq context in case of single queue
-	 * for not degrading IO performance by irqsoff latency.
-	 */
-	if (q->nr_hw_queues == 1) {
-		__blk_complete_request(rq);
-		return;
-	}
-
 	if (!IS_ENABLED(CONFIG_SMP) ||
 	    !test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
-		q->mq_ops->complete(rq);
+		__blk_mq_complete_request(rq);
 		return;
 	}
 
@@ -743,7 +698,7 @@ void blk_mq_force_complete_rq(struct request *rq)
 		rq->csd.flags = 0;
 		smp_call_function_single_async(ctx->cpu, &rq->csd);
 	} else {
-		q->mq_ops->complete(rq);
+		__blk_mq_complete_request(rq);
 	}
 	put_cpu();
 }