Commit 915b3dde authored by Hao Xu's avatar Hao Xu Committed by Jens Axboe

io_uring: spin in iopoll() only when reqs are in a single queue

We currently spin in iopoll() when requests to be iopolled are for the
same file (device), while one device may have multiple hardware queues.
Given an example:

hw_queue_0     |    hw_queue_1
req(30us)           req(10us)

If we first spin on iopolling for hw_queue_0, the avg latency would
be (30us + 30us) / 2 = 30us. While if we do round robin, the avg
latency would be (30us + 10us) / 2 = 20us since we reap the request in
hw_queue_1 in time. So it's better to do spinning only when requests
are in the same hardware queue.
Signed-off-by: default avatarHao Xu <haoxu@linux.alibaba.com>
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent 99ebe4ef
...@@ -434,7 +434,7 @@ struct io_ring_ctx { ...@@ -434,7 +434,7 @@ struct io_ring_ctx {
struct list_head iopoll_list; struct list_head iopoll_list;
struct hlist_head *cancel_hash; struct hlist_head *cancel_hash;
unsigned cancel_hash_bits; unsigned cancel_hash_bits;
bool poll_multi_file; bool poll_multi_queue;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
struct io_restriction restrictions; struct io_restriction restrictions;
...@@ -2314,7 +2314,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, ...@@ -2314,7 +2314,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
* Only spin for completions if we don't have multiple devices hanging * Only spin for completions if we don't have multiple devices hanging
* off our complete list, and we're under the requested amount. * off our complete list, and we're under the requested amount.
*/ */
spin = !ctx->poll_multi_file && *nr_events < min; spin = !ctx->poll_multi_queue && *nr_events < min;
ret = 0; ret = 0;
list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
...@@ -2553,14 +2553,22 @@ static void io_iopoll_req_issued(struct io_kiocb *req) ...@@ -2553,14 +2553,22 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
* different devices. * different devices.
*/ */
if (list_empty(&ctx->iopoll_list)) { if (list_empty(&ctx->iopoll_list)) {
ctx->poll_multi_file = false; ctx->poll_multi_queue = false;
} else if (!ctx->poll_multi_file) { } else if (!ctx->poll_multi_queue) {
struct io_kiocb *list_req; struct io_kiocb *list_req;
unsigned int queue_num0, queue_num1;
list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb, list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
inflight_entry); inflight_entry);
if (list_req->file != req->file)
ctx->poll_multi_file = true; if (list_req->file != req->file) {
ctx->poll_multi_queue = true;
} else {
queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
if (queue_num0 != queue_num1)
ctx->poll_multi_queue = true;
}
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment