Commit cf9446cc authored by Linus Torvalds

Merge tag 'io_uring-5.10-2020-10-30' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:

 - Fixes for linked timeouts (Pavel)

 - Set IO_WQ_WORK_CONCURRENT early for async offload (Pavel)

 - Two minor simplifications that make the code easier to read and
   follow (Pavel)

* tag 'io_uring-5.10-2020-10-30' of git://git.kernel.dk/linux-block:
  io_uring: use type appropriate io_kiocb handler for double poll
  io_uring: simplify __io_queue_sqe()
  io_uring: simplify nxt propagation in io_queue_sqe
  io_uring: don't miss setting IO_WQ_WORK_CONCURRENT
  io_uring: don't defer put of cancelled ltimeout
  io_uring: always clear LINK_TIMEOUT after cancel
  io_uring: don't adjust LINK_HEAD in cancel ltimeout
  io_uring: remove opcode check on ltimeout kill
parents 8f9a2a19 c8b5e260
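Editor's note, not part of the pull request: most of these fixes concern linked timeouts, i.e. an SQE prepared as IORING_OP_LINK_TIMEOUT and chained to the previous request with IOSQE_IO_LINK, which cancels that request if it does not complete in time. Below is a minimal liburing sketch of the userspace side, assuming liburing is available; the NOP request and the 1-second timeout are placeholders. When the guarded request finishes first, the kernel must cancel the still-armed hrtimer, which is the io_kill_linked_timeout() path reworked in the diff that follows.

/*
 * Illustrative only: userspace view of a linked timeout using liburing.
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        int i;

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        /* the request to guard; IOSQE_IO_LINK chains it to the next SQE */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);
        sqe->flags |= IOSQE_IO_LINK;

        /* the linked timeout: cancels the previous request if it outlives ts */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_link_timeout(sqe, &ts, 0);

        io_uring_submit(&ring);

        /* two CQEs: the op's result, and the timeout's (-ETIME or -ECANCELED) */
        for (i = 0; i < 2; i++) {
                if (io_uring_wait_cqe(&ring, &cqe))
                        break;
                printf("cqe res=%d\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        return 0;
}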
fs/io_uring.c

@@ -1365,6 +1365,9 @@ static void io_prep_async_work(struct io_kiocb *req)
 	io_req_init_async(req);
 	id = req->work.identity;
 
+	if (req->flags & REQ_F_FORCE_ASYNC)
+		req->work.flags |= IO_WQ_WORK_CONCURRENT;
+
 	if (req->flags & REQ_F_ISREG) {
 		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
 			io_wq_hash_work(&req->work, file_inode(req->file));
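Editor's note: the hunk above is the kernel-side counterpart of the IOSQE_ASYNC submission flag; REQ_F_FORCE_ASYNC is derived from that flag, and setting IO_WQ_WORK_CONCURRENT in io_prep_async_work() means the flag is applied wherever a request is prepared for async offload, not only in io_queue_sqe() (whose old call site is removed in the last hunk). A hedged liburing sketch of forcing a request onto the async path; the file name and buffer size are placeholders.

/*
 * Illustrative only: IOSQE_ASYNC is the userspace switch behind
 * REQ_F_FORCE_ASYNC.
 */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        char buf[4096];
        int fd = open("/etc/hostname", O_RDONLY);

        if (fd < 0 || io_uring_queue_init(4, &ring, 0) < 0)
                return 1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
        /* never attempt this read inline; always punt it to the io-wq pool */
        sqe->flags |= IOSQE_ASYNC;

        io_uring_submit(&ring);
        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                printf("read returned %d\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        close(fd);
        return 0;
}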
@@ -1846,59 +1849,39 @@ static void __io_free_req(struct io_kiocb *req)
 	percpu_ref_put(&ctx->refs);
 }
 
-static bool io_link_cancel_timeout(struct io_kiocb *req)
+static void io_kill_linked_timeout(struct io_kiocb *req)
 {
-	struct io_timeout_data *io = req->async_data;
 	struct io_ring_ctx *ctx = req->ctx;
-	int ret;
-
-	ret = hrtimer_try_to_cancel(&io->timer);
-	if (ret != -1) {
-		io_cqring_fill_event(req, -ECANCELED);
-		io_commit_cqring(ctx);
-		req->flags &= ~REQ_F_LINK_HEAD;
-		io_put_req_deferred(req, 1);
-		return true;
-	}
-
-	return false;
-}
-
-static bool __io_kill_linked_timeout(struct io_kiocb *req)
-{
 	struct io_kiocb *link;
-	bool wake_ev;
+	bool cancelled = false;
+	unsigned long flags;
 
-	if (list_empty(&req->link_list))
-		return false;
-	link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
-	if (link->opcode != IORING_OP_LINK_TIMEOUT)
-		return false;
+	spin_lock_irqsave(&ctx->completion_lock, flags);
+	link = list_first_entry_or_null(&req->link_list, struct io_kiocb,
+					link_list);
 	/*
 	 * Can happen if a linked timeout fired and link had been like
 	 * req -> link t-out -> link t-out [-> ...]
 	 */
-	if (!(link->flags & REQ_F_LTIMEOUT_ACTIVE))
-		return false;
+	if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
+		struct io_timeout_data *io = link->async_data;
+		int ret;
 
-	list_del_init(&link->link_list);
-	wake_ev = io_link_cancel_timeout(link);
+		list_del_init(&link->link_list);
+		ret = hrtimer_try_to_cancel(&io->timer);
+		if (ret != -1) {
+			io_cqring_fill_event(link, -ECANCELED);
+			io_commit_cqring(ctx);
+			cancelled = true;
+		}
+	}
 	req->flags &= ~REQ_F_LINK_TIMEOUT;
-	return wake_ev;
-}
-
-static void io_kill_linked_timeout(struct io_kiocb *req)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-	unsigned long flags;
-	bool wake_ev;
-
-	spin_lock_irqsave(&ctx->completion_lock, flags);
-	wake_ev = __io_kill_linked_timeout(req);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-	if (wake_ev)
+	if (cancelled) {
 		io_cqring_ev_posted(ctx);
+		io_put_req(link);
+	}
 }
 
 static struct io_kiocb *io_req_link_next(struct io_kiocb *req)
@@ -4977,8 +4960,10 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
 		/* make sure double remove sees this as being gone */
 		wait->private = NULL;
 		spin_unlock(&poll->head->lock);
-		if (!done)
-			__io_async_wake(req, poll, mask, io_poll_task_func);
+		if (!done) {
+			/* use wait func handler, so it matches the rq type */
+			poll->wait.func(&poll->wait, mode, sync, key);
+		}
 	}
 	refcount_dec(&req->refs);
 	return 1;
@@ -6180,7 +6165,6 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
 static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
 {
 	struct io_kiocb *linked_timeout;
-	struct io_kiocb *nxt;
 	const struct cred *old_creds = NULL;
 	int ret;
 
@@ -6206,7 +6190,6 @@ static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
 	 */
 	if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
 		if (!io_arm_poll_handler(req)) {
-punt:
 			/*
 			 * Queued up for async execution, worker will release
 			 * submit reference when the iocb is actually submitted.
@@ -6216,33 +6199,25 @@ static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
 
 		if (linked_timeout)
 			io_queue_linked_timeout(linked_timeout);
-		goto exit;
-	}
-
-	if (unlikely(ret)) {
+	} else if (likely(!ret)) {
+		/* drop submission reference */
+		req = io_put_req_find_next(req);
+
+		if (linked_timeout)
+			io_queue_linked_timeout(linked_timeout);
+
+		if (req) {
+			if (!(req->flags & REQ_F_FORCE_ASYNC))
+				goto again;
+			io_queue_async_work(req);
+		}
+	} else {
 		/* un-prep timeout, so it'll be killed as any other linked */
 		req->flags &= ~REQ_F_LINK_TIMEOUT;
 		req_set_fail_links(req);
 		io_put_req(req);
 		io_req_complete(req, ret);
-		goto exit;
 	}
 
-	/* drop submission reference */
-	nxt = io_put_req_find_next(req);
-	if (linked_timeout)
-		io_queue_linked_timeout(linked_timeout);
-
-	if (nxt) {
-		req = nxt;
-
-		if (req->flags & REQ_F_FORCE_ASYNC) {
-			linked_timeout = NULL;
-			goto punt;
-		}
-		goto again;
-	}
-exit:
 	if (old_creds)
 		revert_creds(old_creds);
 }
@@ -6266,13 +6241,6 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		if (unlikely(ret))
 			goto fail_req;
 		}
-
-		/*
-		 * Never try inline submit of IOSQE_ASYNC is set, go straight
-		 * to async execution.
-		 */
-		io_req_init_async(req);
-		req->work.flags |= IO_WQ_WORK_CONCURRENT;
 		io_queue_async_work(req);
 	} else {
 		if (sqe) {
...