Commit 59915143 authored by Jens Axboe's avatar Jens Axboe

io_uring: move timeout opcodes and handling into its own file

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e418bbc9
......@@ -5,5 +5,5 @@
obj-$(CONFIG_IO_URING) += io_uring.o xattr.o nop.o fs.o splice.o \
sync.o advise.o filetable.o \
openclose.o uring_cmd.o epoll.o \
statx.o net.o msg_ring.o
statx.o net.o msg_ring.o timeout.o
obj-$(CONFIG_IO_WQ) += io-wq.o
This diff is collapsed.
......@@ -65,7 +65,8 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
}
/* Completion-side helpers shared across io_uring source files. */
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
/* Lock-held variant of io_req_complete_post() — NOTE(review): locking
 * contract not visible in this fragment, confirm against io_uring.c. */
void __io_req_complete_post(struct io_kiocb *req);
/* Post an auxiliary CQE; returns whether the CQE was filled. */
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
u32 cflags);
void io_cqring_ev_posted(struct io_ring_ctx *ctx);
......@@ -96,5 +97,15 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
/* Queue a task-work completion carrying an explicit result/cflags pair. */
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
/* Attempt to cancel the request described by @cd; returns 0 or -errno
 * style result — NOTE(review): exact error set not visible here. */
int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd);
void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
/* Walk a request chain: @head and each subsequent ->link until NULL. */
#define io_for_each_link(pos, head) \
for (pos = (head); pos; pos = pos->link)
#endif
......@@ -488,4 +488,14 @@ struct io_kiocb {
struct io_wq_work work;
};
/*
 * Describes one cancelation request: which ring it targets and how the
 * victim request is matched (by user_data or by file, per @flags).
 */
struct io_cancel_data {
struct io_ring_ctx *ctx;
union {
/* match key: request's user_data ... */
u64 data;
/* ... or the file the request operates on */
struct file *file;
};
/* IORING_ASYNC_CANCEL_* selection flags — TODO confirm against uapi */
u32 flags;
/* sequence used to detect already-visited requests during matching */
int seq;
};
#endif
This diff is collapsed.
// SPDX-License-Identifier: GPL-2.0
/*
 * Per-timeout state: ties an hrtimer to its owning request along with
 * the user-supplied expiry and timer mode.
 */
struct io_timeout_data {
/* request this timeout belongs to */
struct io_kiocb *req;
struct hrtimer timer;
/* relative/absolute expiry as supplied in the SQE */
struct timespec64 ts;
enum hrtimer_mode mode;
/* IORING_TIMEOUT_* flags — TODO confirm against uapi */
u32 flags;
};
/* Disarm @link (a linked timeout) attached to @req; returns the request
 * to run next, or NULL — NOTE(review): return contract inferred from the
 * inline caller below, confirm against timeout.c. */
struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
struct io_kiocb *link);
/*
 * If the request chained directly off @req is a linked timeout, hand the
 * pair to __io_disarm_linked_timeout() and return its result; otherwise
 * there is nothing to disarm and NULL is returned.
 */
static inline struct io_kiocb *io_disarm_linked_timeout(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
		return NULL;

	return __io_disarm_linked_timeout(req, nxt);
}
/* Flush expired entries off the ring's timeout list. */
__cold void io_flush_timeouts(struct io_ring_ctx *ctx);
/* Cancel the timeout matched by @cd; returns 0 or a negative error. */
int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd);
/* Kill timeouts belonging to @tsk (all of them if @cancel_all); returns
 * whether any were canceled. */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
bool cancel_all);
void io_queue_linked_timeout(struct io_kiocb *req);
bool io_disarm_next(struct io_kiocb *req);
/* ->prep()/->issue() handlers for the timeout opcodes. */
int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_timeout(struct io_kiocb *req, unsigned int issue_flags);
int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment