Commit 61e98203 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: make op handlers always take issue flags

Make opcode handler interfaces a bit more consistent by always passing
in issue flags. Bulky but pretty easy and mechanical change.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 45d189c6
...@@ -3917,7 +3917,8 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags) ...@@ -3917,7 +3917,8 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
/* /*
* IORING_OP_NOP just posts a completion event, nothing else. * IORING_OP_NOP just posts a completion event, nothing else.
*/ */
static int io_nop(struct io_kiocb *req, struct io_comp_state *cs) static int io_nop(struct io_kiocb *req, unsigned int issue_flags,
struct io_comp_state *cs)
{ {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
...@@ -5581,7 +5582,7 @@ static int io_poll_remove_prep(struct io_kiocb *req, ...@@ -5581,7 +5582,7 @@ static int io_poll_remove_prep(struct io_kiocb *req,
* Find a running poll command that matches one specified in sqe->addr, * Find a running poll command that matches one specified in sqe->addr,
* and remove it if found. * and remove it if found.
*/ */
static int io_poll_remove(struct io_kiocb *req) static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
int ret; int ret;
...@@ -5632,7 +5633,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe ...@@ -5632,7 +5633,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
return 0; return 0;
} }
static int io_poll_add(struct io_kiocb *req) static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct io_poll_iocb *poll = &req->poll; struct io_poll_iocb *poll = &req->poll;
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
...@@ -5772,7 +5773,7 @@ static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags) ...@@ -5772,7 +5773,7 @@ static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
/* /*
* Remove or update an existing timeout command * Remove or update an existing timeout command
*/ */
static int io_timeout_remove(struct io_kiocb *req) static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct io_timeout_rem *tr = &req->timeout_rem; struct io_timeout_rem *tr = &req->timeout_rem;
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
...@@ -5828,7 +5829,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, ...@@ -5828,7 +5829,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return 0; return 0;
} }
static int io_timeout(struct io_kiocb *req) static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct io_timeout_data *data = req->async_data; struct io_timeout_data *data = req->async_data;
...@@ -5951,7 +5952,7 @@ static int io_async_cancel_prep(struct io_kiocb *req, ...@@ -5951,7 +5952,7 @@ static int io_async_cancel_prep(struct io_kiocb *req,
return 0; return 0;
} }
static int io_async_cancel(struct io_kiocb *req) static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
...@@ -6211,7 +6212,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags, ...@@ -6211,7 +6212,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags,
switch (req->opcode) { switch (req->opcode) {
case IORING_OP_NOP: case IORING_OP_NOP:
ret = io_nop(req, cs); ret = io_nop(req, issue_flags, cs);
break; break;
case IORING_OP_READV: case IORING_OP_READV:
case IORING_OP_READ_FIXED: case IORING_OP_READ_FIXED:
...@@ -6227,10 +6228,10 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags, ...@@ -6227,10 +6228,10 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags,
ret = io_fsync(req, issue_flags); ret = io_fsync(req, issue_flags);
break; break;
case IORING_OP_POLL_ADD: case IORING_OP_POLL_ADD:
ret = io_poll_add(req); ret = io_poll_add(req, issue_flags);
break; break;
case IORING_OP_POLL_REMOVE: case IORING_OP_POLL_REMOVE:
ret = io_poll_remove(req); ret = io_poll_remove(req, issue_flags);
break; break;
case IORING_OP_SYNC_FILE_RANGE: case IORING_OP_SYNC_FILE_RANGE:
ret = io_sync_file_range(req, issue_flags); ret = io_sync_file_range(req, issue_flags);
...@@ -6248,10 +6249,10 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags, ...@@ -6248,10 +6249,10 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags,
ret = io_recv(req, issue_flags, cs); ret = io_recv(req, issue_flags, cs);
break; break;
case IORING_OP_TIMEOUT: case IORING_OP_TIMEOUT:
ret = io_timeout(req); ret = io_timeout(req, issue_flags);
break; break;
case IORING_OP_TIMEOUT_REMOVE: case IORING_OP_TIMEOUT_REMOVE:
ret = io_timeout_remove(req); ret = io_timeout_remove(req, issue_flags);
break; break;
case IORING_OP_ACCEPT: case IORING_OP_ACCEPT:
ret = io_accept(req, issue_flags, cs); ret = io_accept(req, issue_flags, cs);
...@@ -6260,7 +6261,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags, ...@@ -6260,7 +6261,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags,
ret = io_connect(req, issue_flags, cs); ret = io_connect(req, issue_flags, cs);
break; break;
case IORING_OP_ASYNC_CANCEL: case IORING_OP_ASYNC_CANCEL:
ret = io_async_cancel(req); ret = io_async_cancel(req, issue_flags);
break; break;
case IORING_OP_FALLOCATE: case IORING_OP_FALLOCATE:
ret = io_fallocate(req, issue_flags); ret = io_fallocate(req, issue_flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment