Commit 4e906702 authored by Jens Axboe

io_uring: always use req->buf_index for the provided buffer group

The read/write opcodes already use it, but recv/recvmsg do not. If we
switch them over, and read and validate the group ID at init time while
we're checking whether the opcode supports buffer selection anyway, then
we can do it in one spot and we don't have to pass a separate group ID
to io_buffer_select().
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent bb68d504
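
For context before the diff: a minimal userspace sketch (not part of this commit) of how a provided-buffer recv is submitted with liburing. The group ID goes into sqe->buf_group and IOSQE_BUFFER_SELECT opts the request into buffer selection; with this change the kernel reads that field once in io_init_req() into req->buf_index. The BGID constant and the queue_recv() helper are illustrative only, and the sketch assumes the buffer group was populated earlier (e.g. via io_uring_prep_provide_buffers()) and omits error handling.

#include <liburing.h>

#define BGID	7	/* hypothetical buffer group ID, provided to the kernel beforehand */

static void queue_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* addr/len are NULL/0: the kernel picks a buffer from group BGID at recv time */
	io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
	sqe->buf_group = BGID;
	sqe->flags |= IOSQE_BUFFER_SELECT;
}
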
@@ -634,7 +634,6 @@ struct io_sr_msg {
 		void __user			*buf;
 	};
 	int				msg_flags;
-	int				bgid;
 	size_t				len;
 	size_t				done_io;
 	unsigned int			flags;
@@ -3299,6 +3298,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	req->rw.addr = READ_ONCE(sqe->addr);
 	req->rw.len = READ_ONCE(sqe->len);
 	req->rw.flags = READ_ONCE(sqe->rw_flags);
+	/* used for fixed read/write too - just read unconditionally */
 	req->buf_index = READ_ONCE(sqe->buf_index);
 	return 0;
 }
@@ -3459,7 +3459,7 @@ static void io_buffer_add_list(struct io_ring_ctx *ctx,
 }
 
 static void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
-				     int bgid, unsigned int issue_flags)
+				     unsigned int issue_flags)
 {
 	struct io_buffer *kbuf = req->kbuf;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -3470,7 +3470,7 @@ static void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 
 	io_ring_submit_lock(req->ctx, issue_flags);
 
-	bl = io_buffer_get_list(ctx, bgid);
+	bl = io_buffer_get_list(ctx, req->buf_index);
 	if (bl && !list_empty(&bl->buf_list)) {
 		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
 		list_del(&kbuf->list);
@@ -3504,7 +3504,7 @@ static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
 		return -EINVAL;
 
 	len = clen;
-	buf = io_buffer_select(req, &len, req->buf_index, issue_flags);
+	buf = io_buffer_select(req, &len, issue_flags);
 	if (IS_ERR(buf))
 		return PTR_ERR(buf);
 	iov[0].iov_base = buf;
@@ -3526,7 +3526,7 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
 	len = iov[0].iov_len;
 	if (len < 0)
 		return -EINVAL;
-	buf = io_buffer_select(req, &len, req->buf_index, issue_flags);
+	buf = io_buffer_select(req, &len, issue_flags);
 	if (IS_ERR(buf))
 		return PTR_ERR(buf);
 	iov[0].iov_base = buf;
@@ -3578,8 +3578,7 @@ static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
 
 	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
 		if (req->flags & REQ_F_BUFFER_SELECT) {
-			buf = io_buffer_select(req, &sqe_len, req->buf_index,
-						issue_flags);
+			buf = io_buffer_select(req, &sqe_len, issue_flags);
 			if (IS_ERR(buf))
 				return ERR_CAST(buf);
 			req->rw.len = sqe_len;
@@ -5501,7 +5500,6 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	sr->flags = READ_ONCE(sqe->addr2);
 	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
 		return -EINVAL;
-	sr->bgid = READ_ONCE(sqe->buf_group);
 	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
 	if (sr->msg_flags & MSG_DONTWAIT)
 		req->flags |= REQ_F_NOWAIT;
@@ -5543,7 +5541,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	if (req->flags & REQ_F_BUFFER_SELECT) {
 		void __user *buf;
 
-		buf = io_buffer_select(req, &sr->len, sr->bgid, issue_flags);
+		buf = io_buffer_select(req, &sr->len, issue_flags);
 		if (IS_ERR(buf))
 			return PTR_ERR(buf);
 		kmsg->fast_iov[0].iov_base = buf;
@@ -5607,7 +5605,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	if (req->flags & REQ_F_BUFFER_SELECT) {
 		void __user *buf;
 
-		buf = io_buffer_select(req, &sr->len, sr->bgid, issue_flags);
+		buf = io_buffer_select(req, &sr->len, issue_flags);
 		if (IS_ERR(buf))
 			return PTR_ERR(buf);
 		sr->buf = buf;
@@ -7777,9 +7775,11 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		/* enforce forwards compatibility on users */
 		if (sqe_flags & ~SQE_VALID_FLAGS)
 			return -EINVAL;
-		if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
-		    !io_op_defs[opcode].buffer_select)
-			return -EOPNOTSUPP;
+		if (sqe_flags & IOSQE_BUFFER_SELECT) {
+			if (!io_op_defs[opcode].buffer_select)
+				return -EOPNOTSUPP;
+			req->buf_index = READ_ONCE(sqe->buf_group);
+		}
 		if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
 			ctx->drain_disabled = true;
 		if (sqe_flags & IOSQE_IO_DRAIN) {