Commit a69307a5 authored by Jens Axboe

io_uring/kbuf: turn io_buffer_list booleans into flags

We could just move these two and save some space, but in preparation
for adding another flag, turn them into flags first.

This saves 8 bytes in struct io_buffer_list, shrinking it from 40 bytes
to exactly half a cacheline (32 bytes) on 64-bit archs.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 566a4242
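
The conversion the diff applies is the standard bitmask idiom: each former boolean becomes one bit in a single 16-bit flags word, tested with `&`, set with `|=`, and cleared with `&= ~`. Below is a minimal, self-contained userspace sketch of that idiom; struct blist and the main() harness are hypothetical stand-ins, not the kernel's actual struct io_buffer_list (only the IOBL_* names and values come from the diff itself).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Flag values as defined in the diff below. */
enum {
	IOBL_BUF_RING = 1,	/* was: __u8 is_buf_ring */
	IOBL_MMAP     = 2,	/* was: __u8 is_mmap */
};

/* Hypothetical struct mirroring the tail of io_buffer_list. */
struct blist {
	uint16_t head;
	uint16_t mask;
	uint16_t flags;		/* one word replaces two __u8 booleans */
};

int main(void)
{
	struct blist bl = { 0 };

	bl.flags |= (IOBL_BUF_RING | IOBL_MMAP);	/* set both in one store */
	if (bl.flags & IOBL_BUF_RING)
		printf("buffer ring registered\n");

	bl.flags &= ~IOBL_MMAP;				/* clear a single flag */
	assert(!(bl.flags & IOBL_MMAP));
	return 0;
}

If the layout matches the diff, the saving comes from padding: the new __u16 flags packs in next to the existing __u16 head/mask words, while the two trailing __u8 fields previously forced the struct to be padded out to the next 8-byte boundary (40 bytes instead of 32).
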
io_uring/kbuf.c
@@ -189,7 +189,7 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 	bl = io_buffer_get_list(ctx, req->buf_index);
 	if (likely(bl)) {
-		if (bl->is_buf_ring)
+		if (bl->flags & IOBL_BUF_RING)
 			ret = io_ring_buffer_select(req, len, bl, issue_flags);
 		else
 			ret = io_provided_buffer_select(req, len, bl);
@@ -287,7 +287,7 @@ int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
 	if (unlikely(!bl))
 		goto out_unlock;
 
-	if (bl->is_buf_ring) {
+	if (bl->flags & IOBL_BUF_RING) {
 		ret = io_ring_buffers_peek(req, arg, bl);
 		/*
 		 * Don't recycle these buffers if we need to go through poll.
@@ -320,7 +320,7 @@ int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
 	if (unlikely(!bl))
 		return -ENOENT;
 
-	if (bl->is_buf_ring) {
+	if (bl->flags & IOBL_BUF_RING) {
 		ret = io_ring_buffers_peek(req, arg, bl);
 		if (ret > 0)
 			req->flags |= REQ_F_BUFFERS_COMMIT;
@@ -340,22 +340,22 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
 	if (!nbufs)
 		return 0;
 
-	if (bl->is_buf_ring) {
+	if (bl->flags & IOBL_BUF_RING) {
 		i = bl->buf_ring->tail - bl->head;
 		if (bl->buf_nr_pages) {
 			int j;
 
-			if (!bl->is_mmap) {
+			if (!(bl->flags & IOBL_MMAP)) {
 				for (j = 0; j < bl->buf_nr_pages; j++)
 					unpin_user_page(bl->buf_pages[j]);
 			}
 			io_pages_unmap(bl->buf_ring, &bl->buf_pages,
-					&bl->buf_nr_pages, bl->is_mmap);
-			bl->is_mmap = 0;
+					&bl->buf_nr_pages, bl->flags & IOBL_MMAP);
+			bl->flags &= ~IOBL_MMAP;
 		}
 		/* make sure it's seen as empty */
 		INIT_LIST_HEAD(&bl->buf_list);
-		bl->is_buf_ring = 0;
+		bl->flags &= ~IOBL_BUF_RING;
 		return i;
 	}
@@ -442,7 +442,7 @@ int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 	if (bl) {
 		ret = -EINVAL;
 		/* can't use provide/remove buffers command on mapped buffers */
-		if (!bl->is_buf_ring)
+		if (!(bl->flags & IOBL_BUF_RING))
 			ret = __io_remove_buffers(ctx, bl, p->nbufs);
 	}
 	io_ring_submit_unlock(ctx, issue_flags);
...@@ -589,7 +589,7 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) ...@@ -589,7 +589,7 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
} }
} }
/* can't add buffers via this command for a mapped buffer ring */ /* can't add buffers via this command for a mapped buffer ring */
if (bl->is_buf_ring) { if (bl->flags & IOBL_BUF_RING) {
ret = -EINVAL; ret = -EINVAL;
goto err; goto err;
} }
@@ -641,8 +641,8 @@ static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
 	bl->buf_pages = pages;
 	bl->buf_nr_pages = nr_pages;
 	bl->buf_ring = br;
-	bl->is_buf_ring = 1;
-	bl->is_mmap = 0;
+	bl->flags |= IOBL_BUF_RING;
+	bl->flags &= ~IOBL_MMAP;
 	return 0;
 error_unpin:
 	unpin_user_pages(pages, nr_pages);
@@ -665,8 +665,7 @@ static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
 		return -ENOMEM;
 	}
 
-	bl->is_buf_ring = 1;
-	bl->is_mmap = 1;
+	bl->flags |= (IOBL_BUF_RING | IOBL_MMAP);
 	return 0;
 }
@@ -705,7 +704,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 	bl = io_buffer_get_list(ctx, reg.bgid);
 	if (bl) {
 		/* if mapped buffer ring OR classic exists, don't allow */
-		if (bl->is_buf_ring || !list_empty(&bl->buf_list))
+		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
 			return -EEXIST;
 	} else {
 		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
@@ -747,7 +746,7 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 	bl = io_buffer_get_list(ctx, reg.bgid);
 	if (!bl)
 		return -ENOENT;
-	if (!bl->is_buf_ring)
+	if (!(bl->flags & IOBL_BUF_RING))
 		return -EINVAL;
 
 	xa_erase(&ctx->io_bl_xa, bl->bgid);
@@ -771,7 +770,7 @@ int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
 	bl = io_buffer_get_list(ctx, buf_status.buf_group);
 	if (!bl)
 		return -ENOENT;
-	if (!bl->is_buf_ring)
+	if (!(bl->flags & IOBL_BUF_RING))
 		return -EINVAL;
 
 	buf_status.head = bl->head;
@@ -802,7 +801,7 @@ struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
 	bl = xa_load(&ctx->io_bl_xa, bgid);
 	/* must be a mmap'able buffer ring and have pages */
 	ret = false;
-	if (bl && bl->is_mmap)
+	if (bl && bl->flags & IOBL_MMAP)
 		ret = atomic_inc_not_zero(&bl->refs);
 	rcu_read_unlock();
io_uring/kbuf.h
@@ -4,6 +4,13 @@
 
 #include <uapi/linux/io_uring.h>
 
+enum {
+	/* ring mapped provided buffers */
+	IOBL_BUF_RING	= 1,
+	/* ring mapped provided buffers, but mmap'ed by application */
+	IOBL_MMAP	= 2,
+};
+
 struct io_buffer_list {
 	/*
 	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
@@ -25,12 +32,9 @@ struct io_buffer_list {
 	__u16 head;
 	__u16 mask;
 
+	__u16 flags;
+
 	atomic_t refs;
-
-	/* ring mapped provided buffers */
-	__u8 is_buf_ring;
-	/* ring mapped provided buffers, but mmap'ed by application */
-	__u8 is_mmap;
 };
 
 struct io_buffer {