Commit 8435c6f3 authored by Jens Axboe

io_uring/kbuf: cleanup passing back cflags

We have various functions calculating the CQE cflags we need to pass
back, but it's all the same everywhere. Make a number of the putting
functions void, and just have the two main helpers for this, io_put_kbuf()
and io_put_kbuf_comp(), calculate the actual mask and pass it back.
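
For reference, the mask both helpers now build is the usual two-part CQE
buffer encoding; roughly (a sketch of the pattern open-coded in the diff
below, wrapped in a hypothetical helper purely for illustration):

    /* hypothetical helper for illustration only; the diff below open-codes this */
    static inline unsigned int io_kbuf_cqe_flags(struct io_kiocb *req)
    {
            /* mark the CQE as carrying a buffer and encode the buffer index */
            return IORING_CQE_F_BUFFER |
                   (req->buf_index << IORING_CQE_BUFFER_SHIFT);
    }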

While at it, clean up how we put REQ_F_BUFFER_RING buffers. Before
this change, we would call into __io_put_kbuf() only to go right back
into the header-defined functions. As clearing this type of buffer is
just re-assigning the buf_index and incrementing the head, this is
very wasteful.
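
Roughly, the ring-buffer put then reduces to the new inline helper shown
in the diff below (sketch; no locking or list manipulation involved):

    static inline void __io_put_kbuf_ring(struct io_kiocb *req)
    {
            if (req->buf_list) {
                    /* hand the group ID back and consume one ring entry */
                    req->buf_index = req->buf_list->bgid;
                    req->buf_list->head++;
            }
            req->flags &= ~REQ_F_BUFFER_RING;
    }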
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 949249e2
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -102,10 +102,8 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 	return true;
 }
 
-unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
+void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
 {
-	unsigned int cflags;
-
 	/*
 	 * We can add this buffer back to two lists:
 	 *
@@ -118,21 +116,17 @@ unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
 	 * We migrate buffers from the comp_list to the issue cache list
 	 * when we need one.
 	 */
-	if (req->flags & REQ_F_BUFFER_RING) {
-		/* no buffers to recycle for this case */
-		cflags = __io_put_kbuf_list(req, NULL);
-	} else if (issue_flags & IO_URING_F_UNLOCKED) {
+	if (issue_flags & IO_URING_F_UNLOCKED) {
 		struct io_ring_ctx *ctx = req->ctx;
 
 		spin_lock(&ctx->completion_lock);
-		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
+		__io_put_kbuf_list(req, &ctx->io_buffers_comp);
 		spin_unlock(&ctx->completion_lock);
 	} else {
 		lockdep_assert_held(&req->ctx->uring_lock);
 
-		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
+		__io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
 	}
-	return cflags;
 }
 
 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -57,7 +57,7 @@ int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
 
 void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);
 
-unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
 
 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
 
@@ -108,41 +108,54 @@ static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	return false;
 }
 
-static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
-					      struct list_head *list)
+static inline void __io_put_kbuf_ring(struct io_kiocb *req)
 {
-	unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
-
-	if (req->flags & REQ_F_BUFFER_RING) {
-		if (req->buf_list) {
-			req->buf_index = req->buf_list->bgid;
-			req->buf_list->head++;
-		}
-		req->flags &= ~REQ_F_BUFFER_RING;
+	if (req->buf_list) {
+		req->buf_index = req->buf_list->bgid;
+		req->buf_list->head++;
+	}
+	req->flags &= ~REQ_F_BUFFER_RING;
+}
+
+static inline void __io_put_kbuf_list(struct io_kiocb *req,
+				      struct list_head *list)
+{
+	if (req->flags & REQ_F_BUFFER_RING) {
+		__io_put_kbuf_ring(req);
 	} else {
 		req->buf_index = req->kbuf->bgid;
 		list_add(&req->kbuf->list, list);
 		req->flags &= ~REQ_F_BUFFER_SELECTED;
 	}
-
-	return ret;
 }
 
 static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
 {
+	unsigned int ret;
+
 	lockdep_assert_held(&req->ctx->completion_lock);
 
 	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
 		return 0;
-	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
+
+	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
+	__io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
+	return ret;
 }
 
 static inline unsigned int io_put_kbuf(struct io_kiocb *req,
 				       unsigned issue_flags)
 {
-	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
+	unsigned int ret;
+
+	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
 		return 0;
-	return __io_put_kbuf(req, issue_flags);
+
+	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
+	if (req->flags & REQ_F_BUFFER_RING)
+		__io_put_kbuf_ring(req);
+	else
+		__io_put_kbuf(req, issue_flags);
+	return ret;
 }
 
 #endif