Commit 9e15c3a0 authored by Jens Axboe

io_uring: convert io_buffer_idr to XArray

Like we did for the personality idr, convert the IO buffer idr to use
XArray. This avoids a use-after-free on removal of entries, since idr
doesn't like doing so from inside an iterator, and it nicely reduces
the amount of code we need to support this feature.

Fixes: 5a2e745d ("io_uring: buffer registration infrastructure")
Cc: stable@vger.kernel.org
Cc: Matthew Wilcox <willy@infradead.org>
Cc: yangerkun <yangerkun@huawei.com>
Reported-by: Hulk Robot <hulkci@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 16efa4fc
...@@ -402,7 +402,7 @@ struct io_ring_ctx { ...@@ -402,7 +402,7 @@ struct io_ring_ctx {
struct socket *ring_sock; struct socket *ring_sock;
#endif #endif
struct idr io_buffer_idr; struct xarray io_buffers;
struct xarray personalities; struct xarray personalities;
u32 pers_next; u32 pers_next;
...@@ -1135,7 +1135,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) ...@@ -1135,7 +1135,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
init_waitqueue_head(&ctx->cq_wait); init_waitqueue_head(&ctx->cq_wait);
INIT_LIST_HEAD(&ctx->cq_overflow_list); INIT_LIST_HEAD(&ctx->cq_overflow_list);
init_completion(&ctx->ref_comp); init_completion(&ctx->ref_comp);
idr_init(&ctx->io_buffer_idr); xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1); xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
mutex_init(&ctx->uring_lock); mutex_init(&ctx->uring_lock);
init_waitqueue_head(&ctx->wait); init_waitqueue_head(&ctx->wait);
...@@ -2843,7 +2843,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, ...@@ -2843,7 +2843,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
lockdep_assert_held(&req->ctx->uring_lock); lockdep_assert_held(&req->ctx->uring_lock);
head = idr_find(&req->ctx->io_buffer_idr, bgid); head = xa_load(&req->ctx->io_buffers, bgid);
if (head) { if (head) {
if (!list_empty(&head->list)) { if (!list_empty(&head->list)) {
kbuf = list_last_entry(&head->list, struct io_buffer, kbuf = list_last_entry(&head->list, struct io_buffer,
...@@ -2851,7 +2851,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, ...@@ -2851,7 +2851,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
list_del(&kbuf->list); list_del(&kbuf->list);
} else { } else {
kbuf = head; kbuf = head;
idr_remove(&req->ctx->io_buffer_idr, bgid); xa_erase(&req->ctx->io_buffers, bgid);
} }
if (*len > kbuf->len) if (*len > kbuf->len)
*len = kbuf->len; *len = kbuf->len;
...@@ -3892,7 +3892,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf, ...@@ -3892,7 +3892,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
} }
i++; i++;
kfree(buf); kfree(buf);
idr_remove(&ctx->io_buffer_idr, bgid); xa_erase(&ctx->io_buffers, bgid);
return i; return i;
} }
...@@ -3910,7 +3910,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) ...@@ -3910,7 +3910,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
lockdep_assert_held(&ctx->uring_lock); lockdep_assert_held(&ctx->uring_lock);
ret = -ENOENT; ret = -ENOENT;
head = idr_find(&ctx->io_buffer_idr, p->bgid); head = xa_load(&ctx->io_buffers, p->bgid);
if (head) if (head)
ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs); ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
if (ret < 0) if (ret < 0)
...@@ -3993,21 +3993,14 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) ...@@ -3993,21 +3993,14 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
lockdep_assert_held(&ctx->uring_lock); lockdep_assert_held(&ctx->uring_lock);
list = head = idr_find(&ctx->io_buffer_idr, p->bgid); list = head = xa_load(&ctx->io_buffers, p->bgid);
ret = io_add_buffers(p, &head); ret = io_add_buffers(p, &head);
if (ret < 0) if (ret >= 0 && !list) {
goto out; ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
if (ret < 0)
if (!list) {
ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
GFP_KERNEL);
if (ret < 0) {
__io_remove_buffers(ctx, head, p->bgid, -1U); __io_remove_buffers(ctx, head, p->bgid, -1U);
goto out;
}
} }
out:
if (ret < 0) if (ret < 0)
req_set_fail_links(req); req_set_fail_links(req);
...@@ -8333,19 +8326,13 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx) ...@@ -8333,19 +8326,13 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx)
return -ENXIO; return -ENXIO;
} }
static int __io_destroy_buffers(int id, void *p, void *data)
{
struct io_ring_ctx *ctx = data;
struct io_buffer *buf = p;
__io_remove_buffers(ctx, buf, id, -1U);
return 0;
}
static void io_destroy_buffers(struct io_ring_ctx *ctx) static void io_destroy_buffers(struct io_ring_ctx *ctx)
{ {
idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx); struct io_buffer *buf;
idr_destroy(&ctx->io_buffer_idr); unsigned long index;
xa_for_each(&ctx->io_buffers, index, buf)
__io_remove_buffers(ctx, buf, index, -1U);
} }
static void io_req_cache_free(struct list_head *list, struct task_struct *tsk) static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment