Commit 294e73ff authored by Linus Torvalds

Merge tag 'io_uring-6.1-2022-10-20' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:

 - Fix a potential memory leak in the error handling path of io-wq setup
   (Rafael)

 - Kill an errant debug statement that got added in this release (me)

 - Fix an oops with an invalid direct descriptor with IORING_OP_MSG_RING
   (Harshit)

 - Remove unneeded FFS_SCM flagging (Pavel)

 - Remove polling off the exit path (Pavel)

 - Move out direct descriptor debug check to the cleanup path (Pavel)

 - Use the proper helper rather than open-coding cached request get
   (Pavel)

* tag 'io_uring-6.1-2022-10-20' of git://git.kernel.dk/linux:
  io-wq: Fix memory leak in worker creation
  io_uring/msg_ring: Fix NULL pointer dereference in io_msg_send_fd()
  io_uring/rw: remove leftover debug statement
  io_uring: don't iopoll from io_ring_ctx_wait_and_kill()
  io_uring: reuse io_alloc_req()
  io_uring: kill hot path fixed file bitmap debug checks
  io_uring: remove FFS_SCM
parents 1d61754c 996d3efe
@@ -5,22 +5,9 @@
 #include <linux/file.h>
 #include <linux/io_uring_types.h>
 
-/*
- * FFS_SCM is only available on 64-bit archs, for 32-bit we just define it as 0
- * and define IO_URING_SCM_ALL. For this case, we use SCM for all files as we
- * can't safely always dereference the file when the task has exited and ring
- * cleanup is done. If a file is tracked and part of SCM, then unix gc on
- * process exit may reap it before __io_sqe_files_unregister() is run.
- */
 #define FFS_NOWAIT 0x1UL
 #define FFS_ISREG 0x2UL
-#if defined(CONFIG_64BIT)
-#define FFS_SCM 0x4UL
-#else
-#define IO_URING_SCM_ALL
-#define FFS_SCM 0x0UL
-#endif
-#define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG|FFS_SCM)
+#define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG)
 
 bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files);
 void io_free_file_tables(struct io_file_table *table);
@@ -38,6 +25,7 @@ unsigned int io_file_get_flags(struct file *file);
 
 static inline void io_file_bitmap_clear(struct io_file_table *table, int bit)
 {
+	WARN_ON_ONCE(!test_bit(bit, table->bitmap));
 	__clear_bit(bit, table->bitmap);
 	table->alloc_hint = bit;
 }
...
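The filetable.h hunks above are the header side of the FFS_SCM removal: io_uring stores per-file flag bits in the low bits of each fixed-file pointer (which works because struct file pointers are aligned, leaving those bits free), and FFS_MASK strips the bits back off to recover the pointer; with FFS_SCM gone only FFS_NOWAIT and FFS_ISREG remain. A minimal standalone sketch of that tagged-pointer idea, assuming nothing beyond standard C; the struct and main() are illustrative userspace stand-ins, not kernel code:

/* Sketch (not kernel code): flag bits packed into the low bits of an
 * aligned pointer, recovered with an FFS_MASK-style mask. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define FFS_NOWAIT 0x1UL
#define FFS_ISREG  0x2UL
#define FFS_MASK   ~(FFS_NOWAIT | FFS_ISREG)

/* stand-in for struct file; the alignment frees the low two bits */
struct demo_file {
	int fd;
} __attribute__((aligned(8)));

int main(void)
{
	struct demo_file *f = aligned_alloc(8, sizeof(*f));

	if (!f)
		return 1;
	f->fd = 42;

	/* pack: pointer plus flag bits in one unsigned long */
	unsigned long file_ptr = (unsigned long)f | FFS_ISREG | FFS_NOWAIT;

	/* unpack: the mask recovers the pointer, the low bits are the flags */
	struct demo_file *back = (struct demo_file *)(file_ptr & FFS_MASK);
	unsigned long flags = file_ptr & ~FFS_MASK;

	assert(back == f);
	printf("fd=%d isreg=%d nowait=%d\n", back->fd,
	       !!(flags & FFS_ISREG), !!(flags & FFS_NOWAIT));
	free(f);
	return 0;
}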
@@ -1164,10 +1164,10 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
 		if (!wqe)
 			goto err;
+		wq->wqes[node] = wqe;
 		if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
 			goto err;
 		cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
-		wq->wqes[node] = wqe;
 		wqe->node = alloc_node;
 		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
 		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
...
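The io-wq.c hunk above is the whole memory-leak fix: the error path frees whatever it finds in wq->wqes[], so a freshly allocated wqe has to be published into that array before the next fallible step (the cpumask allocation); previously a cpumask failure left the wqe unreachable and leaked. A standalone sketch of that unwind pattern, with made-up names rather than the kernel's:

/* Sketch: record each partially built object in the owning container
 * before the next fallible step, so one error path can free everything. */
#include <stdio.h>
#include <stdlib.h>

#define NR_NODES 4

struct node_ctx { int *cpu_mask; };
struct wq { struct node_ctx *nodes[NR_NODES]; };

static struct wq *wq_create(void)
{
	struct wq *wq = calloc(1, sizeof(*wq));

	if (!wq)
		return NULL;

	for (int i = 0; i < NR_NODES; i++) {
		struct node_ctx *n = calloc(1, sizeof(*n));

		if (!n)
			goto err;
		wq->nodes[i] = n;	/* publish first, mirroring "wq->wqes[node] = wqe;" */
		n->cpu_mask = calloc(8, sizeof(int));
		if (!n->cpu_mask)
			goto err;	/* n is reachable, so the error path frees it */
	}
	return wq;

err:
	for (int i = 0; i < NR_NODES; i++) {
		if (!wq->nodes[i])
			continue;
		free(wq->nodes[i]->cpu_mask);
		free(wq->nodes[i]);
	}
	free(wq);
	return NULL;
}

int main(void)
{
	struct wq *wq = wq_create();

	printf("wq %s\n", wq ? "created" : "failed");
	/* teardown of the success case omitted for brevity */
	return 0;
}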
@@ -1587,8 +1587,6 @@ unsigned int io_file_get_flags(struct file *file)
 		res |= FFS_ISREG;
 	if (__io_file_supports_nowait(file, mode))
 		res |= FFS_NOWAIT;
-	if (io_file_need_scm(file))
-		res |= FFS_SCM;
 	return res;
 }
@@ -1860,7 +1858,6 @@ inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
 	/* mask in overlapping REQ_F and FFS bits */
 	req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
 	io_req_set_rsrc_node(req, ctx, 0);
-	WARN_ON_ONCE(file && !test_bit(fd, ctx->file_table.bitmap));
 out:
 	io_ring_submit_unlock(ctx, issue_flags);
 	return file;
@@ -2563,18 +2560,14 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx)
 static void io_req_caches_free(struct io_ring_ctx *ctx)
 {
-	struct io_submit_state *state = &ctx->submit_state;
 	int nr = 0;
 
 	mutex_lock(&ctx->uring_lock);
-	io_flush_cached_locked_reqs(ctx, state);
+	io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
 
 	while (!io_req_cache_empty(ctx)) {
-		struct io_wq_work_node *node;
-		struct io_kiocb *req;
+		struct io_kiocb *req = io_alloc_req(ctx);
 
-		node = wq_stack_extract(&state->free_list);
-		req = container_of(node, struct io_kiocb, comp_list);
 		kmem_cache_free(req_cachep, req);
 		nr++;
 	}
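The hunk above replaces an open-coded pop of the request free list (wq_stack_extract() plus container_of()) with the existing io_alloc_req() helper, which performs the same cached-request get. For readers unfamiliar with the intrusive-list idiom being hidden here, a standalone sketch follows; the types and helpers are invented for the demo, only the container_of() trick mirrors the kernel's:

/* Sketch: pop an intrusive free list and map the embedded node back to
 * its containing object, as the open-coded loop above used to do. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_node { struct work_node *next; };
struct stack { struct work_node *head; };

struct request {
	int id;
	struct work_node comp_list;	/* embedded node, like io_kiocb::comp_list */
};

static struct work_node *stack_extract(struct stack *s)
{
	struct work_node *n = s->head;

	if (n)
		s->head = n->next;
	return n;
}

int main(void)
{
	struct request reqs[2] = { { .id = 1 }, { .id = 2 } };
	struct stack free_list = { .head = &reqs[0].comp_list };

	reqs[0].comp_list.next = &reqs[1].comp_list;
	reqs[1].comp_list.next = NULL;

	/* the open-coded "cached request get": pop a node, recover the request */
	while (free_list.head) {
		struct work_node *node = stack_extract(&free_list);
		struct request *req = container_of(node, struct request, comp_list);

		printf("freeing cached req %d\n", req->id);
	}
	return 0;
}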
@@ -2811,15 +2804,12 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 		io_poll_remove_all(ctx, NULL, true);
 	mutex_unlock(&ctx->uring_lock);
 
-	/* failed during ring init, it couldn't have issued any requests */
-	if (ctx->rings) {
+	/*
+	 * If we failed setting up the ctx, we might not have any rings
+	 * and therefore did not submit any requests
+	 */
+	if (ctx->rings)
 		io_kill_timeouts(ctx, NULL, true);
-		/* if we failed setting up the ctx, we might not have any rings */
-		io_iopoll_try_reap_events(ctx);
-		/* drop cached put refs after potentially doing completions */
-		if (current->io_uring)
-			io_uring_drop_tctx_refs(current);
-	}
 
 	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
 	/*
...
@@ -95,6 +95,9 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 	msg->src_fd = array_index_nospec(msg->src_fd, ctx->nr_user_files);
 	file_ptr = io_fixed_file_slot(&ctx->file_table, msg->src_fd)->file_ptr;
+	if (!file_ptr)
+		goto out_unlock;
+
 	src_file = (struct file *) (file_ptr & FFS_MASK);
 	get_file(src_file);
...
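The msg_ring hunk above is the NULL-pointer fix: a slot in the registered (direct descriptor) file table may simply be empty, in which case its file_ptr is 0, masking with FFS_MASK yields a NULL file pointer, and get_file() then dereferences it. Bailing out to out_unlock for an empty slot avoids the oops. A standalone sketch of the failure mode and the added check, using invented table and helper names:

/* Sketch: a lookup in a sparse slot table must check for an empty slot
 * before treating the masked value as a valid pointer. */
#include <stdio.h>

#define FFS_NOWAIT 0x1UL
#define FFS_ISREG  0x2UL
#define FFS_MASK   ~(FFS_NOWAIT | FFS_ISREG)

struct demo_file { int fd; };

static struct demo_file registered = { .fd = 7 };

/* slot 0 will hold a file, slot 1 is never populated */
static unsigned long file_table[2];

static int send_fd_from_slot(unsigned int slot)
{
	unsigned long file_ptr = file_table[slot];

	if (!file_ptr)		/* the check the fix adds */
		return -1;	/* empty slot: bail out instead of dereferencing NULL */

	struct demo_file *f = (struct demo_file *)(file_ptr & FFS_MASK);

	printf("slot %u -> fd %d\n", slot, f->fd);
	return 0;
}

int main(void)
{
	file_table[0] = (unsigned long)&registered | FFS_ISREG;

	send_fd_from_slot(0);		/* valid slot */
	if (send_fd_from_slot(1) < 0)	/* empty slot, previously a NULL deref */
		printf("slot 1 is empty, skipped\n");
	return 0;
}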
@@ -757,20 +757,17 @@ int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
 void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
 {
-#if !defined(IO_URING_SCM_ALL)
 	int i;
 
 	for (i = 0; i < ctx->nr_user_files; i++) {
 		struct file *file = io_file_from_index(&ctx->file_table, i);
 
-		if (!file)
-			continue;
-		if (io_fixed_file_slot(&ctx->file_table, i)->file_ptr & FFS_SCM)
+		/* skip scm accounted files, they'll be freed by ->ring_sock */
+		if (!file || io_file_need_scm(file))
 			continue;
 		io_file_bitmap_clear(&ctx->file_table, i);
 		fput(file);
 	}
-#endif
 
 #if defined(CONFIG_UNIX)
 	if (ctx->ring_sock) {
...
@@ -82,11 +82,7 @@ int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
 #if defined(CONFIG_UNIX)
 static inline bool io_file_need_scm(struct file *filp)
 {
-#if defined(IO_URING_SCM_ALL)
-	return true;
-#else
 	return !!unix_get_socket(filp);
-#endif
 }
 #else
 static inline bool io_file_need_scm(struct file *filp)
...
@@ -242,8 +242,6 @@ static void io_req_io_end(struct io_kiocb *req)
 {
 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 
-	WARN_ON(!in_task());
-
 	if (rw->kiocb.ki_flags & IOCB_WRITE) {
 		kiocb_end_write(req);
 		fsnotify_modify(req->file);
...