Commit 6ad4bf6e authored by Linus Torvalds

Merge tag 'io_uring-5.10-2020-10-12' of git://git.kernel.dk/linux-block

Pull io_uring updates from Jens Axboe:

 - Add blkcg accounting for io-wq offload (Dennis)

 - A use-after-free fix for io-wq (Hillf)

 - Cancelation fixes and improvements

 - Use proper files_struct references for offload

 - Cleanup of io_uring_get_socket() since that can now go into our own
   header

 - SQPOLL fixes and cleanups, and support for sharing the thread

 - Improvement to how page accounting is done for registered buffers and
   huge pages, accounting the real pinned state

 - Series cleaning up the xarray code (Willy)

 - Various cleanups, refactoring, and improvements (Pavel)

 - Use raw spinlock for io-wq (Sebastian)

 - Add support for ring restrictions (Stefano)
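
   For the ring-restrictions item above, here is a minimal userspace
   sketch of the intended flow (an illustration only, assuming the
   updated uapi header from this series and x86-64 syscall numbers;
   no liburing): create the ring disabled, register an allow-list,
   then enable it:

   #include <linux/io_uring.h>
   #include <string.h>
   #include <unistd.h>

   int main(void)
   {
   	struct io_uring_params p;
   	struct io_uring_restriction res;
   	int fd;

   	memset(&p, 0, sizeof(p));
   	p.flags = IORING_SETUP_R_DISABLED;	/* ring starts disabled */
   	fd = syscall(425 /* __NR_io_uring_setup */, 8, &p);
   	if (fd < 0)
   		return 1;

   	/* allow only IORING_OP_NOP submissions on this ring */
   	memset(&res, 0, sizeof(res));
   	res.opcode = IORING_RESTRICTION_SQE_OP;
   	res.sqe_op = IORING_OP_NOP;
   	if (syscall(427 /* __NR_io_uring_register */, fd,
   		    IORING_REGISTER_RESTRICTIONS, &res, 1) < 0)
   		return 1;

   	/* restrictions are sealed once the ring is enabled */
   	return syscall(427, fd, IORING_REGISTER_ENABLE_RINGS, NULL, 0) < 0;
   }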

* tag 'io_uring-5.10-2020-10-12' of git://git.kernel.dk/linux-block: (62 commits)
  io_uring: keep a pointer ref_node in file_data
  io_uring: refactor *files_register()'s error paths
  io_uring: clean file_data access in files_register
  io_uring: don't delay io_init_req() error check
  io_uring: clean leftovers after splitting issue
  io_uring: remove timeout.list after hrtimer cancel
  io_uring: use a separate struct for timeout_remove
  io_uring: improve submit_state.ios_left accounting
  io_uring: simplify io_file_get()
  io_uring: kill extra check in fixed io_file_get()
  io_uring: clean up ->files grabbing
  io_uring: don't io_prep_async_work() linked reqs
  io_uring: Convert advanced XArray uses to the normal API
  io_uring: Fix XArray usage in io_uring_add_task_file
  io_uring: Fix use of XArray in __io_uring_files_cancel
  io_uring: fix break condition for __io_uring_register() waiting
  io_uring: no need to call xa_destroy() on empty xarray
  io_uring: batch account ->req_issue and task struct references
  io_uring: kill callback_head argument for io_req_task_work_add()
  io_uring: move req preps out of io_issue_sqe()
  ...
parents 3ad11d7a b2e96852
@@ -62,6 +62,7 @@
 #include <linux/oom.h>
 #include <linux/compat.h>
 #include <linux/vmalloc.h>
+#include <linux/io_uring.h>
 #include <linux/uaccess.h>
 #include <asm/mmu_context.h>
@@ -1895,6 +1896,11 @@ static int bprm_execve(struct linux_binprm *bprm,
 	struct files_struct *displaced;
 	int retval;
+	/*
+	 * Cancel any io_uring activity across execve
+	 */
+	io_uring_task_cancel();
+
 	retval = unshare_files(&displaced);
 	if (retval)
 		return retval;
...
@@ -21,6 +21,7 @@
 #include <linux/rcupdate.h>
 #include <linux/close_range.h>
 #include <net/sock.h>
+#include <linux/io_uring.h>
 unsigned int sysctl_nr_open __read_mostly = 1024*1024;
 unsigned int sysctl_nr_open_min = BITS_PER_LONG;
@@ -452,6 +453,7 @@ void exit_files(struct task_struct *tsk)
 	struct files_struct * files = tsk->files;
 	if (files) {
+		io_uring_files_cancel(files);
 		task_lock(tsk);
 		tsk->files = NULL;
 		task_unlock(tsk);
...
@@ -17,6 +17,7 @@
 #include <linux/rculist_nulls.h>
 #include <linux/fs_struct.h>
 #include <linux/task_work.h>
+#include <linux/blk-cgroup.h>
 #include "io-wq.h"
@@ -26,9 +27,8 @@ enum {
 	IO_WORKER_F_UP		= 1,	/* up and active */
 	IO_WORKER_F_RUNNING	= 2,	/* account as running */
 	IO_WORKER_F_FREE	= 4,	/* worker on free list */
-	IO_WORKER_F_EXITING	= 8,	/* worker exiting */
-	IO_WORKER_F_FIXED	= 16,	/* static idle worker */
-	IO_WORKER_F_BOUND	= 32,	/* is doing bounded work */
+	IO_WORKER_F_FIXED	= 8,	/* static idle worker */
+	IO_WORKER_F_BOUND	= 16,	/* is doing bounded work */
 };
 enum {
@@ -57,9 +57,13 @@ struct io_worker {
 	struct rcu_head rcu;
 	struct mm_struct *mm;
+#ifdef CONFIG_BLK_CGROUP
+	struct cgroup_subsys_state *blkcg_css;
+#endif
 	const struct cred *cur_creds;
 	const struct cred *saved_creds;
 	struct files_struct *restore_files;
+	struct nsproxy *restore_nsproxy;
 	struct fs_struct *restore_fs;
 };
@@ -87,7 +91,7 @@ enum {
  */
 struct io_wqe {
 	struct {
-		spinlock_t lock;
+		raw_spinlock_t lock;
 		struct io_wq_work_list work_list;
 		unsigned long hash_map;
 		unsigned flags;
@@ -148,11 +152,12 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
 	if (current->files != worker->restore_files) {
 		__acquire(&wqe->lock);
-		spin_unlock_irq(&wqe->lock);
+		raw_spin_unlock_irq(&wqe->lock);
 		dropped_lock = true;
 		task_lock(current);
 		current->files = worker->restore_files;
+		current->nsproxy = worker->restore_nsproxy;
 		task_unlock(current);
 	}
@@ -166,7 +171,7 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
 	if (worker->mm) {
 		if (!dropped_lock) {
 			__acquire(&wqe->lock);
-			spin_unlock_irq(&wqe->lock);
+			raw_spin_unlock_irq(&wqe->lock);
 			dropped_lock = true;
 		}
 		__set_current_state(TASK_RUNNING);
@@ -175,6 +180,13 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
 		worker->mm = NULL;
 	}
+#ifdef CONFIG_BLK_CGROUP
+	if (worker->blkcg_css) {
+		kthread_associate_blkcg(NULL);
+		worker->blkcg_css = NULL;
+	}
+#endif
 	return dropped_lock;
 }
@@ -200,7 +212,6 @@ static void io_worker_exit(struct io_worker *worker)
 {
 	struct io_wqe *wqe = worker->wqe;
 	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
-	unsigned nr_workers;
 	/*
 	 * If we're not at zero, someone else is holding a brief reference
@@ -220,23 +231,19 @@ static void io_worker_exit(struct io_worker *worker)
 	worker->flags = 0;
 	preempt_enable();
-	spin_lock_irq(&wqe->lock);
+	raw_spin_lock_irq(&wqe->lock);
 	hlist_nulls_del_rcu(&worker->nulls_node);
 	list_del_rcu(&worker->all_list);
 	if (__io_worker_unuse(wqe, worker)) {
 		__release(&wqe->lock);
-		spin_lock_irq(&wqe->lock);
+		raw_spin_lock_irq(&wqe->lock);
 	}
 	acct->nr_workers--;
-	nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
-		wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
-	spin_unlock_irq(&wqe->lock);
-	/* all workers gone, wq exit can proceed */
-	if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
-		complete(&wqe->wq->done);
+	raw_spin_unlock_irq(&wqe->lock);
 	kfree_rcu(worker, rcu);
+	if (refcount_dec_and_test(&wqe->wq->refs))
+		complete(&wqe->wq->done);
 }
 static inline bool io_wqe_run_queue(struct io_wqe *wqe)
@@ -318,6 +325,7 @@ static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
 	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
 	worker->restore_files = current->files;
+	worker->restore_nsproxy = current->nsproxy;
 	worker->restore_fs = current->fs;
 	io_wqe_inc_running(wqe, worker);
 }
@@ -436,6 +444,17 @@ static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
 	work->flags |= IO_WQ_WORK_CANCEL;
 }
+static inline void io_wq_switch_blkcg(struct io_worker *worker,
+				      struct io_wq_work *work)
+{
+#ifdef CONFIG_BLK_CGROUP
+	if (work->blkcg_css != worker->blkcg_css) {
+		kthread_associate_blkcg(work->blkcg_css);
+		worker->blkcg_css = work->blkcg_css;
+	}
+#endif
+}
 static void io_wq_switch_creds(struct io_worker *worker,
 			       struct io_wq_work *work)
 {
@@ -454,6 +473,7 @@ static void io_impersonate_work(struct io_worker *worker,
 	if (work->files && current->files != work->files) {
 		task_lock(current);
 		current->files = work->files;
+		current->nsproxy = work->nsproxy;
 		task_unlock(current);
 	}
 	if (work->fs && current->fs != work->fs)
@@ -463,6 +483,7 @@ static void io_impersonate_work(struct io_worker *worker,
 	if (worker->cur_creds != work->creds)
 		io_wq_switch_creds(worker, work);
 	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->fsize;
+	io_wq_switch_blkcg(worker, work);
 }
 static void io_assign_current_work(struct io_worker *worker,
@@ -504,7 +525,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 		else if (!wq_list_empty(&wqe->work_list))
 			wqe->flags |= IO_WQE_FLAG_STALLED;
-		spin_unlock_irq(&wqe->lock);
+		raw_spin_unlock_irq(&wqe->lock);
 		if (!work)
 			break;
 		io_assign_current_work(worker, work);
@@ -538,17 +559,17 @@ static void io_worker_handle_work(struct io_worker *worker)
 				io_wqe_enqueue(wqe, linked);
 			if (hash != -1U && !next_hashed) {
-				spin_lock_irq(&wqe->lock);
+				raw_spin_lock_irq(&wqe->lock);
 				wqe->hash_map &= ~BIT_ULL(hash);
 				wqe->flags &= ~IO_WQE_FLAG_STALLED;
 				/* skip unnecessary unlock-lock wqe->lock */
 				if (!work)
 					goto get_next;
-				spin_unlock_irq(&wqe->lock);
+				raw_spin_unlock_irq(&wqe->lock);
 			}
 		} while (work);
-		spin_lock_irq(&wqe->lock);
+		raw_spin_lock_irq(&wqe->lock);
 	} while (1);
 }
@@ -563,7 +584,7 @@ static int io_wqe_worker(void *data)
 	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 loop:
-		spin_lock_irq(&wqe->lock);
+		raw_spin_lock_irq(&wqe->lock);
 		if (io_wqe_run_queue(wqe)) {
 			__set_current_state(TASK_RUNNING);
 			io_worker_handle_work(worker);
@@ -574,7 +595,7 @@ static int io_wqe_worker(void *data)
 			__release(&wqe->lock);
 			goto loop;
 		}
-		spin_unlock_irq(&wqe->lock);
+		raw_spin_unlock_irq(&wqe->lock);
 		if (signal_pending(current))
 			flush_signals(current);
 		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
@@ -586,11 +607,11 @@ static int io_wqe_worker(void *data)
 	}
 	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
-		spin_lock_irq(&wqe->lock);
+		raw_spin_lock_irq(&wqe->lock);
 		if (!wq_list_empty(&wqe->work_list))
 			io_worker_handle_work(worker);
 		else
-			spin_unlock_irq(&wqe->lock);
+			raw_spin_unlock_irq(&wqe->lock);
 	}
 	io_worker_exit(worker);
@@ -630,14 +651,14 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
 	worker->flags &= ~IO_WORKER_F_RUNNING;
-	spin_lock_irq(&wqe->lock);
+	raw_spin_lock_irq(&wqe->lock);
 	io_wqe_dec_running(wqe, worker);
-	spin_unlock_irq(&wqe->lock);
+	raw_spin_unlock_irq(&wqe->lock);
 }
 static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
 {
-	struct io_wqe_acct *acct =&wqe->acct[index];
+	struct io_wqe_acct *acct = &wqe->acct[index];
 	struct io_worker *worker;
 	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
@@ -656,7 +677,7 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
 		return false;
 	}
-	spin_lock_irq(&wqe->lock);
+	raw_spin_lock_irq(&wqe->lock);
 	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
 	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
 	worker->flags |= IO_WORKER_F_FREE;
@@ -665,11 +686,12 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
 	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
 		worker->flags |= IO_WORKER_F_FIXED;
 	acct->nr_workers++;
-	spin_unlock_irq(&wqe->lock);
+	raw_spin_unlock_irq(&wqe->lock);
 	if (index == IO_WQ_ACCT_UNBOUND)
 		atomic_inc(&wq->user->processes);
+	refcount_inc(&wq->refs);
 	wake_up_process(worker->task);
 	return true;
 }
@@ -685,28 +707,63 @@ static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
 	return acct->nr_workers < acct->max_workers;
 }
+static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
+{
+	send_sig(SIGINT, worker->task, 1);
+	return false;
+}
+
+/*
+ * Iterate the passed in list and call the specific function for each
+ * worker that isn't exiting
+ */
+static bool io_wq_for_each_worker(struct io_wqe *wqe,
+				  bool (*func)(struct io_worker *, void *),
+				  void *data)
+{
+	struct io_worker *worker;
+	bool ret = false;
+
+	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
+		if (io_worker_get(worker)) {
+			/* no task if node is/was offline */
+			if (worker->task)
+				ret = func(worker, data);
+			io_worker_release(worker);
+			if (ret)
+				break;
+		}
+	}
+
+	return ret;
+}
+
+static bool io_wq_worker_wake(struct io_worker *worker, void *data)
+{
+	wake_up_process(worker->task);
+	return false;
+}
+
 /*
  * Manager thread. Tasked with creating new workers, if we need them.
  */
 static int io_wq_manager(void *data)
 {
 	struct io_wq *wq = data;
-	int workers_to_create = num_possible_nodes();
 	int node;
 	/* create fixed workers */
-	refcount_set(&wq->refs, workers_to_create);
+	refcount_set(&wq->refs, 1);
 	for_each_node(node) {
 		if (!node_online(node))
 			continue;
-		if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
-			goto err;
-		workers_to_create--;
+		if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
+			continue;
+		set_bit(IO_WQ_BIT_ERROR, &wq->state);
+		set_bit(IO_WQ_BIT_EXIT, &wq->state);
+		goto out;
 	}
-	while (workers_to_create--)
-		refcount_dec(&wq->refs);
 	complete(&wq->done);
 	while (!kthread_should_stop()) {
@@ -720,12 +777,12 @@ static int io_wq_manager(void *data)
 			if (!node_online(node))
 				continue;
-			spin_lock_irq(&wqe->lock);
+			raw_spin_lock_irq(&wqe->lock);
 			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
 				fork_worker[IO_WQ_ACCT_BOUND] = true;
 			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
 				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
-			spin_unlock_irq(&wqe->lock);
+			raw_spin_unlock_irq(&wqe->lock);
 			if (fork_worker[IO_WQ_ACCT_BOUND])
 				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
 			if (fork_worker[IO_WQ_ACCT_UNBOUND])
@@ -738,12 +795,18 @@ static int io_wq_manager(void *data)
 	if (current->task_works)
 		task_work_run();
-	return 0;
-err:
-	set_bit(IO_WQ_BIT_ERROR, &wq->state);
-	set_bit(IO_WQ_BIT_EXIT, &wq->state);
-	if (refcount_sub_and_test(workers_to_create, &wq->refs))
+out:
+	if (refcount_dec_and_test(&wq->refs)) {
 		complete(&wq->done);
+		return 0;
+	}
+	/* if ERROR is set and we get here, we have workers to wake */
+	if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
+		rcu_read_lock();
+		for_each_node(node)
+			io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
+		rcu_read_unlock();
+	}
 	return 0;
 }
@@ -821,10 +884,10 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 	}
 	work_flags = work->flags;
-	spin_lock_irqsave(&wqe->lock, flags);
+	raw_spin_lock_irqsave(&wqe->lock, flags);
 	io_wqe_insert_work(wqe, work);
 	wqe->flags &= ~IO_WQE_FLAG_STALLED;
-	spin_unlock_irqrestore(&wqe->lock, flags);
+	raw_spin_unlock_irqrestore(&wqe->lock, flags);
 	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
 	    !atomic_read(&acct->nr_running))
@@ -850,37 +913,6 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
 	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
 }
-static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
-{
-	send_sig(SIGINT, worker->task, 1);
-	return false;
-}
-
-/*
- * Iterate the passed in list and call the specific function for each
- * worker that isn't exiting
- */
-static bool io_wq_for_each_worker(struct io_wqe *wqe,
-				  bool (*func)(struct io_worker *, void *),
-				  void *data)
-{
-	struct io_worker *worker;
-	bool ret = false;
-
-	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
-		if (io_worker_get(worker)) {
-			/* no task if node is/was offline */
-			if (worker->task)
-				ret = func(worker, data);
-			io_worker_release(worker);
-			if (ret)
-				break;
-		}
-	}
-
-	return ret;
-}
-
 void io_wq_cancel_all(struct io_wq *wq)
 {
 	int node;
@@ -951,13 +983,13 @@ static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
 	unsigned long flags;
 retry:
-	spin_lock_irqsave(&wqe->lock, flags);
+	raw_spin_lock_irqsave(&wqe->lock, flags);
 	wq_list_for_each(node, prev, &wqe->work_list) {
 		work = container_of(node, struct io_wq_work, list);
 		if (!match->fn(work, match->data))
 			continue;
 		io_wqe_remove_pending(wqe, work, prev);
-		spin_unlock_irqrestore(&wqe->lock, flags);
+		raw_spin_unlock_irqrestore(&wqe->lock, flags);
 		io_run_cancel(work, wqe);
 		match->nr_pending++;
 		if (!match->cancel_all)
@@ -966,7 +998,7 @@ static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
 		/* not safe to continue after unlock */
 		goto retry;
 	}
-	spin_unlock_irqrestore(&wqe->lock, flags);
+	raw_spin_unlock_irqrestore(&wqe->lock, flags);
 }
 static void io_wqe_cancel_running_work(struct io_wqe *wqe,
@@ -1074,7 +1106,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 	}
 	atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
 	wqe->wq = wq;
-	spin_lock_init(&wqe->lock);
+	raw_spin_lock_init(&wqe->lock);
 	INIT_WQ_LIST(&wqe->work_list);
 	INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
 	INIT_LIST_HEAD(&wqe->all_list);
@@ -1113,12 +1145,6 @@ bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
 	return refcount_inc_not_zero(&wq->use_refs);
 }
-static bool io_wq_worker_wake(struct io_worker *worker, void *data)
-{
-	wake_up_process(worker->task);
-	return false;
-}
-
 static void __io_wq_destroy(struct io_wq *wq)
 {
 	int node;
...
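
The io-wq refcount changes above replace the old "count the remaining workers under the lock" shutdown scheme: the manager now seeds wq->refs at 1, create_io_worker() takes one reference per worker, and each exiting worker (and finally the manager) drops one, with whoever drops the last reference completing wq->done. A minimal user-space model of that handoff, assuming C11 atomics as a stand-in for the kernel's refcount_t:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int wq_refs;

static void manager_start(void)  { atomic_store(&wq_refs, 1); }     /* refcount_set(&wq->refs, 1) */
static void worker_created(void) { atomic_fetch_add(&wq_refs, 1); } /* refcount_inc(&wq->refs) */

/* models refcount_dec_and_test(): true only for the caller that drops
 * the last reference, i.e. the one that must complete(&wq->done) */
static bool drop_ref(void)
{
	return atomic_fetch_sub(&wq_refs, 1) == 1;
}

int main(void)
{
	manager_start();
	worker_created();
	/* the worker exits first, then the manager: only the last drop signals done */
	return !(!drop_ref() && drop_ref());
}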
@@ -87,7 +87,11 @@ struct io_wq_work {
 	struct io_wq_work_node list;
 	struct files_struct *files;
 	struct mm_struct *mm;
+#ifdef CONFIG_BLK_CGROUP
+	struct cgroup_subsys_state *blkcg_css;
+#endif
 	const struct cred *creds;
+	struct nsproxy *nsproxy;
 	struct fs_struct *fs;
 	unsigned long fsize;
 	unsigned flags;
...
[diff for one file omitted: too large to display]
@@ -302,17 +302,20 @@ enum rw_hint {
 	WRITE_LIFE_EXTREME	= RWH_WRITE_LIFE_EXTREME,
 };
-#define IOCB_EVENTFD		(1 << 0)
-#define IOCB_APPEND		(1 << 1)
-#define IOCB_DIRECT		(1 << 2)
-#define IOCB_HIPRI		(1 << 3)
-#define IOCB_DSYNC		(1 << 4)
-#define IOCB_SYNC		(1 << 5)
-#define IOCB_WRITE		(1 << 6)
-#define IOCB_NOWAIT		(1 << 7)
+/* Match RWF_* bits to IOCB bits */
+#define IOCB_HIPRI		(__force int) RWF_HIPRI
+#define IOCB_DSYNC		(__force int) RWF_DSYNC
+#define IOCB_SYNC		(__force int) RWF_SYNC
+#define IOCB_NOWAIT		(__force int) RWF_NOWAIT
+#define IOCB_APPEND		(__force int) RWF_APPEND
+
+/* non-RWF related bits - start at 16 */
+#define IOCB_EVENTFD		(1 << 16)
+#define IOCB_DIRECT		(1 << 17)
+#define IOCB_WRITE		(1 << 18)
 /* iocb->ki_waitq is valid */
-#define IOCB_WAITQ		(1 << 8)
-#define IOCB_NOIO		(1 << 9)
+#define IOCB_WAITQ		(1 << 19)
+#define IOCB_NOIO		(1 << 20)
 struct kiocb {
 	struct file *ki_filp;
@@ -3302,6 +3305,9 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
 {
 	int kiocb_flags = 0;
+	/* make sure there's no overlap between RWF and private IOCB flags */
+	BUILD_BUG_ON((__force int) RWF_SUPPORTED & IOCB_EVENTFD);
+
 	if (!flags)
 		return 0;
 	if (unlikely(flags & ~RWF_SUPPORTED))
@@ -3310,16 +3316,11 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
 	if (flags & RWF_NOWAIT) {
 		if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
 			return -EOPNOTSUPP;
-		kiocb_flags |= IOCB_NOWAIT | IOCB_NOIO;
+		kiocb_flags |= IOCB_NOIO;
 	}
-	if (flags & RWF_HIPRI)
-		kiocb_flags |= IOCB_HIPRI;
-	if (flags & RWF_DSYNC)
-		kiocb_flags |= IOCB_DSYNC;
+	kiocb_flags |= (__force int) (flags & RWF_SUPPORTED);
 	if (flags & RWF_SYNC)
-		kiocb_flags |= (IOCB_DSYNC | IOCB_SYNC);
-	if (flags & RWF_APPEND)
-		kiocb_flags |= IOCB_APPEND;
+		kiocb_flags |= IOCB_DSYNC;
 	ki->ki_flags |= kiocb_flags;
 	return 0;
@@ -3499,15 +3500,6 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
 extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
 			   int advice);
-#if defined(CONFIG_IO_URING)
-extern struct sock *io_uring_get_socket(struct file *file);
-#else
-static inline struct sock *io_uring_get_socket(struct file *file)
-{
-	return NULL;
-}
-#endif
-
 int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags,
 			     unsigned int flags);
...
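
The kiocb rework above hinges on the RWF_* uapi bits and the low IOCB_* bits now being defined identically, so the old if-chain collapses into a single mask-and-OR, with only the RWF_SYNC-implies-DSYNC and RWF_NOWAIT cases handled explicitly. A standalone sketch of that mapping, using the uapi RWF_* values but local stand-in names rather than the kernel headers:

#include <assert.h>

/* values from include/uapi/linux/fs.h */
#define RWF_HIPRI	0x00000001
#define RWF_DSYNC	0x00000002
#define RWF_SYNC	0x00000004
#define RWF_NOWAIT	0x00000008
#define RWF_APPEND	0x00000010
#define RWF_SUPPORTED	(RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT | RWF_APPEND)

/* stand-ins for the new scheme, where IOCB_DSYNC == RWF_DSYNC etc. */
#define IOCB_DSYNC	RWF_DSYNC
#define IOCB_SYNC	RWF_SYNC

static int rwf_to_kiocb_flags(int flags)
{
	int kiocb_flags = 0;

	/* one OR replaces five separate conditionals */
	kiocb_flags |= flags & RWF_SUPPORTED;
	if (flags & RWF_SYNC)
		kiocb_flags |= IOCB_DSYNC;	/* O_SYNC semantics imply O_DSYNC */
	return kiocb_flags;
}

int main(void)
{
	assert(rwf_to_kiocb_flags(RWF_SYNC) == (IOCB_SYNC | IOCB_DSYNC));
	return 0;
}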
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_H
#define _LINUX_IO_URING_H

#include <linux/sched.h>
#include <linux/xarray.h>
#include <linux/percpu-refcount.h>

struct io_uring_task {
	/* submission side */
	struct xarray		xa;
	struct wait_queue_head	wait;
	struct file		*last;
	atomic_long_t		req_issue;

	/* completion side */
	bool			in_idle ____cacheline_aligned_in_smp;
	atomic_long_t		req_complete;
};

#if defined(CONFIG_IO_URING)
struct sock *io_uring_get_socket(struct file *file);
void __io_uring_task_cancel(void);
void __io_uring_files_cancel(struct files_struct *files);
void __io_uring_free(struct task_struct *tsk);

static inline void io_uring_task_cancel(void)
{
	if (current->io_uring && !xa_empty(&current->io_uring->xa))
		__io_uring_task_cancel();
}
static inline void io_uring_files_cancel(struct files_struct *files)
{
	if (current->io_uring && !xa_empty(&current->io_uring->xa))
		__io_uring_files_cancel(files);
}
static inline void io_uring_free(struct task_struct *tsk)
{
	if (tsk->io_uring)
		__io_uring_free(tsk);
}
#else
static inline struct sock *io_uring_get_socket(struct file *file)
{
	return NULL;
}
static inline void io_uring_task_cancel(void)
{
}
static inline void io_uring_files_cancel(struct files_struct *files)
{
}
static inline void io_uring_free(struct task_struct *tsk)
{
}
#endif

#endif
@@ -63,6 +63,7 @@
 struct sighand_struct;
 struct signal_struct;
 struct task_delay_info;
 struct task_group;
+struct io_uring_task;
 /*
  * Task state bitmask. NOTE! These bits are also
@@ -935,6 +936,10 @@ struct task_struct {
 	/* Open file information: */
 	struct files_struct		*files;
+#ifdef CONFIG_IO_URING
+	struct io_uring_task		*io_uring;
+#endif
 	/* Namespaces: */
 	struct nsproxy			*nsproxy;
...
@@ -95,6 +95,7 @@ enum {
 #define IORING_SETUP_CQSIZE	(1U << 3)	/* app defines CQ size */
 #define IORING_SETUP_CLAMP	(1U << 4)	/* clamp SQ/CQ ring sizes */
 #define IORING_SETUP_ATTACH_WQ	(1U << 5)	/* attach to existing wq */
+#define IORING_SETUP_R_DISABLED	(1U << 6)	/* start with ring disabled */
 enum {
 	IORING_OP_NOP,
@@ -224,6 +225,7 @@ struct io_cqring_offsets {
  */
 #define IORING_ENTER_GETEVENTS	(1U << 0)
 #define IORING_ENTER_SQ_WAKEUP	(1U << 1)
+#define IORING_ENTER_SQ_WAIT	(1U << 2)
 /*
  * Passed in for io_uring_setup(2). Copied back with updated info on success
@@ -255,17 +257,24 @@ struct io_uring_params {
 /*
  * io_uring_register(2) opcodes and arguments
  */
-#define IORING_REGISTER_BUFFERS		0
-#define IORING_UNREGISTER_BUFFERS	1
-#define IORING_REGISTER_FILES		2
-#define IORING_UNREGISTER_FILES		3
-#define IORING_REGISTER_EVENTFD		4
-#define IORING_UNREGISTER_EVENTFD	5
-#define IORING_REGISTER_FILES_UPDATE	6
-#define IORING_REGISTER_EVENTFD_ASYNC	7
-#define IORING_REGISTER_PROBE		8
-#define IORING_REGISTER_PERSONALITY	9
-#define IORING_UNREGISTER_PERSONALITY	10
+enum {
+	IORING_REGISTER_BUFFERS			= 0,
+	IORING_UNREGISTER_BUFFERS		= 1,
+	IORING_REGISTER_FILES			= 2,
+	IORING_UNREGISTER_FILES			= 3,
+	IORING_REGISTER_EVENTFD			= 4,
+	IORING_UNREGISTER_EVENTFD		= 5,
+	IORING_REGISTER_FILES_UPDATE		= 6,
+	IORING_REGISTER_EVENTFD_ASYNC		= 7,
+	IORING_REGISTER_PROBE			= 8,
+	IORING_REGISTER_PERSONALITY		= 9,
+	IORING_UNREGISTER_PERSONALITY		= 10,
+	IORING_REGISTER_RESTRICTIONS		= 11,
+	IORING_REGISTER_ENABLE_RINGS		= 12,
+
+	/* this goes last */
+	IORING_REGISTER_LAST
+};
 struct io_uring_files_update {
 	__u32 offset;
@@ -290,4 +299,34 @@ struct io_uring_probe {
 	struct io_uring_probe_op ops[0];
 };
+struct io_uring_restriction {
+	__u16 opcode;
+	union {
+		__u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
+		__u8 sqe_op;      /* IORING_RESTRICTION_SQE_OP */
+		__u8 sqe_flags;   /* IORING_RESTRICTION_SQE_FLAGS_* */
+	};
+	__u8 resv;
+	__u32 resv2[3];
+};
+
+/*
+ * io_uring_restriction->opcode values
+ */
+enum {
+	/* Allow an io_uring_register(2) opcode */
+	IORING_RESTRICTION_REGISTER_OP		= 0,
+
+	/* Allow an sqe opcode */
+	IORING_RESTRICTION_SQE_OP		= 1,
+
+	/* Allow sqe flags */
+	IORING_RESTRICTION_SQE_FLAGS_ALLOWED	= 2,
+
+	/* Require sqe flags (these flags must be set on each submission) */
+	IORING_RESTRICTION_SQE_FLAGS_REQUIRED	= 3,
+
+	IORING_RESTRICTION_LAST
+};
 #endif
@@ -114,6 +114,9 @@ struct task_struct init_task
 	.thread		= INIT_THREAD,
 	.fs		= &init_fs,
 	.files		= &init_files,
+#ifdef CONFIG_IO_URING
+	.io_uring	= NULL,
+#endif
 	.signal		= &init_signals,
 	.sighand	= &init_sighand,
 	.nsproxy	= &init_nsproxy,
...
@@ -95,6 +95,7 @@
 #include <linux/stackleak.h>
 #include <linux/kasan.h>
 #include <linux/scs.h>
+#include <linux/io_uring.h>
 #include <asm/pgalloc.h>
 #include <linux/uaccess.h>
@@ -728,6 +729,7 @@ void __put_task_struct(struct task_struct *tsk)
 	WARN_ON(refcount_read(&tsk->usage));
 	WARN_ON(tsk == current);
+	io_uring_free(tsk);
 	cgroup_free(tsk);
 	task_numa_free(tsk, true);
 	security_task_free(tsk);
@@ -1983,6 +1985,10 @@ static __latent_entropy struct task_struct *copy_process(
 	p->vtime.state = VTIME_INACTIVE;
 #endif
+#ifdef CONFIG_IO_URING
+	p->io_uring = NULL;
+#endif
 #if defined(SPLIT_RSS_COUNTING)
 	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
 #endif
...
@@ -8,6 +8,7 @@
 #include <net/af_unix.h>
 #include <net/scm.h>
 #include <linux/init.h>
+#include <linux/io_uring.h>
 #include "scm.h"
...