Commit 534121d2 authored by Linus Torvalds

Merge tag 'io_uring-5.5-20191226' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:

 - Removal of now unused busy wqe list (Hillf)

 - Add cond_resched() to io-wq work processing (Hillf)

 - And then the series that I hinted at from last week, which removes
   the sqe from the io_kiocb and keeps all sqe handling on the prep
   side. This guarantees that an opcode can't do the wrong thing and
   read the sqe more than once. This is unchanged from last week, no
   issues have been observed with this in testing. Hence I really think
   we should fold this into 5.5.
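
   For readers who haven't followed the series: the idea is that a prep
   handler is the only code that ever reads the submission queue entry
   (sqe). It copies whatever its opcode needs into the request itself, so
   the issue path never dereferences the sqe again and a reused ring slot
   can't be misread. Below is a minimal userspace sketch of that pattern,
   not the kernel's actual API; struct sqe, struct io_req, prep_rw() and
   issue_rw() are made-up illustrative names.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/*
 * Illustrative stand-ins only: these are not the kernel's real io_uring
 * structures or functions, just a userspace sketch of the prep-side pattern.
 */
struct sqe {                    /* submission queue entry, owned by the ring */
        uint64_t addr;
        uint32_t len;
        uint32_t flags;
};

struct io_req {                 /* request object; outlives the sqe slot */
        struct {                /* per-opcode state captured at prep time */
                uint64_t addr;
                uint32_t len;
                uint32_t flags;
        } rw;
        int prepped;
};

/* Prep side: the only code that is allowed to look at the sqe. */
static int prep_rw(struct io_req *req, const struct sqe *sqe)
{
        req->rw.addr  = sqe->addr;
        req->rw.len   = sqe->len;
        req->rw.flags = sqe->flags;
        req->prepped  = 1;
        return 0;
}

/* Issue side: works purely from state copied into the request, so it cannot
 * re-read (or mis-read) the sqe even if the ring slot has been reused. */
static int issue_rw(const struct io_req *req)
{
        if (!req->prepped)
                return -1;
        printf("issue: addr=0x%" PRIx64 " len=%u flags=0x%x\n",
               req->rw.addr, req->rw.len, req->rw.flags);
        return 0;
}

int main(void)
{
        struct sqe sqe = { .addr = 0x1000, .len = 4096, .flags = 0 };
        struct io_req req = { 0 };

        prep_rw(&req, &sqe);

        /* The ring slot may be recycled as soon as prep is done; clobbering
         * it here changes nothing because issue never touches the sqe. */
        memset(&sqe, 0, sizeof(sqe));

        return issue_rw(&req);
}

   The kernel series applies the same discipline in its per-opcode prep
   handlers, which is what gives the "read the sqe only once" guarantee
   described above.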

* tag 'io_uring-5.5-20191226' of git://git.kernel.dk/linux-block:
  io-wq: add cond_resched() to worker thread
  io-wq: remove unused busy list from io_sqe
  io_uring: pass in 'sqe' to the prep handlers
  io_uring: standardize the prep methods
  io_uring: read 'count' for IORING_OP_TIMEOUT in prep handler
  io_uring: move all prep state for IORING_OP_{SEND,RECV}MSG to prep handler
  io_uring: move all prep state for IORING_OP_CONNECT to prep handler
  io_uring: add and use struct io_rw for read/writes
  io_uring: use u64_to_user_ptr() consistently
parents 0f710a55 fd1c4bc6
@@ -92,7 +92,6 @@ struct io_wqe {
 	struct io_wqe_acct acct[2];
 
 	struct hlist_nulls_head free_list;
-	struct hlist_nulls_head busy_list;
 	struct list_head all_list;
 
 	struct io_wq *wq;
@@ -327,7 +326,6 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
 	if (worker->flags & IO_WORKER_F_FREE) {
 		worker->flags &= ~IO_WORKER_F_FREE;
 		hlist_nulls_del_init_rcu(&worker->nulls_node);
-		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
 	}
 
 	/*
@@ -365,7 +363,6 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
 {
 	if (!(worker->flags & IO_WORKER_F_FREE)) {
 		worker->flags |= IO_WORKER_F_FREE;
-		hlist_nulls_del_init_rcu(&worker->nulls_node);
 		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
 	}
 
@@ -432,6 +429,8 @@ static void io_worker_handle_work(struct io_worker *worker)
 		if (signal_pending(current))
 			flush_signals(current);
 
+		cond_resched();
+
 		spin_lock_irq(&worker->lock);
 		worker->cur_work = work;
 		spin_unlock_irq(&worker->lock);
@@ -798,10 +797,6 @@ void io_wq_cancel_all(struct io_wq *wq)
 
 	set_bit(IO_WQ_BIT_CANCEL, &wq->state);
 
-	/*
-	 * Browse both lists, as there's a gap between handing work off
-	 * to a worker and the worker putting itself on the busy_list
-	 */
 	rcu_read_lock();
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
@@ -1049,7 +1044,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 		spin_lock_init(&wqe->lock);
 		INIT_WQ_LIST(&wqe->work_list);
 		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
-		INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
 		INIT_LIST_HEAD(&wqe->all_list);
 	}
 
This diff is collapsed.