Commit 405b4dc1 authored by Stefan Roesch, committed by Jens Axboe

io-uring: move io_wait_queue definition to header file

This moves the definition of the io_wait_queue structure to the header
file so it can also be used from other files.
Signed-off-by: Stefan Roesch <shr@devkernel.io>
Link: https://lore.kernel.org/r/20230608163839.2891748-4-shr@devkernel.io
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent adaad279
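
Why the move matters: once struct io_wait_queue and io_should_wake() live in
io_uring/io_uring.h, any other file in the io_uring/ directory can test the
wake condition without duplicating the definition. A minimal sketch of such a
consumer, ahead of the diff below (the file purpose, helper name, and the
busy-poll loop are hypothetical illustrations, not part of this commit):

	/* hypothetical consumer in io_uring/, e.g. a busy-poll helper --
	 * sketch only, not part of this commit */
	#include <linux/processor.h>	/* cpu_relax() */

	#include "io_uring.h"		/* struct io_wait_queue, io_should_wake() */

	/*
	 * Spin until the wake condition recorded in *iowq is satisfied.
	 * Only possible from a separate translation unit now that the
	 * structure and helper are exported by the header.
	 */
	static void busy_poll_until_wake(struct io_wait_queue *iowq)
	{
		while (!io_should_wake(iowq))
			cpu_relax();
	}
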
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2477,33 +2477,12 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	return ret;
 }
 
-struct io_wait_queue {
-	struct wait_queue_entry wq;
-	struct io_ring_ctx *ctx;
-	unsigned cq_tail;
-	unsigned nr_timeouts;
-	ktime_t timeout;
-};
-
 static inline bool io_has_work(struct io_ring_ctx *ctx)
 {
 	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
 	       !llist_empty(&ctx->work_llist);
 }
 
-static inline bool io_should_wake(struct io_wait_queue *iowq)
-{
-	struct io_ring_ctx *ctx = iowq->ctx;
-	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
-
-	/*
-	 * Wake up if we have enough events, or if a timeout occurred since we
-	 * started waiting. For timeouts, we always want to return to userspace,
-	 * regardless of event count.
-	 */
-	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
-}
-
 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 			    int wake_flags, void *key)
 {
...
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -35,6 +35,28 @@ enum {
 	IOU_STOP_MULTISHOT = -ECANCELED,
 };
 
+struct io_wait_queue {
+	struct wait_queue_entry wq;
+	struct io_ring_ctx *ctx;
+	unsigned cq_tail;
+	unsigned nr_timeouts;
+	ktime_t timeout;
+};
+
+static inline bool io_should_wake(struct io_wait_queue *iowq)
+{
+	struct io_ring_ctx *ctx = iowq->ctx;
+	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
+
+	/*
+	 * Wake up if we have enough events, or if a timeout occurred since we
+	 * started waiting. For timeouts, we always want to return to userspace,
+	 * regardless of event count.
+	 */
+	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+}
+
 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 void io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
...
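
One detail worth calling out in io_should_wake(): cq.tail is a free-running
unsigned 32-bit counter, so a plain tail >= cq_tail comparison would misfire
once the counter wraps. Computing the difference in unsigned arithmetic and
reading it back as a signed int yields the correct ordering whenever the two
values are within 2^31 of each other, which is what the kernel relies on here.
A standalone userspace sketch of the idiom (have_enough_events is a made-up
name for illustration, not kernel code):

	#include <stdio.h>

	/*
	 * Mirror of the cast-and-compare idiom in io_should_wake(): the
	 * unsigned subtraction wraps, and the conversion to int recovers a
	 * signed distance (relying on the usual two's-complement conversion,
	 * exactly as the kernel code does).
	 */
	static int have_enough_events(unsigned tail, unsigned target)
	{
		int dist = (int)(tail - target);

		return dist >= 0;
	}

	int main(void)
	{
		unsigned target = 0xfffffffeu + 3u;	/* wraps to 0x00000001 */

		printf("%d\n", have_enough_events(0xfffffffeu, target)); /* 0: 3 behind */
		printf("%d\n", have_enough_events(0x00000001u, target)); /* 1: reached */
		printf("%d\n", have_enough_events(0x00000002u, target)); /* 1: past */
		return 0;
	}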