Commit 5853a7b5 authored by Jens Axboe

Merge branch 'for-6.1/io_uring' into for-6.1/passthrough

* for-6.1/io_uring: (56 commits)
  io_uring/net: fix notif cqe reordering
  io_uring/net: don't update msg_name if not provided
  io_uring: don't gate task_work run on TIF_NOTIFY_SIGNAL
  io_uring/rw: defer fsnotify calls to task context
  io_uring/net: fix fast_iov assignment in io_setup_async_msg()
  io_uring/net: fix non-zc send with address
  io_uring/net: don't skip notifs for failed requests
  io_uring/rw: don't lose short results on io_setup_async_rw()
  io_uring/rw: fix unexpected link breakage
  io_uring/net: fix cleanup double free free_iov init
  io_uring: fix CQE reordering
  io_uring/net: fix UAF in io_sendrecv_fail()
  selftest/net: adjust io_uring sendzc notif handling
  io_uring: ensure local task_work marks task as running
  io_uring/net: zerocopy sendmsg
  io_uring/net: combine fail handlers
  io_uring/net: rename io_sendzc()
  io_uring/net: support non-zerocopy sendto
  io_uring/net: refactor io_setup_async_addr
  io_uring/net: don't lose partial send_zc on fail
  ...
parents 736feaa3 108893dd
@@ -1241,7 +1241,7 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
	complete(&wait->done);
 }

-static bool blk_rq_is_poll(struct request *rq)
+bool blk_rq_is_poll(struct request *rq)
 {
	if (!rq->mq_hctx)
		return false;
@@ -1251,6 +1251,7 @@ static bool blk_rq_is_poll(struct request *rq)
		return false;
	return true;
 }
+EXPORT_SYMBOL_GPL(blk_rq_is_poll);

 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
...
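With blk_rq_is_poll() exported above, a driver's completion handler can check whether a request was issued as polled and finish it inline instead of bouncing the rest of the work to task context. A minimal sketch of that pattern, not taken from this merge (my_inline_complete()/my_defer_complete() are hypothetical helpers), mirroring the NVMe end_io change further down:

	#include <linux/blk-mq.h>

	/* Sketch: complete polled passthrough requests inline, defer the rest. */
	static void my_end_io(struct request *req, blk_status_t err)
	{
		if (blk_rq_is_poll(req))
			my_inline_complete(req, err);	/* hypothetical */
		else
			my_defer_complete(req, err);	/* hypothetical */
	}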
@@ -3976,6 +3976,7 @@ static const struct file_operations nvme_ns_chr_fops = {
	.unlocked_ioctl	= nvme_ns_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.uring_cmd	= nvme_ns_chr_uring_cmd,
+	.uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
 };

 static int nvme_add_ns_cdev(struct nvme_ns *ns)
...
@@ -398,11 +398,19 @@ static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	/* extract bio before reusing the same field for request */
	struct bio *bio = pdu->bio;
+	void *cookie = READ_ONCE(ioucmd->cookie);

	pdu->req = req;
	req->bio = bio;
-	/* this takes care of moving rest of completion-work to task context */
-	io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+	/*
+	 * For iopoll, complete it directly.
+	 * Otherwise, move the completion to task work.
+	 */
+	if (cookie != NULL && blk_rq_is_poll(req))
+		nvme_uring_task_cb(ioucmd);
+	else
+		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
 }
 static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
@@ -452,7 +460,10 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		rq_flags = REQ_NOWAIT;
		blk_flags = BLK_MQ_REQ_NOWAIT;
	}
+	if (issue_flags & IO_URING_F_IOPOLL)
+		rq_flags |= REQ_POLLED;

+retry:
	req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
			d.data_len, nvme_to_user_ptr(d.metadata),
			d.metadata_len, 0, &meta, d.timeout_ms ?
@@ -463,6 +474,17 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
	req->end_io = nvme_uring_cmd_end_io;
	req->end_io_data = ioucmd;

+	if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
+		if (unlikely(!req->bio)) {
+			/* we can't poll this, so alloc regular req instead */
+			blk_mq_free_request(req);
+			rq_flags &= ~REQ_POLLED;
+			goto retry;
+		} else {
+			WRITE_ONCE(ioucmd->cookie, req->bio);
+			req->bio->bi_opf |= REQ_POLLED;
+		}
+	}
	/* to free bio on completion, as req->bio will be null at that time */
	pdu->bio = req->bio;
	pdu->meta = meta;
@@ -566,9 +588,6 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)

 static int nvme_uring_cmd_checks(unsigned int issue_flags)
 {
-	/* IOPOLL not supported yet */
-	if (issue_flags & IO_URING_F_IOPOLL)
-		return -EOPNOTSUPP;

	/* NVMe passthrough requires big SQE/CQE support */
	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
@@ -611,6 +630,25 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
 }
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
struct io_comp_batch *iob,
unsigned int poll_flags)
{
struct bio *bio;
int ret = 0;
struct nvme_ns *ns;
struct request_queue *q;
rcu_read_lock();
bio = READ_ONCE(ioucmd->cookie);
ns = container_of(file_inode(ioucmd->file)->i_cdev,
struct nvme_ns, cdev);
q = ns->queue;
if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
ret = bio_poll(bio, iob, poll_flags);
rcu_read_unlock();
return ret;
}
#ifdef CONFIG_NVME_MULTIPATH #ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp, struct nvme_ns_head *head, int srcu_idx) void __user *argp, struct nvme_ns_head *head, int srcu_idx)
...@@ -692,6 +730,31 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd, ...@@ -692,6 +730,31 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
srcu_read_unlock(&head->srcu, srcu_idx); srcu_read_unlock(&head->srcu, srcu_idx);
return ret; return ret;
} }
int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
struct io_comp_batch *iob,
unsigned int poll_flags)
{
struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
int srcu_idx = srcu_read_lock(&head->srcu);
struct nvme_ns *ns = nvme_find_path(head);
struct bio *bio;
int ret = 0;
struct request_queue *q;
if (ns) {
rcu_read_lock();
bio = READ_ONCE(ioucmd->cookie);
q = ns->queue;
if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
&& bio->bi_bdev)
ret = bio_poll(bio, iob, poll_flags);
rcu_read_unlock();
}
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */ #endif /* CONFIG_NVME_MULTIPATH */
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags) int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
...@@ -699,6 +762,10 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags) ...@@ -699,6 +762,10 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
struct nvme_ctrl *ctrl = ioucmd->file->private_data; struct nvme_ctrl *ctrl = ioucmd->file->private_data;
int ret; int ret;
/* IOPOLL not supported yet */
if (issue_flags & IO_URING_F_IOPOLL)
return -EOPNOTSUPP;
ret = nvme_uring_cmd_checks(issue_flags); ret = nvme_uring_cmd_checks(issue_flags);
if (ret) if (ret)
return ret; return ret;
...
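For context, passthrough polling is only reachable when userspace sets the ring up with IOPOLL plus the big SQE/CQE formats that nvme_uring_cmd_checks() above insists on. A hedged userspace sketch of such a setup, assuming a liburing version whose headers expose these flags:

	#include <liburing.h>

	int setup_passthru_poll_ring(struct io_uring *ring)
	{
		struct io_uring_params p = { };

		/* polled completions + 128-byte SQEs / 32-byte CQEs for NVMe passthrough */
		p.flags = IORING_SETUP_IOPOLL | IORING_SETUP_SQE128 | IORING_SETUP_CQE32;
		return io_uring_queue_init_params(64, ring, &p);
	}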
@@ -439,6 +439,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
	.unlocked_ioctl	= nvme_ns_head_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.uring_cmd	= nvme_ns_head_chr_uring_cmd,
+	.uring_cmd_iopoll = nvme_ns_head_chr_uring_cmd_iopoll,
 };

 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
...
...@@ -843,6 +843,10 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd, ...@@ -843,6 +843,10 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg); unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd, long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg); unsigned long arg);
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
unsigned int issue_flags); unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd, int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
......
@@ -69,17 +69,17 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
	 * it returns false, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
-	if (WARN_ON_ONCE(current->in_eventfd_signal))
+	if (WARN_ON_ONCE(current->in_eventfd))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
-	current->in_eventfd_signal = 1;
+	current->in_eventfd = 1;
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
-	current->in_eventfd_signal = 0;
+	current->in_eventfd = 0;
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
...@@ -253,8 +253,10 @@ static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to) ...@@ -253,8 +253,10 @@ static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
} }
eventfd_ctx_do_read(ctx, &ucnt); eventfd_ctx_do_read(ctx, &ucnt);
current->in_eventfd = 1;
if (waitqueue_active(&ctx->wqh)) if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, EPOLLOUT); wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
current->in_eventfd = 0;
spin_unlock_irq(&ctx->wqh.lock); spin_unlock_irq(&ctx->wqh.lock);
if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt))) if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
return -EFAULT; return -EFAULT;
...@@ -301,8 +303,10 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c ...@@ -301,8 +303,10 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
} }
if (likely(res > 0)) { if (likely(res > 0)) {
ctx->count += ucnt; ctx->count += ucnt;
current->in_eventfd = 1;
if (waitqueue_active(&ctx->wqh)) if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, EPOLLIN); wake_up_locked_poll(&ctx->wqh, EPOLLIN);
current->in_eventfd = 0;
} }
spin_unlock_irq(&ctx->wqh.lock); spin_unlock_irq(&ctx->wqh.lock);
...
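The in_eventfd flag set around the wakeups above is what eventfd_signal_allowed() (see include/linux/eventfd.h below) reports, so a caller that may run from inside an eventfd wakeup can avoid recursing. A rough caller-side sketch, not from this merge; defer_my_signal() is a placeholder for whatever deferral mechanism the caller has (io_uring itself uses an RCU callback later in this merge):

	#include <linux/eventfd.h>

	/* Sketch: only signal directly when we are not already inside an
	 * eventfd wakeup on this task; otherwise punt to a safe context. */
	static void my_notify(struct eventfd_ctx *evfd)
	{
		if (eventfd_signal_allowed())
			eventfd_signal(evfd, 1);
		else
			defer_my_signal(evfd);	/* hypothetical */
	}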
...@@ -987,6 +987,7 @@ int blk_rq_map_kern(struct request_queue *, struct request *, void *, ...@@ -987,6 +987,7 @@ int blk_rq_map_kern(struct request_queue *, struct request *, void *,
int blk_rq_append_bio(struct request *rq, struct bio *bio); int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head); void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head); blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);
struct req_iterator { struct req_iterator {
struct bvec_iter iter; struct bvec_iter iter;
......
@@ -46,7 +46,7 @@ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);

 static inline bool eventfd_signal_allowed(void)
 {
-	return !current->in_eventfd_signal;
+	return !current->in_eventfd;
 }

 #else /* CONFIG_EVENTFD */
...
...@@ -2132,6 +2132,8 @@ struct file_operations { ...@@ -2132,6 +2132,8 @@ struct file_operations {
loff_t len, unsigned int remap_flags); loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int); int (*fadvise)(struct file *, loff_t, loff_t, int);
int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags); int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *,
unsigned int poll_flags);
} __randomize_layout; } __randomize_layout;
struct inode_operations { struct inode_operations {
......
@@ -20,8 +20,12 @@ enum io_uring_cmd_flags {
 struct io_uring_cmd {
	struct file	*file;
	const void	*cmd;
-	/* callback to defer completions to task context */
-	void (*task_work_cb)(struct io_uring_cmd *cmd);
+	union {
+		/* callback to defer completions to task context */
+		void (*task_work_cb)(struct io_uring_cmd *cmd);
+		/* used for polled completion */
+		void *cookie;
+	};
	u32		cmd_op;
	u32		pad;
	u8		pdu[32]; /* available inline for free use */
...
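Because cookie and task_work_cb now share storage, a ->uring_cmd() implementation uses one or the other per request: the poll cookie is written at issue time for IOPOLL rings, while a non-polled command writes the callback later via io_uring_cmd_complete_in_task(). A condensed sketch of that split, paraphrasing the NVMe usage earlier in this merge (my_task_cb is a hypothetical callback):

	/* Sketch: at issue time, a polled command records what to poll. */
	if (issue_flags & IO_URING_F_IOPOLL)
		WRITE_ONCE(ioucmd->cookie, req->bio);

	/* A non-polled command instead defers its completion, which reuses
	 * the same storage for the callback pointer. */
	io_uring_cmd_complete_in_task(ioucmd, my_task_cb);	/* hypothetical cb */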
...@@ -184,6 +184,8 @@ struct io_ev_fd { ...@@ -184,6 +184,8 @@ struct io_ev_fd {
struct eventfd_ctx *cq_ev_fd; struct eventfd_ctx *cq_ev_fd;
unsigned int eventfd_async: 1; unsigned int eventfd_async: 1;
struct rcu_head rcu; struct rcu_head rcu;
atomic_t refs;
atomic_t ops;
}; };
struct io_alloc_cache { struct io_alloc_cache {
...@@ -301,6 +303,8 @@ struct io_ring_ctx { ...@@ -301,6 +303,8 @@ struct io_ring_ctx {
struct io_hash_table cancel_table; struct io_hash_table cancel_table;
bool poll_multi_queue; bool poll_multi_queue;
struct llist_head work_llist;
struct list_head io_buffers_comp; struct list_head io_buffers_comp;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
......
@@ -936,7 +936,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_EVENTFD
	/* Recursion prevention for eventfd_signal() */
-	unsigned			in_eventfd_signal:1;
+	unsigned			in_eventfd:1;
 #endif
 #ifdef CONFIG_IOMMU_SVA
	unsigned			pasid_activated:1;
...
...@@ -655,6 +655,35 @@ TRACE_EVENT(io_uring_short_write, ...@@ -655,6 +655,35 @@ TRACE_EVENT(io_uring_short_write,
__entry->wanted, __entry->got) __entry->wanted, __entry->got)
); );
/*
* io_uring_local_work_run - ran ring local task work
*
* @ctx: pointer to an io_ring_ctx
* @count: how many functions it ran
* @loops: how many loops it ran
*
*/
TRACE_EVENT(io_uring_local_work_run,
TP_PROTO(void *ctx, int count, unsigned int loops),
TP_ARGS(ctx, count, loops),
TP_STRUCT__entry (
__field(void *, ctx )
__field(int, count )
__field(unsigned int, loops )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->count = count;
__entry->loops = loops;
),
TP_printk("ring %p, count %d, loops %u", __entry->ctx, __entry->count, __entry->loops)
);
#endif /* _TRACE_IO_URING_H */ #endif /* _TRACE_IO_URING_H */
/* This part must be outside protection */ /* This part must be outside protection */
......
...@@ -157,6 +157,13 @@ enum { ...@@ -157,6 +157,13 @@ enum {
*/ */
#define IORING_SETUP_SINGLE_ISSUER (1U << 12) #define IORING_SETUP_SINGLE_ISSUER (1U << 12)
/*
* Defer running task work to get events.
* Rather than running bits of task work whenever the task transitions
* try to do it just before it is needed.
*/
#define IORING_SETUP_DEFER_TASKRUN (1U << 13)
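
DEFER_TASKRUN only makes sense together with SINGLE_ISSUER, and the setup path later in this merge rejects the combination without it. A hedged liburing sketch of a conforming setup, assuming headers new enough to define both flags:

	#include <liburing.h>

	int setup_defer_taskrun_ring(struct io_uring *ring)
	{
		struct io_uring_params p = { };

		p.flags = IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
		/* completions are then reaped only when this task enters the
		 * kernel, e.g. via io_uring_submit_and_wait() from the
		 * submitting thread */
		return io_uring_queue_init_params(64, ring, &p);
	}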
enum io_uring_op { enum io_uring_op {
IORING_OP_NOP, IORING_OP_NOP,
IORING_OP_READV, IORING_OP_READV,
@@ -206,6 +213,7 @@ enum io_uring_op {
	IORING_OP_SOCKET,
	IORING_OP_URING_CMD,
	IORING_OP_SEND_ZC,
+	IORING_OP_SENDMSG_ZC,

	/* this goes last, obviously */
	IORING_OP_LAST,
...
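The new IORING_OP_SENDMSG_ZC opcode mirrors IORING_OP_SENDMSG with zero-copy notifications, per the io_uring/net patches in the shortlog. A hedged userspace sketch, assuming a liburing release that provides io_uring_prep_sendmsg_zc():

	#include <liburing.h>

	/* Sketch: queue a zero-copy sendmsg; expect a completion CQE and,
	 * when IORING_CQE_F_MORE is set on it, a later notification CQE. */
	static void queue_sendmsg_zc(struct io_uring *ring, int sockfd,
				     const struct msghdr *msg)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

		io_uring_prep_sendmsg_zc(sqe, sockfd, msg, 0);
		io_uring_sqe_set_data64(sqe, 0x1234);	/* arbitrary user_data */
	}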
@@ -292,7 +292,7 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
			break;

		mutex_unlock(&ctx->uring_lock);
-		ret = io_run_task_work_sig();
+		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			mutex_lock(&ctx->uring_lock);
			break;
...
@@ -60,13 +60,15 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
	unsigned int cq_head = READ_ONCE(r->cq.head);
	unsigned int cq_tail = READ_ONCE(r->cq.tail);
	unsigned int cq_shift = 0;
+	unsigned int sq_shift = 0;
	unsigned int sq_entries, cq_entries;
	bool has_lock;
-	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
	unsigned int i;

-	if (is_cqe32)
+	if (ctx->flags & IORING_SETUP_CQE32)
		cq_shift = 1;
+	if (ctx->flags & IORING_SETUP_SQE128)
+		sq_shift = 1;

	/*
	 * we may get imprecise sqe and cqe info if uring is actively running
@@ -82,19 +84,36 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
	seq_printf(m, "CqHead:\t%u\n", cq_head);
	seq_printf(m, "CqTail:\t%u\n", cq_tail);
	seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
-	seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
+	seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head);
	sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
	for (i = 0; i < sq_entries; i++) {
		unsigned int entry = i + sq_head;
-		unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
		struct io_uring_sqe *sqe;
+		unsigned int sq_idx;

+		sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
		if (sq_idx > sq_mask)
			continue;
-		sqe = &ctx->sq_sqes[sq_idx];
-		seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
-			   sq_idx, sqe->opcode, sqe->fd, sqe->flags,
-			   sqe->user_data);
+		sqe = &ctx->sq_sqes[sq_idx << 1];
+		seq_printf(m, "%5u: opcode:%s, fd:%d, flags:%x, off:%llu, "
+			      "addr:0x%llx, rw_flags:0x%x, buf_index:%d "
+			      "user_data:%llu",
+			   sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd,
+			   sqe->flags, (unsigned long long) sqe->off,
+			   (unsigned long long) sqe->addr, sqe->rw_flags,
+			   sqe->buf_index, sqe->user_data);
+		if (sq_shift) {
+			u64 *sqeb = (void *) (sqe + 1);
+			int size = sizeof(struct io_uring_sqe) / sizeof(u64);
+			int j;
+
+			for (j = 0; j < size; j++) {
+				seq_printf(m, ", e%d:0x%llx", j,
+					   (unsigned long long) *sqeb);
+				sqeb++;
+			}
+		}
+		seq_printf(m, "\n");
	}
seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head); seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
cq_entries = min(cq_tail - cq_head, ctx->cq_entries); cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
...@@ -102,16 +121,13 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, ...@@ -102,16 +121,13 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
unsigned int entry = i + cq_head; unsigned int entry = i + cq_head;
struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift]; struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];
if (!is_cqe32) { seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x",
seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n",
entry & cq_mask, cqe->user_data, cqe->res, entry & cq_mask, cqe->user_data, cqe->res,
cqe->flags); cqe->flags);
} else { if (cq_shift)
seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x, " seq_printf(m, ", extra1:%llu, extra2:%llu\n",
"extra1:%llu, extra2:%llu\n", cqe->big_cqe[0], cqe->big_cqe[1]);
entry & cq_mask, cqe->user_data, cqe->res, seq_printf(m, "\n");
cqe->flags, cqe->big_cqe[0], cqe->big_cqe[1]);
}
} }
/* /*
......
...@@ -125,6 +125,11 @@ enum { ...@@ -125,6 +125,11 @@ enum {
IO_CHECK_CQ_DROPPED_BIT, IO_CHECK_CQ_DROPPED_BIT,
}; };
enum {
IO_EVENTFD_OP_SIGNAL_BIT,
IO_EVENTFD_OP_FREE_BIT,
};
struct io_defer_entry { struct io_defer_entry {
struct list_head list; struct list_head list;
struct io_kiocb *req; struct io_kiocb *req;
...@@ -142,7 +147,7 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, ...@@ -142,7 +147,7 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
static void io_dismantle_req(struct io_kiocb *req); static void io_dismantle_req(struct io_kiocb *req);
static void io_clean_op(struct io_kiocb *req); static void io_clean_op(struct io_kiocb *req);
static void io_queue_sqe(struct io_kiocb *req); static void io_queue_sqe(struct io_kiocb *req);
static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
static void __io_submit_flush_completions(struct io_ring_ctx *ctx); static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
static struct kmem_cache *req_cachep; static struct kmem_cache *req_cachep;
...@@ -316,6 +321,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) ...@@ -316,6 +321,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->rsrc_ref_list); INIT_LIST_HEAD(&ctx->rsrc_ref_list);
INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work); INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
init_llist_head(&ctx->rsrc_put_llist); init_llist_head(&ctx->rsrc_put_llist);
init_llist_head(&ctx->work_llist);
INIT_LIST_HEAD(&ctx->tctx_list); INIT_LIST_HEAD(&ctx->tctx_list);
ctx->submit_state.free_list.next = NULL; ctx->submit_state.free_list.next = NULL;
INIT_WQ_LIST(&ctx->locked_free_list); INIT_WQ_LIST(&ctx->locked_free_list);
@@ -477,25 +483,28 @@ static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
	}
 }

-static void io_eventfd_signal(struct io_ring_ctx *ctx)
+static void io_eventfd_ops(struct rcu_head *rcu)
 {
-	struct io_ev_fd *ev_fd;
-	bool skip;
+	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
+	int ops = atomic_xchg(&ev_fd->ops, 0);

-	spin_lock(&ctx->completion_lock);
-	/*
-	 * Eventfd should only get triggered when at least one event has been
-	 * posted. Some applications rely on the eventfd notification count only
-	 * changing IFF a new CQE has been added to the CQ ring. There's no
-	 * depedency on 1:1 relationship between how many times this function is
-	 * called (and hence the eventfd count) and number of CQEs posted to the
-	 * CQ ring.
+	if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
+		eventfd_signal(ev_fd->cq_ev_fd, 1);
+
+	/* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
+	 * ordering in a race but if references are 0 we know we have to free
+	 * it regardless.
	 */
-	skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
-	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
-	spin_unlock(&ctx->completion_lock);
-	if (skip)
-		return;
+	if (atomic_dec_and_test(&ev_fd->refs)) {
+		eventfd_ctx_put(ev_fd->cq_ev_fd);
+		kfree(ev_fd);
+	}
+}
+
+static void io_eventfd_signal(struct io_ring_ctx *ctx)
+{
+	struct io_ev_fd *ev_fd = NULL;

	rcu_read_lock();
	/*
@@ -513,13 +522,46 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
		goto out;
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		goto out;
+	if (ev_fd->eventfd_async && !io_wq_current_is_worker())
+		goto out;

-	if (!ev_fd->eventfd_async || io_wq_current_is_worker())
+	if (likely(eventfd_signal_allowed())) {
		eventfd_signal(ev_fd->cq_ev_fd, 1);
+	} else {
+		atomic_inc(&ev_fd->refs);
+		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
+			call_rcu(&ev_fd->rcu, io_eventfd_ops);
+		else
+			atomic_dec(&ev_fd->refs);
+	}
 out:
	rcu_read_unlock();
 }

+static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
+{
+	bool skip;
+
+	spin_lock(&ctx->completion_lock);
+
+	/*
+	 * Eventfd should only get triggered when at least one event has been
+	 * posted. Some applications rely on the eventfd notification count
+	 * only changing IFF a new CQE has been added to the CQ ring. There's
+	 * no depedency on 1:1 relationship between how many times this
+	 * function is called (and hence the eventfd count) and number of CQEs
+	 * posted to the CQ ring.
+	 */
+	skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
+	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
+	spin_unlock(&ctx->completion_lock);
+	if (skip)
+		return;
+
+	io_eventfd_signal(ctx);
+}
void __io_commit_cqring_flush(struct io_ring_ctx *ctx) void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
{ {
if (ctx->off_timeout_used || ctx->drain_active) { if (ctx->off_timeout_used || ctx->drain_active) {
@@ -531,7 +573,7 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
		spin_unlock(&ctx->completion_lock);
	}
	if (ctx->has_evfd)
-		io_eventfd_signal(ctx);
+		io_eventfd_flush_signal(ctx);
 }

 static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
@@ -567,7 +609,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
	io_cq_lock(ctx);
	while (!list_empty(&ctx->cq_overflow_list)) {
-		struct io_uring_cqe *cqe = io_get_cqe(ctx);
+		struct io_uring_cqe *cqe = io_get_cqe_overflow(ctx, true);
		struct io_overflow_cqe *ocqe;

		if (!cqe && !force)
@@ -694,12 +736,19 @@ bool io_req_cqe_overflow(struct io_kiocb *req)
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
+struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
 {
	struct io_rings *rings = ctx->rings;
	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
	unsigned int free, queued, len;
/*
* Posting into the CQ when there are pending overflowed CQEs may break
* ordering guarantees, which will affect links, F_MORE users and more.
* Force overflow the completion.
*/
if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
return NULL;
/* userspace may cheat modifying the tail, be safe and do min */ /* userspace may cheat modifying the tail, be safe and do min */
queued = min(__io_cqring_events(ctx), ctx->cq_entries); queued = min(__io_cqring_events(ctx), ctx->cq_entries);
...@@ -823,8 +872,12 @@ inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags) ...@@ -823,8 +872,12 @@ inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
void io_req_complete_failed(struct io_kiocb *req, s32 res) void io_req_complete_failed(struct io_kiocb *req, s32 res)
{ {
const struct io_op_def *def = &io_op_defs[req->opcode];
req_set_fail(req); req_set_fail(req);
io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED)); io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
if (def->fail)
def->fail(req);
io_req_complete_post(req); io_req_complete_post(req);
} }
@@ -1047,17 +1100,40 @@ void tctx_task_work(struct callback_head *cb)
	trace_io_uring_task_work_run(tctx, count, loops);
 }

-void io_req_task_work_add(struct io_kiocb *req)
+static void io_req_local_work_add(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	if (!llist_add(&req->io_task_work.node, &ctx->work_llist))
+		return;
+
+	if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
+		io_move_task_work_from_local(ctx);
+		return;
+	}
+
+	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
+		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
+
+	if (ctx->has_evfd)
+		io_eventfd_signal(ctx);
+	io_cqring_wake(ctx);
+}
+
+static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
 {
	struct io_uring_task *tctx = req->task->io_uring;
	struct io_ring_ctx *ctx = req->ctx;
	struct llist_node *node;
-	bool running;

-	running = !llist_add(&req->io_task_work.node, &tctx->task_list);
+	if (allow_local && ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+		io_req_local_work_add(req);
+		return;
+	}

	/* task_work already pending, we're done */
-	if (running)
+	if (!llist_add(&req->io_task_work.node, &tctx->task_list))
		return;

	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
@@ -1077,6 +1153,84 @@ void io_req_task_work_add(struct io_kiocb *req)
	}
 }
void io_req_task_work_add(struct io_kiocb *req)
{
__io_req_task_work_add(req, true);
}
static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
{
struct llist_node *node;
node = llist_del_all(&ctx->work_llist);
while (node) {
struct io_kiocb *req = container_of(node, struct io_kiocb,
io_task_work.node);
node = node->next;
__io_req_task_work_add(req, false);
}
}
int __io_run_local_work(struct io_ring_ctx *ctx, bool locked)
{
struct llist_node *node;
struct llist_node fake;
struct llist_node *current_final = NULL;
int ret;
unsigned int loops = 1;
if (unlikely(ctx->submitter_task != current))
return -EEXIST;
node = io_llist_xchg(&ctx->work_llist, &fake);
ret = 0;
again:
while (node != current_final) {
struct llist_node *next = node->next;
struct io_kiocb *req = container_of(node, struct io_kiocb,
io_task_work.node);
prefetch(container_of(next, struct io_kiocb, io_task_work.node));
req->io_task_work.func(req, &locked);
ret++;
node = next;
}
if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
node = io_llist_cmpxchg(&ctx->work_llist, &fake, NULL);
if (node != &fake) {
loops++;
current_final = &fake;
node = io_llist_xchg(&ctx->work_llist, &fake);
goto again;
}
if (locked)
io_submit_flush_completions(ctx);
trace_io_uring_local_work_run(ctx, ret, loops);
return ret;
}
int io_run_local_work(struct io_ring_ctx *ctx)
{
bool locked;
int ret;
if (llist_empty(&ctx->work_llist))
return 0;
__set_current_state(TASK_RUNNING);
locked = mutex_trylock(&ctx->uring_lock);
ret = __io_run_local_work(ctx, locked);
if (locked)
mutex_unlock(&ctx->uring_lock);
return ret;
}
static void io_req_tw_post(struct io_kiocb *req, bool *locked) static void io_req_tw_post(struct io_kiocb *req, bool *locked)
{ {
io_req_complete_post(req); io_req_complete_post(req);
@@ -1183,7 +1337,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
	struct io_wq_work_node *node, *prev;
	struct io_submit_state *state = &ctx->submit_state;

-	spin_lock(&ctx->completion_lock);
+	io_cq_lock(ctx);
	wq_list_for_each(node, prev, &state->compl_reqs) {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
					    comp_list);
...@@ -1254,6 +1408,9 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) ...@@ -1254,6 +1408,9 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
int ret = 0; int ret = 0;
unsigned long check_cq; unsigned long check_cq;
if (!io_allowed_run_tw(ctx))
return -EEXIST;
check_cq = READ_ONCE(ctx->check_cq); check_cq = READ_ONCE(ctx->check_cq);
if (unlikely(check_cq)) { if (unlikely(check_cq)) {
if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
@@ -1284,13 +1441,19 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
	 * forever, while the workqueue is stuck trying to acquire the
	 * very same mutex.
	 */
-	if (wq_list_empty(&ctx->iopoll_list)) {
+	if (wq_list_empty(&ctx->iopoll_list) ||
+	    io_task_work_pending(ctx)) {
		u32 tail = ctx->cached_cq_tail;

-		mutex_unlock(&ctx->uring_lock);
-		io_run_task_work();
-		mutex_lock(&ctx->uring_lock);
+		if (!llist_empty(&ctx->work_llist))
+			__io_run_local_work(ctx, true);

+		if (task_work_pending(current) ||
+		    wq_list_empty(&ctx->iopoll_list)) {
+			mutex_unlock(&ctx->uring_lock);
+			io_run_task_work();
+			mutex_lock(&ctx->uring_lock);
+		}
		/* some requests don't go through iopoll_list */
		if (tail != ctx->cached_cq_tail ||
		    wq_list_empty(&ctx->iopoll_list))
...@@ -1732,10 +1895,6 @@ static void io_queue_async(struct io_kiocb *req, int ret) ...@@ -1732,10 +1895,6 @@ static void io_queue_async(struct io_kiocb *req, int ret)
io_req_task_queue(req); io_req_task_queue(req);
break; break;
case IO_APOLL_ABORTED: case IO_APOLL_ABORTED:
/*
* Queued up for async execution, worker will release
* submit reference when the iocb is actually submitted.
*/
io_kbuf_recycle(req, 0); io_kbuf_recycle(req, 0);
io_queue_iowq(req, NULL); io_queue_iowq(req, NULL);
break; break;
...@@ -2149,6 +2308,13 @@ struct io_wait_queue { ...@@ -2149,6 +2308,13 @@ struct io_wait_queue {
unsigned nr_timeouts; unsigned nr_timeouts;
}; };
static inline bool io_has_work(struct io_ring_ctx *ctx)
{
return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
!llist_empty(&ctx->work_llist));
}
 static inline bool io_should_wake(struct io_wait_queue *iowq)
 {
	struct io_ring_ctx *ctx = iowq->ctx;
@@ -2167,20 +2333,20 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 {
	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
							wq);
+	struct io_ring_ctx *ctx = iowq->ctx;

	/*
	 * Cannot safely flush overflowed CQEs from here, ensure we wake up
	 * the task, and the next invocation will do it.
	 */
-	if (io_should_wake(iowq) ||
-	    test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &iowq->ctx->check_cq))
+	if (io_should_wake(iowq) || io_has_work(ctx))
		return autoremove_wake_function(curr, mode, wake_flags, key);
	return -1;
 }
-int io_run_task_work_sig(void)
+int io_run_task_work_sig(struct io_ring_ctx *ctx)
 {
-	if (io_run_task_work())
+	if (io_run_task_work_ctx(ctx) > 0)
		return 1;
	if (task_sigpending(current))
		return -EINTR;
@@ -2196,7 +2362,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
	unsigned long check_cq;

	/* make sure we run task_work before checking for signals */
-	ret = io_run_task_work_sig();
+	ret = io_run_task_work_sig(ctx);
	if (ret || io_should_wake(iowq))
		return ret;
@@ -2226,13 +2392,19 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
	ktime_t timeout = KTIME_MAX;
	int ret;

+	if (!io_allowed_run_tw(ctx))
+		return -EEXIST;
+
	do {
+		/* always run at least 1 task work to process local work */
+		ret = io_run_task_work_ctx(ctx);
+		if (ret < 0)
+			return ret;
		io_cqring_overflow_flush(ctx);
		if (io_cqring_events(ctx) >= min_events)
			return 0;
-		if (!io_run_task_work())
-			break;
-	} while (1);
+	} while (ret > 0);

	if (sig) {
 #ifdef CONFIG_COMPAT
@@ -2366,17 +2538,11 @@ static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
	ev_fd->eventfd_async = eventfd_async;
	ctx->has_evfd = true;
	rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
+	atomic_set(&ev_fd->refs, 1);
+	atomic_set(&ev_fd->ops, 0);
	return 0;
 }

-static void io_eventfd_put(struct rcu_head *rcu)
-{
-	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
-
-	eventfd_ctx_put(ev_fd->cq_ev_fd);
-	kfree(ev_fd);
-}
-
 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
 {
	struct io_ev_fd *ev_fd;
@@ -2386,7 +2552,8 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx)
	if (ev_fd) {
		ctx->has_evfd = false;
		rcu_assign_pointer(ctx->io_ev_fd, NULL);
-		call_rcu(&ev_fd->rcu, io_eventfd_put);
+		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops))
+			call_rcu(&ev_fd->rcu, io_eventfd_ops);
		return 0;
	}
@@ -2509,8 +2676,8 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
	 * Users may get EPOLLIN meanwhile seeing nothing in cqring, this
	 * pushs them to do the flush.
	 */
-	if (io_cqring_events(ctx) ||
-	    test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+	if (io_cqring_events(ctx) || io_has_work(ctx))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
...@@ -2573,6 +2740,9 @@ static __cold void io_ring_exit_work(struct work_struct *work) ...@@ -2573,6 +2740,9 @@ static __cold void io_ring_exit_work(struct work_struct *work)
* as nobody else will be looking for them. * as nobody else will be looking for them.
*/ */
do { do {
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
io_move_task_work_from_local(ctx);
while (io_uring_try_cancel_requests(ctx, NULL, true)) while (io_uring_try_cancel_requests(ctx, NULL, true))
cond_resched(); cond_resched();
...@@ -2770,13 +2940,15 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, ...@@ -2770,13 +2940,15 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
} }
} }
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
ret |= io_run_local_work(ctx) > 0;
ret |= io_cancel_defer_files(ctx, task, cancel_all); ret |= io_cancel_defer_files(ctx, task, cancel_all);
mutex_lock(&ctx->uring_lock); mutex_lock(&ctx->uring_lock);
ret |= io_poll_remove_all(ctx, task, cancel_all); ret |= io_poll_remove_all(ctx, task, cancel_all);
mutex_unlock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock);
ret |= io_kill_timeouts(ctx, task, cancel_all); ret |= io_kill_timeouts(ctx, task, cancel_all);
	if (task)
-		ret |= io_run_task_work();
+		ret |= io_run_task_work() > 0;
	return ret;
} }
@@ -2992,8 +3164,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
	struct fd f;
	long ret;

-	io_run_task_work();
-
	if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG | IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
IORING_ENTER_REGISTERED_RING))) IORING_ENTER_REGISTERED_RING)))
...@@ -3063,8 +3233,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, ...@@ -3063,8 +3233,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
goto iopoll_locked; goto iopoll_locked;
mutex_unlock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock);
} }
if (flags & IORING_ENTER_GETEVENTS) { if (flags & IORING_ENTER_GETEVENTS) {
int ret2; int ret2;
if (ctx->syscall_iopoll) { if (ctx->syscall_iopoll) {
/* /*
* We disallow the app entering submit/complete with * We disallow the app entering submit/complete with
@@ -3293,17 +3465,29 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		/* IPI related flags don't make sense with SQPOLL */
		if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
-				  IORING_SETUP_TASKRUN_FLAG))
+				  IORING_SETUP_TASKRUN_FLAG |
+				  IORING_SETUP_DEFER_TASKRUN))
			goto err;
		ctx->notify_method = TWA_SIGNAL_NO_IPI;
	} else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
		ctx->notify_method = TWA_SIGNAL_NO_IPI;
	} else {
-		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
+		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
+		    !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
			goto err;
		ctx->notify_method = TWA_SIGNAL;
	}
/*
* For DEFER_TASKRUN we require the completion task to be the same as the
* submission task. This implies that there is only one submitter, so enforce
* that.
*/
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
!(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
goto err;
}
/* /*
* This is just grabbed for accounting purposes. When a process exits, * This is just grabbed for accounting purposes. When a process exits,
* the mm is exited and dropped before the files, hence we need to hang * the mm is exited and dropped before the files, hence we need to hang
@@ -3404,7 +3588,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
-			IORING_SETUP_SINGLE_ISSUER))
+			IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN))
		return -EINVAL;

	return io_uring_create(entries, &p, params);
@@ -3867,7 +4051,7 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
	ctx = f.file->private_data;

-	io_run_task_work();
+	io_run_task_work_ctx(ctx);

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
...
@@ -24,9 +24,11 @@ enum {
	IOU_STOP_MULTISHOT	= -ECANCELED,
 };

-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
+struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
 bool io_req_cqe_overflow(struct io_kiocb *req);
-int io_run_task_work_sig(void);
+int io_run_task_work_sig(struct io_ring_ctx *ctx);
+int __io_run_local_work(struct io_ring_ctx *ctx, bool locked);
+int io_run_local_work(struct io_ring_ctx *ctx);
 void io_req_complete_failed(struct io_kiocb *req, s32 res);
 void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
 void io_req_complete_post(struct io_kiocb *req);
@@ -91,7 +93,8 @@ static inline void io_cq_lock(struct io_ring_ctx *ctx)

 void io_cq_unlock_post(struct io_ring_ctx *ctx);

-static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
+static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
+						       bool overflow)
 {
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;
@@ -103,7 +106,12 @@ static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
		return cqe;
	}

-	return __io_get_cqe(ctx);
+	return __io_get_cqe(ctx, overflow);
+}
+
+static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
+{
+	return io_get_cqe_overflow(ctx, false);
 }
 static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
@@ -221,17 +229,43 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
 }

-static inline bool io_run_task_work(void)
+static inline int io_run_task_work(void)
 {
-	if (test_thread_flag(TIF_NOTIFY_SIGNAL)) {
+	if (task_work_pending(current)) {
+		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
+			clear_notify_signal();
		__set_current_state(TASK_RUNNING);
-		clear_notify_signal();
-		if (task_work_pending(current))
-			task_work_run();
-		return true;
+		task_work_run();
+		return 1;
	}

-	return false;
+	return 0;
 }
static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
!wq_list_empty(&ctx->work_llist);
}
static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
{
int ret = 0;
int ret2;
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
ret = io_run_local_work(ctx);
/* want to run this after in case more is added */
ret2 = io_run_task_work();
/* Try propagate error in favour of if tasks were run,
* but still make sure to run them if requested
*/
if (ret >= 0)
ret += ret2;
return ret;
} }
static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked) static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
...@@ -301,4 +335,10 @@ static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) ...@@ -301,4 +335,10 @@ static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
return container_of(node, struct io_kiocb, comp_list); return container_of(node, struct io_kiocb, comp_list);
} }
static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
ctx->submitter_task == current);
}
#endif #endif
@@ -86,18 +86,6 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)

 static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
-	/*
-	 * READV uses fields in `struct io_rw` (len/addr) to stash the selected
-	 * buffer data. However if that buffer is recycled the original request
-	 * data stored in addr is lost. Therefore forbid recycling for now.
-	 */
-	if (req->opcode == IORING_OP_READV) {
-		if ((req->flags & REQ_F_BUFFER_RING) && req->buf_list) {
-			req->buf_list->head++;
-			req->buf_list = NULL;
-		}
-		return;
-	}
	if (req->flags & REQ_F_BUFFER_SELECTED)
		io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
...
@@ -55,21 +55,14 @@ struct io_sr_msg {
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
+	unsigned			len;
+	unsigned			done_io;
	unsigned			msg_flags;
-	unsigned			flags;
-	size_t				len;
-	size_t				done_io;
-};
-
-struct io_sendzc {
-	struct file			*file;
-	void __user			*buf;
-	size_t				len;
-	unsigned			msg_flags;
-	unsigned			flags;
-	unsigned			addr_len;
+	u16				flags;
+	/* initialised and used only by !msg send variants */
+	u16				addr_len;
	void __user			*addr;
-	size_t				done_io;
+	/* used only for send zerocopy */
	struct io_kiocb			*notif;
 };
@@ -126,28 +119,36 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
	}
 }

-static struct io_async_msghdr *io_recvmsg_alloc_async(struct io_kiocb *req,
-						      unsigned int issue_flags)
+static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
+						  unsigned int issue_flags)
 {
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
+	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
-		struct io_async_msghdr *hdr;
-
		hdr = container_of(entry, struct io_async_msghdr, cache);
+		hdr->free_iov = NULL;
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

-	if (!io_alloc_async_data(req))
-		return req->async_data;
-
+	if (!io_alloc_async_data(req)) {
+		hdr = req->async_data;
+		hdr->free_iov = NULL;
+		return hdr;
+	}
	return NULL;
 }

+static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
+{
+	/* ->prep_async is always called from the submission context */
+	return io_msg_alloc_async(req, 0);
+}
+
 static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
@@ -156,17 +157,20 @@ static int io_setup_async_msg(struct io_kiocb *req,
	if (req_has_async_data(req))
		return -EAGAIN;
-	async_msg = io_recvmsg_alloc_async(req, issue_flags);
+	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
-	async_msg->msg.msg_name = &async_msg->addr;
+	if (async_msg->msg.msg_name)
+		async_msg->msg.msg_name = &async_msg->addr;
	/* if were using fast_iov, set it to the new one */
-	if (!async_msg->free_iov)
-		async_msg->msg.msg_iter.iov = async_msg->fast_iov;
+	if (!kmsg->free_iov) {
+		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
+		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
+	}

	return -EAGAIN;
 }
@@ -182,34 +186,34 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
				&iomsg->free_iov);
 }

-int io_sendzc_prep_async(struct io_kiocb *req)
+int io_send_prep_async(struct io_kiocb *req)
 {
-	struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
+	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
-	if (io_alloc_async_data(req))
+	io = io_msg_alloc_async_prep(req);
+	if (!io)
		return -ENOMEM;
-	io = req->async_data;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
 }

 static int io_setup_async_addr(struct io_kiocb *req,
-			       struct sockaddr_storage *addr,
+			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
 {
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

-	if (!addr || req_has_async_data(req))
+	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
-	if (io_alloc_async_data(req))
+	io = io_msg_alloc_async(req, issue_flags);
+	if (!io)
		return -ENOMEM;
-	io = req->async_data;
-	memcpy(&io->addr, addr, sizeof(io->addr));
+	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
 }
...@@ -217,6 +221,8 @@ int io_sendmsg_prep_async(struct io_kiocb *req) ...@@ -217,6 +221,8 @@ int io_sendmsg_prep_async(struct io_kiocb *req)
{ {
int ret; int ret;
if (!io_msg_alloc_async_prep(req))
return -ENOMEM;
ret = io_sendmsg_copy_hdr(req, req->async_data); ret = io_sendmsg_copy_hdr(req, req->async_data);
if (!ret) if (!ret)
req->flags |= REQ_F_NEED_CLEANUP; req->flags |= REQ_F_NEED_CLEANUP;
@@ -234,8 +240,14 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

-	if (unlikely(sqe->file_index || sqe->addr2))
+	if (req->opcode == IORING_OP_SEND) {
+		if (READ_ONCE(sqe->__pad3[0]))
+			return -EINVAL;
+		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+		sr->addr_len = READ_ONCE(sqe->addr_len);
+	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
+	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
...@@ -291,13 +303,13 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) ...@@ -291,13 +303,13 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
if (ret < min_ret) { if (ret < min_ret) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return io_setup_async_msg(req, kmsg, issue_flags); return io_setup_async_msg(req, kmsg, issue_flags);
if (ret == -ERESTARTSYS)
ret = -EINTR;
if (ret > 0 && io_net_retry(sock, flags)) { if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret; sr->done_io += ret;
req->flags |= REQ_F_PARTIAL_IO; req->flags |= REQ_F_PARTIAL_IO;
return io_setup_async_msg(req, kmsg, issue_flags); return io_setup_async_msg(req, kmsg, issue_flags);
} }
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req); req_set_fail(req);
} }
/* fast path, check for non-NULL to avoid function call */ /* fast path, check for non-NULL to avoid function call */
...@@ -315,6 +327,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) ...@@ -315,6 +327,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
int io_send(struct io_kiocb *req, unsigned int issue_flags) int io_send(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct sockaddr_storage __address;
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct msghdr msg; struct msghdr msg;
struct iovec iov; struct iovec iov;
...@@ -323,9 +336,29 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags) ...@@ -323,9 +336,29 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
int min_ret = 0; int min_ret = 0;
int ret; int ret;
msg.msg_name = NULL;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
msg.msg_ubuf = NULL;
if (sr->addr) {
if (req_has_async_data(req)) {
struct io_async_msghdr *io = req->async_data;
msg.msg_name = &io->addr;
} else {
ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
if (unlikely(ret < 0))
return ret;
msg.msg_name = (struct sockaddr *)&__address;
}
msg.msg_namelen = sr->addr_len;
}
if (!(req->flags & REQ_F_POLLED) && if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST)) (sr->flags & IORING_RECVSEND_POLL_FIRST))
return -EAGAIN; return io_setup_async_addr(req, &__address, issue_flags);
sock = sock_from_file(req->file); sock = sock_from_file(req->file);
if (unlikely(!sock)) if (unlikely(!sock))
...@@ -335,12 +368,6 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags) ...@@ -335,12 +368,6 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
msg.msg_name = NULL;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
msg.msg_ubuf = NULL;
flags = sr->msg_flags; flags = sr->msg_flags;
if (issue_flags & IO_URING_F_NONBLOCK) if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT; flags |= MSG_DONTWAIT;
...@@ -351,16 +378,17 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags) ...@@ -351,16 +378,17 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
ret = sock_sendmsg(sock, &msg); ret = sock_sendmsg(sock, &msg);
if (ret < min_ret) { if (ret < min_ret) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return -EAGAIN; return io_setup_async_addr(req, &__address, issue_flags);
if (ret == -ERESTARTSYS)
ret = -EINTR;
if (ret > 0 && io_net_retry(sock, flags)) { if (ret > 0 && io_net_retry(sock, flags)) {
sr->len -= ret; sr->len -= ret;
sr->buf += ret; sr->buf += ret;
sr->done_io += ret; sr->done_io += ret;
req->flags |= REQ_F_PARTIAL_IO; req->flags |= REQ_F_PARTIAL_IO;
return -EAGAIN; return io_setup_async_addr(req, &__address, issue_flags);
} }
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req); req_set_fail(req);
} }
if (ret >= 0) if (ret >= 0)
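
With the io_send() changes above, a plain IORING_OP_SEND can carry a destination address, read from sqe->addr2 and sqe->addr_len in io_sendmsg_prep(). A hedged liburing-style sketch, not part of the commit; it assumes uapi headers new enough to expose the addr_len field added by this series (newer liburing also provides an io_uring_prep_send_set_addr() helper that wraps these two assignments):

#include <liburing.h>
#include <netinet/in.h>

/* Sketch: UDP "sendto" via IORING_OP_SEND plus an explicit destination. */
static int queue_sendto(struct io_uring *ring, int sockfd,
			const void *buf, size_t len,
			const struct sockaddr_in *dst)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_send(sqe, sockfd, buf, len, 0);
	/* Same SQE fields io_sendmsg_prep() reads for IORING_OP_SEND. */
	sqe->addr2 = (unsigned long)dst;
	sqe->addr_len = sizeof(*dst);
	return io_uring_submit(ring);
}
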
...@@ -454,7 +482,6 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, ...@@ -454,7 +482,6 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
if (msg.msg_iovlen == 0) { if (msg.msg_iovlen == 0) {
sr->len = 0; sr->len = 0;
iomsg->free_iov = NULL;
} else if (msg.msg_iovlen > 1) { } else if (msg.msg_iovlen > 1) {
return -EINVAL; return -EINVAL;
} else { } else {
...@@ -465,7 +492,6 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, ...@@ -465,7 +492,6 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
if (clen < 0) if (clen < 0)
return -EINVAL; return -EINVAL;
sr->len = clen; sr->len = clen;
iomsg->free_iov = NULL;
} }
if (req->flags & REQ_F_APOLL_MULTISHOT) { if (req->flags & REQ_F_APOLL_MULTISHOT) {
...@@ -504,6 +530,8 @@ int io_recvmsg_prep_async(struct io_kiocb *req) ...@@ -504,6 +530,8 @@ int io_recvmsg_prep_async(struct io_kiocb *req)
{ {
int ret; int ret;
if (!io_msg_alloc_async_prep(req))
return -ENOMEM;
ret = io_recvmsg_copy_hdr(req, req->async_data); ret = io_recvmsg_copy_hdr(req, req->async_data);
if (!ret) if (!ret)
req->flags |= REQ_F_NEED_CLEANUP; req->flags |= REQ_F_NEED_CLEANUP;
...@@ -751,13 +779,13 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) ...@@ -751,13 +779,13 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
} }
return ret; return ret;
} }
if (ret == -ERESTARTSYS)
ret = -EINTR;
if (ret > 0 && io_net_retry(sock, flags)) { if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret; sr->done_io += ret;
req->flags |= REQ_F_PARTIAL_IO; req->flags |= REQ_F_PARTIAL_IO;
return io_setup_async_msg(req, kmsg, issue_flags); return io_setup_async_msg(req, kmsg, issue_flags);
} }
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req); req_set_fail(req);
} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
req_set_fail(req); req_set_fail(req);
...@@ -847,8 +875,6 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags) ...@@ -847,8 +875,6 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
return -EAGAIN; return -EAGAIN;
} }
if (ret == -ERESTARTSYS)
ret = -EINTR;
if (ret > 0 && io_net_retry(sock, flags)) { if (ret > 0 && io_net_retry(sock, flags)) {
sr->len -= ret; sr->len -= ret;
sr->buf += ret; sr->buf += ret;
...@@ -856,6 +882,8 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags) ...@@ -856,6 +882,8 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
req->flags |= REQ_F_PARTIAL_IO; req->flags |= REQ_F_PARTIAL_IO;
return -EAGAIN; return -EAGAIN;
} }
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req); req_set_fail(req);
} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free: out_free:
...@@ -879,23 +907,30 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags) ...@@ -879,23 +907,30 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
return ret; return ret;
} }
void io_sendzc_cleanup(struct io_kiocb *req) void io_send_zc_cleanup(struct io_kiocb *req)
{ {
struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc); struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *io;
zc->notif->flags |= REQ_F_CQE_SKIP; if (req_has_async_data(req)) {
io_notif_flush(zc->notif); io = req->async_data;
zc->notif = NULL; /* might be ->fast_iov if *msg_copy_hdr failed */
if (io->free_iov != io->fast_iov)
kfree(io->free_iov);
}
if (zc->notif) {
io_notif_flush(zc->notif);
zc->notif = NULL;
}
} }
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{ {
struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc); struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *notif; struct io_kiocb *notif;
if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3) || if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
READ_ONCE(sqe->__pad3[0]))
return -EINVAL; return -EINVAL;
/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */ /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
if (req->flags & REQ_F_CQE_SKIP) if (req->flags & REQ_F_CQE_SKIP)
...@@ -922,14 +957,24 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -922,14 +957,24 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_req_set_rsrc_node(notif, ctx, 0); io_req_set_rsrc_node(notif, ctx, 0);
} }
if (req->opcode == IORING_OP_SEND_ZC) {
if (READ_ONCE(sqe->__pad3[0]))
return -EINVAL;
zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
zc->addr_len = READ_ONCE(sqe->addr_len);
} else {
if (unlikely(sqe->addr2 || sqe->file_index))
return -EINVAL;
if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
return -EINVAL;
}
zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr)); zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
zc->len = READ_ONCE(sqe->len); zc->len = READ_ONCE(sqe->len);
zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
if (zc->msg_flags & MSG_DONTWAIT) if (zc->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT; req->flags |= REQ_F_NOWAIT;
zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
zc->addr_len = READ_ONCE(sqe->addr_len);
zc->done_io = 0; zc->done_io = 0;
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
...@@ -939,6 +984,13 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -939,6 +984,13 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0; return 0;
} }
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
struct iov_iter *from, size_t length)
{
skb_zcopy_downgrade_managed(skb);
return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb, static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
struct iov_iter *from, size_t length) struct iov_iter *from, size_t length)
{ {
...@@ -949,13 +1001,10 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb, ...@@ -949,13 +1001,10 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
ssize_t copied = 0; ssize_t copied = 0;
unsigned long truesize = 0; unsigned long truesize = 0;
if (!shinfo->nr_frags) if (!frag)
shinfo->flags |= SKBFL_MANAGED_FRAG_REFS; shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
else if (unlikely(!skb_zcopy_managed(skb)))
if (!skb_zcopy_managed(skb) || !iov_iter_is_bvec(from)) {
skb_zcopy_downgrade_managed(skb);
return __zerocopy_sg_from_iter(NULL, sk, skb, from, length); return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}
bi.bi_size = min(from->count, length); bi.bi_size = min(from->count, length);
bi.bi_bvec_done = from->iov_offset; bi.bi_bvec_done = from->iov_offset;
...@@ -993,14 +1042,14 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb, ...@@ -993,14 +1042,14 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
return ret; return ret;
} }
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags) int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct sockaddr_storage __address, *addr = NULL; struct sockaddr_storage __address;
struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc); struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct msghdr msg; struct msghdr msg;
struct iovec iov; struct iovec iov;
struct socket *sock; struct socket *sock;
unsigned msg_flags, cflags; unsigned msg_flags;
int ret, min_ret = 0; int ret, min_ret = 0;
sock = sock_from_file(req->file); sock = sock_from_file(req->file);
...@@ -1016,26 +1065,26 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags) ...@@ -1016,26 +1065,26 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
if (req_has_async_data(req)) { if (req_has_async_data(req)) {
struct io_async_msghdr *io = req->async_data; struct io_async_msghdr *io = req->async_data;
msg.msg_name = addr = &io->addr; msg.msg_name = &io->addr;
} else { } else {
ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address); ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
if (unlikely(ret < 0)) if (unlikely(ret < 0))
return ret; return ret;
msg.msg_name = (struct sockaddr *)&__address; msg.msg_name = (struct sockaddr *)&__address;
addr = &__address;
} }
msg.msg_namelen = zc->addr_len; msg.msg_namelen = zc->addr_len;
} }
if (!(req->flags & REQ_F_POLLED) && if (!(req->flags & REQ_F_POLLED) &&
(zc->flags & IORING_RECVSEND_POLL_FIRST)) (zc->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_addr(req, addr, issue_flags); return io_setup_async_addr(req, &__address, issue_flags);
if (zc->flags & IORING_RECVSEND_FIXED_BUF) { if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu, ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
(u64)(uintptr_t)zc->buf, zc->len); (u64)(uintptr_t)zc->buf, zc->len);
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
msg.sg_from_iter = io_sg_from_iter;
} else { } else {
ret = import_single_range(WRITE, zc->buf, zc->len, &iov, ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
&msg.msg_iter); &msg.msg_iter);
...@@ -1044,6 +1093,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags) ...@@ -1044,6 +1093,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
ret = io_notif_account_mem(zc->notif, zc->len); ret = io_notif_account_mem(zc->notif, zc->len);
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
msg.sg_from_iter = io_sg_from_iter_iovec;
} }
msg_flags = zc->msg_flags | MSG_ZEROCOPY; msg_flags = zc->msg_flags | MSG_ZEROCOPY;
...@@ -1054,22 +1104,19 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags) ...@@ -1054,22 +1104,19 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
msg.msg_flags = msg_flags; msg.msg_flags = msg_flags;
msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg; msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
msg.sg_from_iter = io_sg_from_iter;
ret = sock_sendmsg(sock, &msg); ret = sock_sendmsg(sock, &msg);
if (unlikely(ret < min_ret)) { if (unlikely(ret < min_ret)) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK)) if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return io_setup_async_addr(req, addr, issue_flags); return io_setup_async_addr(req, &__address, issue_flags);
if (ret > 0 && io_net_retry(sock, msg.msg_flags)) { if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
zc->len -= ret; zc->len -= ret;
zc->buf += ret; zc->buf += ret;
zc->done_io += ret; zc->done_io += ret;
req->flags |= REQ_F_PARTIAL_IO; req->flags |= REQ_F_PARTIAL_IO;
return io_setup_async_addr(req, addr, issue_flags); return io_setup_async_addr(req, &__address, issue_flags);
} }
if (ret < 0 && !zc->done_io)
zc->notif->flags |= REQ_F_CQE_SKIP;
if (ret == -ERESTARTSYS) if (ret == -ERESTARTSYS)
ret = -EINTR; ret = -EINTR;
req_set_fail(req); req_set_fail(req);
...@@ -1080,13 +1127,102 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags) ...@@ -1080,13 +1127,102 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
else if (zc->done_io) else if (zc->done_io)
ret = zc->done_io; ret = zc->done_io;
io_notif_flush(zc->notif); /*
req->flags &= ~REQ_F_NEED_CLEANUP; * If we're in io-wq we can't rely on tw ordering guarantees, defer
cflags = ret >= 0 ? IORING_CQE_F_MORE : 0; * flushing notif to io_send_zc_cleanup()
io_req_set_res(req, ret, cflags); */
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
io_notif_flush(zc->notif);
req->flags &= ~REQ_F_NEED_CLEANUP;
}
io_req_set_res(req, ret, IORING_CQE_F_MORE);
return IOU_OK; return IOU_OK;
} }
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
unsigned flags;
int ret, min_ret = 0;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
if (req_has_async_data(req)) {
kmsg = req->async_data;
} else {
ret = io_sendmsg_copy_hdr(req, &iomsg);
if (ret)
return ret;
kmsg = &iomsg;
}
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_msg(req, kmsg, issue_flags);
flags = sr->msg_flags | MSG_ZEROCOPY;
if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
if (unlikely(ret < min_ret)) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return io_setup_async_msg(req, kmsg, issue_flags);
if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
req->flags |= REQ_F_PARTIAL_IO;
return io_setup_async_msg(req, kmsg, issue_flags);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
}
/* fast path, check for non-NULL to avoid function call */
if (kmsg->free_iov) {
kfree(kmsg->free_iov);
kmsg->free_iov = NULL;
}
io_netmsg_recycle(req, issue_flags);
if (ret >= 0)
ret += sr->done_io;
else if (sr->done_io)
ret = sr->done_io;
/*
* If we're in io-wq we can't rely on tw ordering guarantees, defer
* flushing notif to io_send_zc_cleanup()
*/
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
io_notif_flush(sr->notif);
req->flags &= ~REQ_F_NEED_CLEANUP;
}
io_req_set_res(req, ret, IORING_CQE_F_MORE);
return IOU_OK;
}
void io_sendrecv_fail(struct io_kiocb *req)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
if (req->flags & REQ_F_PARTIAL_IO)
req->cqe.res = sr->done_io;
if ((req->flags & REQ_F_NEED_CLEANUP) &&
(req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
req->cqe.flags |= IORING_CQE_F_MORE;
}
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{ {
struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept); struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
......
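
io_sendmsg_zc() above backs the new IORING_OP_SENDMSG_ZC opcode. A hedged userspace sketch, not part of the commit: it assumes a liburing that already knows the opcode and its io_uring_prep_sendmsg_zc() helper; without that, the SQE can be filled like a normal sendmsg with the opcode switched by hand. Each request posts its result CQE with IORING_CQE_F_MORE set and, once the kernel drops its references to the pages, a second CQE flagged IORING_CQE_F_NOTIF; the payload must stay untouched until that notification arrives.

#include <liburing.h>
#include <sys/socket.h>

/* Queue one zero-copy sendmsg; caller must keep *msg and its buffers live
 * until the matching IORING_CQE_F_NOTIF completion has been reaped. */
static int queue_sendmsg_zc(struct io_uring *ring, int sockfd,
			    const struct msghdr *msg, __u64 tag)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_sendmsg_zc(sqe, sockfd, msg, 0);	/* assumed helper */
	sqe->user_data = tag;	/* both CQEs carry this tag */
	return io_uring_submit(ring);
}
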
...@@ -31,18 +31,21 @@ struct io_async_connect { ...@@ -31,18 +31,21 @@ struct io_async_connect {
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags); int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
int io_sendzc_prep_async(struct io_kiocb *req);
int io_sendmsg_prep_async(struct io_kiocb *req); int io_sendmsg_prep_async(struct io_kiocb *req);
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req); void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags); int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_send(struct io_kiocb *req, unsigned int issue_flags); int io_send(struct io_kiocb *req, unsigned int issue_flags);
int io_send_prep_async(struct io_kiocb *req);
int io_recvmsg_prep_async(struct io_kiocb *req); int io_recvmsg_prep_async(struct io_kiocb *req);
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags); int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_recv(struct io_kiocb *req, unsigned int issue_flags); int io_recv(struct io_kiocb *req, unsigned int issue_flags);
void io_sendrecv_fail(struct io_kiocb *req);
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_accept(struct io_kiocb *req, unsigned int issue_flags); int io_accept(struct io_kiocb *req, unsigned int issue_flags);
...@@ -53,9 +56,10 @@ int io_connect_prep_async(struct io_kiocb *req); ...@@ -53,9 +56,10 @@ int io_connect_prep_async(struct io_kiocb *req);
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_connect(struct io_kiocb *req, unsigned int issue_flags); int io_connect(struct io_kiocb *req, unsigned int issue_flags);
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags); int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
void io_sendzc_cleanup(struct io_kiocb *req); int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
void io_send_zc_cleanup(struct io_kiocb *req);
void io_netmsg_cache_free(struct io_cache_entry *entry); void io_netmsg_cache_free(struct io_cache_entry *entry);
#else #else
......
...@@ -69,6 +69,7 @@ const struct io_op_def io_op_defs[] = { ...@@ -69,6 +69,7 @@ const struct io_op_def io_op_defs[] = {
.issue = io_read, .issue = io_read,
.prep_async = io_readv_prep_async, .prep_async = io_readv_prep_async,
.cleanup = io_readv_writev_cleanup, .cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
}, },
[IORING_OP_WRITEV] = { [IORING_OP_WRITEV] = {
.needs_file = 1, .needs_file = 1,
...@@ -85,6 +86,7 @@ const struct io_op_def io_op_defs[] = { ...@@ -85,6 +86,7 @@ const struct io_op_def io_op_defs[] = {
.issue = io_write, .issue = io_write,
.prep_async = io_writev_prep_async, .prep_async = io_writev_prep_async,
.cleanup = io_readv_writev_cleanup, .cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
}, },
[IORING_OP_FSYNC] = { [IORING_OP_FSYNC] = {
.needs_file = 1, .needs_file = 1,
...@@ -105,6 +107,7 @@ const struct io_op_def io_op_defs[] = { ...@@ -105,6 +107,7 @@ const struct io_op_def io_op_defs[] = {
.name = "READ_FIXED", .name = "READ_FIXED",
.prep = io_prep_rw, .prep = io_prep_rw,
.issue = io_read, .issue = io_read,
.fail = io_rw_fail,
}, },
[IORING_OP_WRITE_FIXED] = { [IORING_OP_WRITE_FIXED] = {
.needs_file = 1, .needs_file = 1,
...@@ -119,6 +122,7 @@ const struct io_op_def io_op_defs[] = { ...@@ -119,6 +122,7 @@ const struct io_op_def io_op_defs[] = {
.name = "WRITE_FIXED", .name = "WRITE_FIXED",
.prep = io_prep_rw, .prep = io_prep_rw,
.issue = io_write, .issue = io_write,
.fail = io_rw_fail,
}, },
[IORING_OP_POLL_ADD] = { [IORING_OP_POLL_ADD] = {
.needs_file = 1, .needs_file = 1,
...@@ -146,6 +150,7 @@ const struct io_op_def io_op_defs[] = { ...@@ -146,6 +150,7 @@ const struct io_op_def io_op_defs[] = {
.unbound_nonreg_file = 1, .unbound_nonreg_file = 1,
.pollout = 1, .pollout = 1,
.ioprio = 1, .ioprio = 1,
.manual_alloc = 1,
.name = "SENDMSG", .name = "SENDMSG",
#if defined(CONFIG_NET) #if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr), .async_size = sizeof(struct io_async_msghdr),
...@@ -153,6 +158,7 @@ const struct io_op_def io_op_defs[] = { ...@@ -153,6 +158,7 @@ const struct io_op_def io_op_defs[] = {
.issue = io_sendmsg, .issue = io_sendmsg,
.prep_async = io_sendmsg_prep_async, .prep_async = io_sendmsg_prep_async,
.cleanup = io_sendmsg_recvmsg_cleanup, .cleanup = io_sendmsg_recvmsg_cleanup,
.fail = io_sendrecv_fail,
#else #else
.prep = io_eopnotsupp_prep, .prep = io_eopnotsupp_prep,
#endif #endif
...@@ -163,6 +169,7 @@ const struct io_op_def io_op_defs[] = { ...@@ -163,6 +169,7 @@ const struct io_op_def io_op_defs[] = {
.pollin = 1, .pollin = 1,
.buffer_select = 1, .buffer_select = 1,
.ioprio = 1, .ioprio = 1,
.manual_alloc = 1,
.name = "RECVMSG", .name = "RECVMSG",
#if defined(CONFIG_NET) #if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr), .async_size = sizeof(struct io_async_msghdr),
...@@ -170,6 +177,7 @@ const struct io_op_def io_op_defs[] = { ...@@ -170,6 +177,7 @@ const struct io_op_def io_op_defs[] = {
.issue = io_recvmsg, .issue = io_recvmsg,
.prep_async = io_recvmsg_prep_async, .prep_async = io_recvmsg_prep_async,
.cleanup = io_sendmsg_recvmsg_cleanup, .cleanup = io_sendmsg_recvmsg_cleanup,
.fail = io_sendrecv_fail,
#else #else
.prep = io_eopnotsupp_prep, .prep = io_eopnotsupp_prep,
#endif #endif
...@@ -273,6 +281,7 @@ const struct io_op_def io_op_defs[] = { ...@@ -273,6 +281,7 @@ const struct io_op_def io_op_defs[] = {
.name = "READ", .name = "READ",
.prep = io_prep_rw, .prep = io_prep_rw,
.issue = io_read, .issue = io_read,
.fail = io_rw_fail,
}, },
[IORING_OP_WRITE] = { [IORING_OP_WRITE] = {
.needs_file = 1, .needs_file = 1,
...@@ -287,6 +296,7 @@ const struct io_op_def io_op_defs[] = { ...@@ -287,6 +296,7 @@ const struct io_op_def io_op_defs[] = {
.name = "WRITE", .name = "WRITE",
.prep = io_prep_rw, .prep = io_prep_rw,
.issue = io_write, .issue = io_write,
.fail = io_rw_fail,
}, },
[IORING_OP_FADVISE] = { [IORING_OP_FADVISE] = {
.needs_file = 1, .needs_file = 1,
...@@ -306,10 +316,14 @@ const struct io_op_def io_op_defs[] = { ...@@ -306,10 +316,14 @@ const struct io_op_def io_op_defs[] = {
.pollout = 1, .pollout = 1,
.audit_skip = 1, .audit_skip = 1,
.ioprio = 1, .ioprio = 1,
.manual_alloc = 1,
.name = "SEND", .name = "SEND",
#if defined(CONFIG_NET) #if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr),
.prep = io_sendmsg_prep, .prep = io_sendmsg_prep,
.issue = io_send, .issue = io_send,
.fail = io_sendrecv_fail,
.prep_async = io_send_prep_async,
#else #else
.prep = io_eopnotsupp_prep, .prep = io_eopnotsupp_prep,
#endif #endif
...@@ -325,6 +339,7 @@ const struct io_op_def io_op_defs[] = { ...@@ -325,6 +339,7 @@ const struct io_op_def io_op_defs[] = {
#if defined(CONFIG_NET) #if defined(CONFIG_NET)
.prep = io_recvmsg_prep, .prep = io_recvmsg_prep,
.issue = io_recv, .issue = io_recv,
.fail = io_sendrecv_fail,
#else #else
.prep = io_eopnotsupp_prep, .prep = io_eopnotsupp_prep,
#endif #endif
...@@ -465,6 +480,7 @@ const struct io_op_def io_op_defs[] = { ...@@ -465,6 +480,7 @@ const struct io_op_def io_op_defs[] = {
.needs_file = 1, .needs_file = 1,
.plug = 1, .plug = 1,
.name = "URING_CMD", .name = "URING_CMD",
.iopoll = 1,
.async_size = uring_cmd_pdu_size(1), .async_size = uring_cmd_pdu_size(1),
.prep = io_uring_cmd_prep, .prep = io_uring_cmd_prep,
.issue = io_uring_cmd, .issue = io_uring_cmd,
...@@ -480,10 +496,30 @@ const struct io_op_def io_op_defs[] = { ...@@ -480,10 +496,30 @@ const struct io_op_def io_op_defs[] = {
.manual_alloc = 1, .manual_alloc = 1,
#if defined(CONFIG_NET) #if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr), .async_size = sizeof(struct io_async_msghdr),
.prep = io_sendzc_prep, .prep = io_send_zc_prep,
.issue = io_sendzc, .issue = io_send_zc,
.prep_async = io_sendzc_prep_async, .prep_async = io_send_prep_async,
.cleanup = io_sendzc_cleanup, .cleanup = io_send_zc_cleanup,
.fail = io_sendrecv_fail,
#else
.prep = io_eopnotsupp_prep,
#endif
},
[IORING_OP_SENDMSG_ZC] = {
.name = "SENDMSG_ZC",
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
.audit_skip = 1,
.ioprio = 1,
.manual_alloc = 1,
#if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr),
.prep = io_send_zc_prep,
.issue = io_sendmsg_zc,
.prep_async = io_sendmsg_prep_async,
.cleanup = io_send_zc_cleanup,
.fail = io_sendrecv_fail,
#else #else
.prep = io_eopnotsupp_prep, .prep = io_eopnotsupp_prep,
#endif #endif
......
...@@ -36,6 +36,7 @@ struct io_op_def { ...@@ -36,6 +36,7 @@ struct io_op_def {
int (*issue)(struct io_kiocb *, unsigned int); int (*issue)(struct io_kiocb *, unsigned int);
int (*prep_async)(struct io_kiocb *); int (*prep_async)(struct io_kiocb *);
void (*cleanup)(struct io_kiocb *); void (*cleanup)(struct io_kiocb *);
void (*fail)(struct io_kiocb *);
}; };
extern const struct io_op_def io_op_defs[]; extern const struct io_op_def io_op_defs[];
......
...@@ -341,7 +341,7 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, ...@@ -341,7 +341,7 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
flush_delayed_work(&ctx->rsrc_put_work); flush_delayed_work(&ctx->rsrc_put_work);
reinit_completion(&data->done); reinit_completion(&data->done);
ret = io_run_task_work_sig(); ret = io_run_task_work_sig(ctx);
mutex_lock(&ctx->uring_lock); mutex_lock(&ctx->uring_lock);
} while (ret >= 0); } while (ret >= 0);
data->quiesce = false; data->quiesce = false;
......
...@@ -33,6 +33,46 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req) ...@@ -33,6 +33,46 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req)
return req->flags & REQ_F_SUPPORT_NOWAIT; return req->flags & REQ_F_SUPPORT_NOWAIT;
} }
#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
struct compat_iovec __user *uiov;
compat_ssize_t clen;
uiov = u64_to_user_ptr(rw->addr);
if (!access_ok(uiov, sizeof(*uiov)))
return -EFAULT;
if (__get_user(clen, &uiov->iov_len))
return -EFAULT;
if (clen < 0)
return -EINVAL;
rw->len = clen;
return 0;
}
#endif
static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
struct iovec __user *uiov;
struct iovec iov;
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
if (rw->len != 1)
return -EINVAL;
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
return io_iov_compat_buffer_select_prep(rw);
#endif
uiov = u64_to_user_ptr(rw->addr);
if (copy_from_user(&iov, uiov, sizeof(*uiov)))
return -EFAULT;
rw->len = iov.iov_len;
return 0;
}
int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{ {
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
...@@ -69,6 +109,16 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -69,6 +109,16 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
rw->addr = READ_ONCE(sqe->addr); rw->addr = READ_ONCE(sqe->addr);
rw->len = READ_ONCE(sqe->len); rw->len = READ_ONCE(sqe->len);
rw->flags = READ_ONCE(sqe->rw_flags); rw->flags = READ_ONCE(sqe->rw_flags);
/* Have to do this validation here, as by the time io_read() runs rw->len
* might have changed due to buffer selection
*/
if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
ret = io_iov_buffer_select_prep(req);
if (ret)
return ret;
}
return 0; return 0;
} }
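
Moving the iovec validation into io_prep_rw() means an IORING_OP_READV request using IOSQE_BUFFER_SELECT must pass exactly one iovec, and only its iov_len matters: it caps the read, while the data lands in a buffer picked from the provided-buffer group. A hedged liburing sketch, not part of the commit; the group id and the buffers themselves are assumed to have been registered elsewhere (e.g. via io_uring_prep_provide_buffers()):

#include <liburing.h>
#include <sys/uio.h>

#define DEMO_BGID	7	/* made-up provided-buffer group id */

/* On a default (non-SQPOLL) ring the kernel reads iov_len during submit;
 * a static iovec keeps the sketch short. */
static struct iovec demo_iov;

static int queue_readv_bselect(struct io_uring *ring, int fd, size_t max_len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	demo_iov.iov_base = NULL;	/* ignored with buffer selection */
	demo_iov.iov_len = max_len;	/* upper bound for the read */
	io_uring_prep_readv(sqe, fd, &demo_iov, 1, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = DEMO_BGID;
	return io_uring_submit(ring);
}
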
...@@ -186,14 +236,6 @@ static void kiocb_end_write(struct io_kiocb *req) ...@@ -186,14 +236,6 @@ static void kiocb_end_write(struct io_kiocb *req)
static bool __io_complete_rw_common(struct io_kiocb *req, long res) static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{ {
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
if (rw->kiocb.ki_flags & IOCB_WRITE) {
kiocb_end_write(req);
fsnotify_modify(req->file);
} else {
fsnotify_access(req->file);
}
if (unlikely(res != req->cqe.res)) { if (unlikely(res != req->cqe.res)) {
if ((res == -EAGAIN || res == -EOPNOTSUPP) && if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
io_rw_should_reissue(req)) { io_rw_should_reissue(req)) {
...@@ -220,6 +262,20 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res) ...@@ -220,6 +262,20 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
return res; return res;
} }
static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
if (rw->kiocb.ki_flags & IOCB_WRITE) {
kiocb_end_write(req);
fsnotify_modify(req->file);
} else {
fsnotify_access(req->file);
}
io_req_task_complete(req, locked);
}
static void io_complete_rw(struct kiocb *kiocb, long res) static void io_complete_rw(struct kiocb *kiocb, long res)
{ {
struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb); struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
...@@ -228,7 +284,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res) ...@@ -228,7 +284,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
if (__io_complete_rw_common(req, res)) if (__io_complete_rw_common(req, res))
return; return;
io_req_set_res(req, io_fixup_rw_res(req, res), 0); io_req_set_res(req, io_fixup_rw_res(req, res), 0);
req->io_task_work.func = io_req_task_complete; req->io_task_work.func = io_req_rw_complete;
io_req_task_work_add(req); io_req_task_work_add(req);
} }
...@@ -279,79 +335,6 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret, ...@@ -279,79 +335,6 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
return IOU_ISSUE_SKIP_COMPLETE; return IOU_ISSUE_SKIP_COMPLETE;
} }
#ifdef CONFIG_COMPAT
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
unsigned int issue_flags)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct compat_iovec __user *uiov;
compat_ssize_t clen;
void __user *buf;
size_t len;
uiov = u64_to_user_ptr(rw->addr);
if (!access_ok(uiov, sizeof(*uiov)))
return -EFAULT;
if (__get_user(clen, &uiov->iov_len))
return -EFAULT;
if (clen < 0)
return -EINVAL;
len = clen;
buf = io_buffer_select(req, &len, issue_flags);
if (!buf)
return -ENOBUFS;
rw->addr = (unsigned long) buf;
iov[0].iov_base = buf;
rw->len = iov[0].iov_len = (compat_size_t) len;
return 0;
}
#endif
static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
unsigned int issue_flags)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct iovec __user *uiov = u64_to_user_ptr(rw->addr);
void __user *buf;
ssize_t len;
if (copy_from_user(iov, uiov, sizeof(*uiov)))
return -EFAULT;
len = iov[0].iov_len;
if (len < 0)
return -EINVAL;
buf = io_buffer_select(req, &len, issue_flags);
if (!buf)
return -ENOBUFS;
rw->addr = (unsigned long) buf;
iov[0].iov_base = buf;
rw->len = iov[0].iov_len = len;
return 0;
}
static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
unsigned int issue_flags)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
iov[0].iov_base = u64_to_user_ptr(rw->addr);
iov[0].iov_len = rw->len;
return 0;
}
if (rw->len != 1)
return -EINVAL;
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
return io_compat_import(req, iov, issue_flags);
#endif
return __io_iov_buffer_select(req, iov, issue_flags);
}
static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req, static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
struct io_rw_state *s, struct io_rw_state *s,
unsigned int issue_flags) unsigned int issue_flags)
...@@ -374,7 +357,8 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req, ...@@ -374,7 +357,8 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
buf = u64_to_user_ptr(rw->addr); buf = u64_to_user_ptr(rw->addr);
sqe_len = rw->len; sqe_len = rw->len;
if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) { if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
(req->flags & REQ_F_BUFFER_SELECT)) {
if (io_do_buffer_select(req)) { if (io_do_buffer_select(req)) {
buf = io_buffer_select(req, &sqe_len, issue_flags); buf = io_buffer_select(req, &sqe_len, issue_flags);
if (!buf) if (!buf)
...@@ -390,14 +374,6 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req, ...@@ -390,14 +374,6 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
} }
iovec = s->fast_iov; iovec = s->fast_iov;
if (req->flags & REQ_F_BUFFER_SELECT) {
ret = io_iov_buffer_select(req, iovec, issue_flags);
if (ret)
return ERR_PTR(ret);
iov_iter_init(iter, ddir, iovec, 1, iovec->iov_len);
return NULL;
}
ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter, ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
req->ctx->compat); req->ctx->compat);
if (unlikely(ret < 0)) if (unlikely(ret < 0))
...@@ -794,10 +770,12 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags) ...@@ -794,10 +770,12 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
iov_iter_restore(&s->iter, &s->iter_state); iov_iter_restore(&s->iter, &s->iter_state);
ret2 = io_setup_async_rw(req, iovec, s, true); ret2 = io_setup_async_rw(req, iovec, s, true);
if (ret2)
return ret2;
iovec = NULL; iovec = NULL;
if (ret2) {
ret = ret > 0 ? ret : ret2;
goto done;
}
io = req->async_data; io = req->async_data;
s = &io->s; s = &io->s;
/* /*
...@@ -823,6 +801,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags) ...@@ -823,6 +801,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
return -EAGAIN; return -EAGAIN;
} }
req->cqe.res = iov_iter_count(&s->iter);
/* /*
* Now retry read with the IOCB_WAITQ parts set in the iocb. If * Now retry read with the IOCB_WAITQ parts set in the iocb. If
* we get -EIOCBQUEUED, then we'll get a notification when the * we get -EIOCBQUEUED, then we'll get a notification when the
...@@ -984,6 +963,14 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx) ...@@ -984,6 +963,14 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
io_cqring_wake(ctx); io_cqring_wake(ctx);
} }
void io_rw_fail(struct io_kiocb *req)
{
int res;
res = io_fixup_rw_res(req, req->cqe.res);
io_req_set_res(req, res, req->cqe.flags);
}
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{ {
struct io_wq_work_node *pos, *start, *prev; struct io_wq_work_node *pos, *start, *prev;
...@@ -1000,7 +987,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) ...@@ -1000,7 +987,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
wq_list_for_each(pos, start, &ctx->iopoll_list) { wq_list_for_each(pos, start, &ctx->iopoll_list) {
struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); struct file *file = req->file;
int ret; int ret;
/* /*
...@@ -1011,7 +998,17 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) ...@@ -1011,7 +998,17 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
if (READ_ONCE(req->iopoll_completed)) if (READ_ONCE(req->iopoll_completed))
break; break;
ret = rw->kiocb.ki_filp->f_op->iopoll(&rw->kiocb, &iob, poll_flags); if (req->opcode == IORING_OP_URING_CMD) {
struct io_uring_cmd *ioucmd;
ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
poll_flags);
} else {
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
}
if (unlikely(ret < 0)) if (unlikely(ret < 0))
return ret; return ret;
else if (ret) else if (ret)
......
...@@ -21,3 +21,4 @@ int io_readv_prep_async(struct io_kiocb *req); ...@@ -21,3 +21,4 @@ int io_readv_prep_async(struct io_kiocb *req);
int io_write(struct io_kiocb *req, unsigned int issue_flags); int io_write(struct io_kiocb *req, unsigned int issue_flags);
int io_writev_prep_async(struct io_kiocb *req); int io_writev_prep_async(struct io_kiocb *req);
void io_readv_writev_cleanup(struct io_kiocb *req); void io_readv_writev_cleanup(struct io_kiocb *req);
void io_rw_fail(struct io_kiocb *req);
...@@ -149,11 +149,10 @@ static inline void io_remove_next_linked(struct io_kiocb *req) ...@@ -149,11 +149,10 @@ static inline void io_remove_next_linked(struct io_kiocb *req)
nxt->link = NULL; nxt->link = NULL;
} }
bool io_disarm_next(struct io_kiocb *req) void io_disarm_next(struct io_kiocb *req)
__must_hold(&req->ctx->completion_lock) __must_hold(&req->ctx->completion_lock)
{ {
struct io_kiocb *link = NULL; struct io_kiocb *link = NULL;
bool posted = false;
if (req->flags & REQ_F_ARM_LTIMEOUT) { if (req->flags & REQ_F_ARM_LTIMEOUT) {
link = req->link; link = req->link;
...@@ -161,7 +160,6 @@ bool io_disarm_next(struct io_kiocb *req) ...@@ -161,7 +160,6 @@ bool io_disarm_next(struct io_kiocb *req)
if (link && link->opcode == IORING_OP_LINK_TIMEOUT) { if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
io_remove_next_linked(req); io_remove_next_linked(req);
io_req_tw_post_queue(link, -ECANCELED, 0); io_req_tw_post_queue(link, -ECANCELED, 0);
posted = true;
} }
} else if (req->flags & REQ_F_LINK_TIMEOUT) { } else if (req->flags & REQ_F_LINK_TIMEOUT) {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
...@@ -169,17 +167,12 @@ bool io_disarm_next(struct io_kiocb *req) ...@@ -169,17 +167,12 @@ bool io_disarm_next(struct io_kiocb *req)
spin_lock_irq(&ctx->timeout_lock); spin_lock_irq(&ctx->timeout_lock);
link = io_disarm_linked_timeout(req); link = io_disarm_linked_timeout(req);
spin_unlock_irq(&ctx->timeout_lock); spin_unlock_irq(&ctx->timeout_lock);
if (link) { if (link)
posted = true;
io_req_tw_post_queue(link, -ECANCELED, 0); io_req_tw_post_queue(link, -ECANCELED, 0);
}
} }
if (unlikely((req->flags & REQ_F_FAIL) && if (unlikely((req->flags & REQ_F_FAIL) &&
!(req->flags & REQ_F_HARDLINK))) { !(req->flags & REQ_F_HARDLINK)))
posted |= (req->link != NULL);
io_fail_links(req); io_fail_links(req);
}
return posted;
} }
struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req, struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
......
...@@ -27,7 +27,7 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd); ...@@ -27,7 +27,7 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd);
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
bool cancel_all); bool cancel_all);
void io_queue_linked_timeout(struct io_kiocb *req); void io_queue_linked_timeout(struct io_kiocb *req);
bool io_disarm_next(struct io_kiocb *req); void io_disarm_next(struct io_kiocb *req);
int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
......
...@@ -50,7 +50,11 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2) ...@@ -50,7 +50,11 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
io_req_set_res(req, ret, 0); io_req_set_res(req, ret, 0);
if (req->ctx->flags & IORING_SETUP_CQE32) if (req->ctx->flags & IORING_SETUP_CQE32)
io_req_set_cqe32_extra(req, res2, 0); io_req_set_cqe32_extra(req, res2, 0);
__io_req_complete(req, 0); if (req->ctx->flags & IORING_SETUP_IOPOLL)
/* order with io_iopoll_req_issued() checking ->iopoll_completed */
smp_store_release(&req->iopoll_completed, 1);
else
__io_req_complete(req, 0);
} }
EXPORT_SYMBOL_GPL(io_uring_cmd_done); EXPORT_SYMBOL_GPL(io_uring_cmd_done);
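
Under IOPOLL, io_uring_cmd_done() above publishes the finished command with smp_store_release(), pairing with the acquire-side read in the iopoll reaping path so that the result written beforehand is visible once the flag reads 1. A generic C11 sketch of that publication pattern, not kernel code and with made-up names:

#include <stdatomic.h>

struct polled_req {
	long result;			/* written by the completer */
	atomic_int completed;		/* 0 = in flight, 1 = done */
};

/* Completion side: analogous to io_uring_cmd_done() under IOPOLL. */
static void complete_req(struct polled_req *req, long res)
{
	req->result = res;
	/* store-release: result is visible before the flag flips */
	atomic_store_explicit(&req->completed, 1, memory_order_release);
}

/* Polling side: analogous to the iopoll loop checking ->iopoll_completed. */
static int reap_req(struct polled_req *req, long *res)
{
	if (!atomic_load_explicit(&req->completed, memory_order_acquire))
		return 0;	/* still in flight, keep polling */
	*res = req->result;	/* safe: acquire pairs with the release above */
	return 1;
}
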
...@@ -97,8 +101,11 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags) ...@@ -97,8 +101,11 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
issue_flags |= IO_URING_F_SQE128; issue_flags |= IO_URING_F_SQE128;
if (ctx->flags & IORING_SETUP_CQE32) if (ctx->flags & IORING_SETUP_CQE32)
issue_flags |= IO_URING_F_CQE32; issue_flags |= IO_URING_F_CQE32;
if (ctx->flags & IORING_SETUP_IOPOLL) if (ctx->flags & IORING_SETUP_IOPOLL) {
issue_flags |= IO_URING_F_IOPOLL; issue_flags |= IO_URING_F_IOPOLL;
req->iopoll_completed = 0;
WRITE_ONCE(ioucmd->cookie, NULL);
}
if (req_has_async_data(req)) if (req_has_async_data(req))
ioucmd->cmd = req->async_data; ioucmd->cmd = req->async_data;
......
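
With io_do_iopoll() dispatching to ->uring_cmd_iopoll and io_uring_cmd_done() completing in place, IORING_OP_URING_CMD can be driven from a polled ring. A hedged setup sketch, not part of the commit: NVMe passthrough additionally wants SQE128/CQE32 for the big command payload and result, the underlying device queue must itself be configured for polling, and the command encoding is device-specific and omitted here.

#include <liburing.h>
#include <string.h>

/* Create a ring suitable for polled passthrough commands. */
static int setup_polled_cmd_ring(struct io_uring *ring, unsigned entries)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_IOPOLL |	/* reap completions by polling */
		  IORING_SETUP_SQE128 |	/* room for the passthrough command */
		  IORING_SETUP_CQE32;	/* wide CQEs for the command result */
	return io_uring_queue_init_params(entries, ring, &p);
}
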
...@@ -400,7 +400,6 @@ static void do_tx(int domain, int type, int protocol) ...@@ -400,7 +400,6 @@ static void do_tx(int domain, int type, int protocol)
cfg_payload_len, msg_flags); cfg_payload_len, msg_flags);
sqe->user_data = NONZC_TAG; sqe->user_data = NONZC_TAG;
} else { } else {
compl_cqes++;
io_uring_prep_sendzc(sqe, fd, payload, io_uring_prep_sendzc(sqe, fd, payload,
cfg_payload_len, cfg_payload_len,
msg_flags, zc_flags); msg_flags, zc_flags);
...@@ -430,18 +429,23 @@ static void do_tx(int domain, int type, int protocol) ...@@ -430,18 +429,23 @@ static void do_tx(int domain, int type, int protocol)
if (cqe->flags & IORING_CQE_F_NOTIF) { if (cqe->flags & IORING_CQE_F_NOTIF) {
if (cqe->flags & IORING_CQE_F_MORE) if (cqe->flags & IORING_CQE_F_MORE)
error(1, -EINVAL, "invalid notif flags"); error(1, -EINVAL, "invalid notif flags");
if (compl_cqes <= 0)
error(1, -EINVAL, "notification mismatch");
compl_cqes--; compl_cqes--;
i--; i--;
} else if (cqe->res <= 0) { io_uring_cqe_seen(&ring);
if (cqe->flags & IORING_CQE_F_MORE) continue;
error(1, cqe->res, "more with a failed send"); }
error(1, cqe->res, "send failed"); if (cqe->flags & IORING_CQE_F_MORE) {
} else { if (cqe->user_data != ZC_TAG)
if (cqe->user_data == ZC_TAG && error(1, cqe->res, "unexpected F_MORE");
!(cqe->flags & IORING_CQE_F_MORE)) compl_cqes++;
error(1, cqe->res, "missing more flag"); }
if (cqe->res >= 0) {
packets++; packets++;
bytes += cqe->res; bytes += cqe->res;
} else if (cqe->res != -EAGAIN) {
error(1, cqe->res, "send failed");
} }
io_uring_cqe_seen(&ring); io_uring_cqe_seen(&ring);
} }
......
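
The selftest adjustment above matches the completion model the net changes implement: every zero-copy send first posts its result CQE (flagged IORING_CQE_F_MORE), and a separate IORING_CQE_F_NOTIF CQE follows once the pages may be reused. A hedged drain-loop sketch against stock liburing (the selftest itself uses its own minimal ring helpers), not part of the commit:

#include <liburing.h>

/* Drain CQEs for outstanding zero-copy sends; returns bytes sent or <0. */
static long reap_zc_sends(struct io_uring *ring, int inflight)
{
	long bytes = 0;
	int pending_notifs = 0;

	while (inflight > 0 || pending_notifs > 0) {
		struct io_uring_cqe *cqe;
		int ret = io_uring_wait_cqe(ring, &cqe);

		if (ret < 0)
			return ret;
		if (cqe->flags & IORING_CQE_F_NOTIF) {
			/* second CQE: the send buffer may be reused now */
			pending_notifs--;
		} else {
			/* first CQE: send result; F_MORE promises a notif */
			if (cqe->flags & IORING_CQE_F_MORE)
				pending_notifs++;
			if (cqe->res >= 0)
				bytes += cqe->res;
			inflight--;
		}
		io_uring_cqe_seen(ring, cqe);
	}
	return bytes;
}
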