Commit 1ee08de1 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-5.8/io_uring-2020-06-01' of git://git.kernel.dk/linux-block

Pull io_uring updates from Jens Axboe:
 "A relatively quiet round, mostly just fixes and code improvements. In
particular:

   - Make statx just use the generic statx handler, instead of open
     coding it. We don't need that anymore, as we always call it async
     safe (Bijan)

   - Enable closing of the ring itself. Also fixes O_PATH closure (me)

   - Properly name completion members (me)

   - Batch reap of dead file registrations (me)

   - Allow IORING_OP_POLL with double waitqueues (me)

   - Add tee(2) support (Pavel)

   - Remove double off read (Pavel)

   - Fix overflow cancellations (Pavel)

   - Improve CQ timeouts (Pavel)

   - Async defer drain fixes (Pavel)

   - Add support for enabling/disabling notifications on a registered
     eventfd (Stefano)

   - Remove dead state parameter (Xiaoguang)

   - Disable SQPOLL submit on dying ctx (Xiaoguang)

   - Various code cleanups"

* tag 'for-5.8/io_uring-2020-06-01' of git://git.kernel.dk/linux-block: (29 commits)
  io_uring: fix overflowed reqs cancellation
  io_uring: off timeouts based only on completions
  io_uring: move timeouts flushing to a helper
  statx: hide interfaces no longer used by io_uring
  io_uring: call statx directly
  statx: allow system call to be invoked from io_uring
  io_uring: add io_statx structure
  io_uring: get rid of manual punting in io_close
  io_uring: separate DRAIN flushing into a cold path
  io_uring: don't re-read sqe->off in timeout_prep()
  io_uring: simplify io_timeout locking
  io_uring: fix flush req->refs underflow
  io_uring: don't submit sqes when ctx->refs is dying
  io_uring: async task poll trigger cleanup
  io_uring: add tee(2) support
  splice: export do_tee()
  io_uring: don't repeat valid flag list
  io_uring: rename io_file_put()
  io_uring: remove req->needs_fixed_files
  io_uring: cleanup io_poll_remove_one() logic
  ...
parents bce159d7 7b53d598
...@@ -185,5 +185,5 @@ int sb_init_dio_done_wq(struct super_block *sb); ...@@ -185,5 +185,5 @@ int sb_init_dio_done_wq(struct super_block *sb);
/* /*
* fs/stat.c: * fs/stat.c:
*/ */
unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, int flags); int do_statx(int dfd, const char __user *filename, unsigned flags,
int cp_statx(const struct kstat *stat, struct statx __user *buffer); unsigned int mask, struct statx __user *buffer);
This diff is collapsed.
...@@ -1754,8 +1754,7 @@ static int link_pipe(struct pipe_inode_info *ipipe, ...@@ -1754,8 +1754,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
* The 'flags' used are the SPLICE_F_* variants, currently the only * The 'flags' used are the SPLICE_F_* variants, currently the only
* applicable one is SPLICE_F_NONBLOCK. * applicable one is SPLICE_F_NONBLOCK.
*/ */
static long do_tee(struct file *in, struct file *out, size_t len, long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags)
unsigned int flags)
{ {
struct pipe_inode_info *ipipe = get_pipe_info(in); struct pipe_inode_info *ipipe = get_pipe_info(in);
struct pipe_inode_info *opipe = get_pipe_info(out); struct pipe_inode_info *opipe = get_pipe_info(out);
......
...@@ -153,7 +153,8 @@ int vfs_statx_fd(unsigned int fd, struct kstat *stat, ...@@ -153,7 +153,8 @@ int vfs_statx_fd(unsigned int fd, struct kstat *stat,
} }
EXPORT_SYMBOL(vfs_statx_fd); EXPORT_SYMBOL(vfs_statx_fd);
inline unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, int flags) static inline unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags,
int flags)
{ {
if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0) AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0)
...@@ -539,7 +540,7 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename, ...@@ -539,7 +540,7 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
} }
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */ #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
noinline_for_stack int static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer) cp_statx(const struct kstat *stat, struct statx __user *buffer)
{ {
struct statx tmp; struct statx tmp;
...@@ -574,6 +575,24 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer) ...@@ -574,6 +575,24 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer)
return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0; return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
} }
/*
 * do_statx - common implementation behind the statx(2) syscall.
 *
 * Exposed (non-static) so io_uring can invoke statx directly instead of
 * open-coding it; sys_statx below is reduced to a thin wrapper around this.
 *
 * @dfd:      base directory fd for the pathwalk, or the fd to stat itself
 * @filename: userspace path (may select @dfd via AT_EMPTY_PATH semantics
 *            inside vfs_statx — NOTE(review): behavior lives in vfs_statx,
 *            not visible here)
 * @flags:    AT_* lookup/sync flags
 * @mask:     STATX_* field request mask
 * @buffer:   userspace struct statx to fill
 *
 * Returns 0 on success or a negative errno.
 */
int do_statx(int dfd, const char __user *filename, unsigned flags,
	     unsigned int mask, struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	/* Reject requests that set reserved mask bits. */
	if (mask & STATX__RESERVED)
		return -EINVAL;
	/* AT_STATX_SYNC_TYPE is a multi-bit field; all bits set is invalid. */
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	/* Copy the kernel-internal kstat out to the userspace statx buffer. */
	return cp_statx(&stat, buffer);
}
/** /**
* sys_statx - System call to get enhanced stats * sys_statx - System call to get enhanced stats
* @dfd: Base directory to pathwalk from *or* fd to stat. * @dfd: Base directory to pathwalk from *or* fd to stat.
...@@ -590,19 +609,7 @@ SYSCALL_DEFINE5(statx, ...@@ -590,19 +609,7 @@ SYSCALL_DEFINE5(statx,
unsigned int, mask, unsigned int, mask,
struct statx __user *, buffer) struct statx __user *, buffer)
{ {
struct kstat stat; return do_statx(dfd, filename, flags, mask, buffer);
int error;
if (mask & STATX__RESERVED)
return -EINVAL;
if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
return -EINVAL;
error = vfs_statx(dfd, filename, flags, &stat, mask);
if (error)
return error;
return cp_statx(&stat, buffer);
} }
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
......
...@@ -82,6 +82,9 @@ extern long do_splice(struct file *in, loff_t __user *off_in, ...@@ -82,6 +82,9 @@ extern long do_splice(struct file *in, loff_t __user *off_in,
struct file *out, loff_t __user *off_out, struct file *out, loff_t __user *off_out,
size_t len, unsigned int flags); size_t len, unsigned int flags);
extern long do_tee(struct file *in, struct file *out, size_t len,
unsigned int flags);
/* /*
* for dynamic pipe sizing * for dynamic pipe sizing
*/ */
......
...@@ -129,6 +129,7 @@ enum { ...@@ -129,6 +129,7 @@ enum {
IORING_OP_SPLICE, IORING_OP_SPLICE,
IORING_OP_PROVIDE_BUFFERS, IORING_OP_PROVIDE_BUFFERS,
IORING_OP_REMOVE_BUFFERS, IORING_OP_REMOVE_BUFFERS,
IORING_OP_TEE,
/* this goes last, obviously */ /* this goes last, obviously */
IORING_OP_LAST, IORING_OP_LAST,
...@@ -204,9 +205,18 @@ struct io_cqring_offsets { ...@@ -204,9 +205,18 @@ struct io_cqring_offsets {
__u32 ring_entries; __u32 ring_entries;
__u32 overflow; __u32 overflow;
__u32 cqes; __u32 cqes;
__u64 resv[2]; __u32 flags;
__u32 resv1;
__u64 resv2;
}; };
/*
* cq_ring->flags
*/
/* disable eventfd notifications */
#define IORING_CQ_EVENTFD_DISABLED (1U << 0)
/* /*
* io_uring_enter(2) flags * io_uring_enter(2) flags
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment