Commit 205c1d80 authored by Amir Goldstein's avatar Amir Goldstein Committed by Miklos Szeredi

fuse: allow parallel dio writes with FUSE_DIRECT_IO_ALLOW_MMAP

Instead of denying caching mode on parallel dio open, deny caching
open only while parallel dio are in-progress and wait for in-progress
parallel dio writes before entering inode caching io mode.

This allows executing parallel dio when inode is not in caching mode
even if shared mmap is allowed, but no mmaps have been performed on
the inode in question.

An mmap on direct_io file now waits for all in-progress parallel dio
writes to complete, so parallel dio writes together with
FUSE_DIRECT_IO_ALLOW_MMAP are enabled by this commit.
Signed-off-by: default avatarBernd Schubert <bschubert@ddn.com>
Signed-off-by: default avatarAmir Goldstein <amir73il@gmail.com>
Signed-off-by: default avatarMiklos Szeredi <mszeredi@redhat.com>
parent cb098dd2
...@@ -1335,6 +1335,7 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from ...@@ -1335,6 +1335,7 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from
struct file *file = iocb->ki_filp; struct file *file = iocb->ki_filp;
struct fuse_file *ff = file->private_data; struct fuse_file *ff = file->private_data;
struct inode *inode = file_inode(iocb->ki_filp); struct inode *inode = file_inode(iocb->ki_filp);
struct fuse_inode *fi = get_fuse_inode(inode);
/* Server side has to advise that it supports parallel dio writes. */ /* Server side has to advise that it supports parallel dio writes. */
if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES)) if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES))
...@@ -1347,12 +1348,9 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from ...@@ -1347,12 +1348,9 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from
if (iocb->ki_flags & IOCB_APPEND) if (iocb->ki_flags & IOCB_APPEND)
return true; return true;
/* /* shared locks are not allowed with parallel page cache IO */
* Combination of page access and direct-io is difficult, shared locks if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state))
* actually introduce a conflict. return false;
*/
if (get_fuse_conn(inode)->direct_io_allow_mmap)
return true;
/* Parallel dio beyond EOF is not supported, at least for now. */ /* Parallel dio beyond EOF is not supported, at least for now. */
if (fuse_io_past_eof(iocb, from)) if (fuse_io_past_eof(iocb, from))
...@@ -1365,6 +1363,7 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, ...@@ -1365,6 +1363,7 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
bool *exclusive) bool *exclusive)
{ {
struct inode *inode = file_inode(iocb->ki_filp); struct inode *inode = file_inode(iocb->ki_filp);
struct fuse_file *ff = iocb->ki_filp->private_data;
*exclusive = fuse_dio_wr_exclusive_lock(iocb, from); *exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
if (*exclusive) { if (*exclusive) {
...@@ -1372,10 +1371,14 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, ...@@ -1372,10 +1371,14 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
} else { } else {
inode_lock_shared(inode); inode_lock_shared(inode);
/* /*
* Previous check was without inode lock and might have raced, * New parallel dio allowed only if inode is not in caching
* check again. * mode and denies new opens in caching mode. This check
* should be performed only after taking shared inode lock.
* Previous past eof check was without inode lock and might
* have raced, so check it again.
*/ */
if (fuse_io_past_eof(iocb, from)) { if (fuse_io_past_eof(iocb, from) ||
fuse_file_uncached_io_start(inode, ff) != 0) {
inode_unlock_shared(inode); inode_unlock_shared(inode);
inode_lock(inode); inode_lock(inode);
*exclusive = true; *exclusive = true;
...@@ -1383,11 +1386,16 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, ...@@ -1383,11 +1386,16 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
} }
} }
static void fuse_dio_unlock(struct inode *inode, bool exclusive) static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
{ {
struct inode *inode = file_inode(iocb->ki_filp);
struct fuse_file *ff = iocb->ki_filp->private_data;
if (exclusive) { if (exclusive) {
inode_unlock(inode); inode_unlock(inode);
} else { } else {
/* Allow opens in caching mode after last parallel dio end */
fuse_file_uncached_io_end(inode, ff);
inode_unlock_shared(inode); inode_unlock_shared(inode);
} }
} }
...@@ -1669,7 +1677,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) ...@@ -1669,7 +1677,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
fuse_write_update_attr(inode, iocb->ki_pos, res); fuse_write_update_attr(inode, iocb->ki_pos, res);
} }
} }
fuse_dio_unlock(inode, exclusive); fuse_dio_unlock(iocb, exclusive);
return res; return res;
} }
...@@ -2523,6 +2531,10 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -2523,6 +2531,10 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
if (FUSE_IS_DAX(file_inode(file))) if (FUSE_IS_DAX(file_inode(file)))
return fuse_dax_mmap(file, vma); return fuse_dax_mmap(file, vma);
/*
* FOPEN_DIRECT_IO handling is special compared to O_DIRECT,
* as it does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP.
*/
if (ff->open_flags & FOPEN_DIRECT_IO) { if (ff->open_flags & FOPEN_DIRECT_IO) {
/* /*
* Can't provide the coherency needed for MAP_SHARED * Can't provide the coherency needed for MAP_SHARED
...@@ -2538,7 +2550,11 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -2538,7 +2550,11 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
return generic_file_mmap(file, vma); return generic_file_mmap(file, vma);
} }
/* First mmap of direct_io file enters caching inode io mode. */ /*
* First mmap of direct_io file enters caching inode io mode.
* Also waits for parallel dio writers to go into serial mode
* (exclusive instead of shared lock).
*/
rc = fuse_file_cached_io_start(file_inode(file), ff); rc = fuse_file_cached_io_start(file_inode(file), ff);
if (rc) if (rc)
return rc; return rc;
...@@ -3312,6 +3328,7 @@ void fuse_init_file_inode(struct inode *inode, unsigned int flags) ...@@ -3312,6 +3328,7 @@ void fuse_init_file_inode(struct inode *inode, unsigned int flags)
fi->writectr = 0; fi->writectr = 0;
fi->iocachectr = 0; fi->iocachectr = 0;
init_waitqueue_head(&fi->page_waitq); init_waitqueue_head(&fi->page_waitq);
init_waitqueue_head(&fi->direct_io_waitq);
fi->writepages = RB_ROOT; fi->writepages = RB_ROOT;
if (IS_ENABLED(CONFIG_FUSE_DAX)) if (IS_ENABLED(CONFIG_FUSE_DAX))
......
...@@ -129,6 +129,9 @@ struct fuse_inode { ...@@ -129,6 +129,9 @@ struct fuse_inode {
/* Waitq for writepage completion */ /* Waitq for writepage completion */
wait_queue_head_t page_waitq; wait_queue_head_t page_waitq;
/* waitq for direct-io completion */
wait_queue_head_t direct_io_waitq;
/* List of writepage requests (pending or sent) */ /* List of writepage requests (pending or sent) */
struct rb_root writepages; struct rb_root writepages;
}; };
...@@ -1353,6 +1356,8 @@ int fuse_fileattr_set(struct mnt_idmap *idmap, ...@@ -1353,6 +1356,8 @@ int fuse_fileattr_set(struct mnt_idmap *idmap,
/* iomode.c */ /* iomode.c */
int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff); int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff);
int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff);
void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff);
int fuse_file_io_open(struct file *file, struct inode *inode); int fuse_file_io_open(struct file *file, struct inode *inode);
void fuse_file_io_release(struct fuse_file *ff, struct inode *inode); void fuse_file_io_release(struct fuse_file *ff, struct inode *inode);
......
...@@ -13,21 +13,37 @@ ...@@ -13,21 +13,37 @@
#include <linux/fs.h> #include <linux/fs.h>
/* /*
* Start cached io mode, where parallel dio writes are not allowed. * Return true if we need to wait for new opens in caching mode.
*/
static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi)
{
return READ_ONCE(fi->iocachectr) < 0;
}
/*
* Start cached io mode.
*
* Blocks new parallel dio writes and waits for the in-progress parallel dio
* writes to complete.
*/ */
int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff) int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff)
{ {
struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_inode *fi = get_fuse_inode(inode);
int err = 0;
/* There are no io modes if server does not implement open */ /* There are no io modes if server does not implement open */
if (!ff->release_args) if (!ff->release_args)
return 0; return 0;
spin_lock(&fi->lock); spin_lock(&fi->lock);
if (fi->iocachectr < 0) { /*
err = -ETXTBSY; * Setting the bit advises new direct-io writes to use an exclusive
goto unlock; * lock - without it the wait below might be forever.
*/
while (fuse_is_io_cache_wait(fi)) {
set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
spin_unlock(&fi->lock);
wait_event(fi->direct_io_waitq, !fuse_is_io_cache_wait(fi));
spin_lock(&fi->lock);
} }
WARN_ON(ff->iomode == IOM_UNCACHED); WARN_ON(ff->iomode == IOM_UNCACHED);
if (ff->iomode == IOM_NONE) { if (ff->iomode == IOM_NONE) {
...@@ -36,9 +52,8 @@ int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff) ...@@ -36,9 +52,8 @@ int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff)
set_bit(FUSE_I_CACHE_IO_MODE, &fi->state); set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
fi->iocachectr++; fi->iocachectr++;
} }
unlock:
spin_unlock(&fi->lock); spin_unlock(&fi->lock);
return err; return 0;
} }
static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff) static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff)
...@@ -56,7 +71,7 @@ static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff) ...@@ -56,7 +71,7 @@ static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff)
} }
/* Start strictly uncached io mode where cache access is not allowed */ /* Start strictly uncached io mode where cache access is not allowed */
static int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff) int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff)
{ {
struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_inode *fi = get_fuse_inode(inode);
int err = 0; int err = 0;
...@@ -74,7 +89,7 @@ static int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff ...@@ -74,7 +89,7 @@ static int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff
return err; return err;
} }
static void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff) void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff)
{ {
struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_inode *fi = get_fuse_inode(inode);
...@@ -83,6 +98,8 @@ static void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff) ...@@ -83,6 +98,8 @@ static void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff)
WARN_ON(ff->iomode != IOM_UNCACHED); WARN_ON(ff->iomode != IOM_UNCACHED);
ff->iomode = IOM_NONE; ff->iomode = IOM_NONE;
fi->iocachectr++; fi->iocachectr++;
if (!fi->iocachectr)
wake_up(&fi->direct_io_waitq);
spin_unlock(&fi->lock); spin_unlock(&fi->lock);
} }
...@@ -106,21 +123,16 @@ int fuse_file_io_open(struct file *file, struct inode *inode) ...@@ -106,21 +123,16 @@ int fuse_file_io_open(struct file *file, struct inode *inode)
ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES; ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES;
/* /*
* First parallel dio open denies caching inode io mode.
* First caching file open enters caching inode io mode. * First caching file open enters caching inode io mode.
* *
* Note that if user opens a file open with O_DIRECT, but server did * Note that if user opens a file open with O_DIRECT, but server did
* not specify FOPEN_DIRECT_IO, a later fcntl() could remove O_DIRECT, * not specify FOPEN_DIRECT_IO, a later fcntl() could remove O_DIRECT,
* so we put the inode in caching mode to prevent parallel dio. * so we put the inode in caching mode to prevent parallel dio.
*/ */
if (ff->open_flags & FOPEN_DIRECT_IO) { if (ff->open_flags & FOPEN_DIRECT_IO)
if (ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) return 0;
err = fuse_file_uncached_io_start(inode, ff);
else err = fuse_file_cached_io_start(inode, ff);
return 0;
} else {
err = fuse_file_cached_io_start(inode, ff);
}
if (err) if (err)
goto fail; goto fail;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment