Commit 8e15605b authored by Linus Torvalds

Merge tag '9p-6.4-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ericvh/v9fs

Pull 9p updates from Eric Van Hensbergen:
 "This includes a number of patches that didn't quite make the cut last
  merge window while we addressed some outstanding issues and review
  comments. It includes some new caching modes for those that only want
  readahead caches and reworks how we do writeback caching so we are not
  keeping extra references around which both causes performance problems
  and uses lots of additional resources on the server.

  It also includes a new flag to force disabling of xattrs which can
  also cause major performance issues, particularly if the underlying
  filesystem on the server doesn't support them.

  Finally it adds a couple of additional mount options to better support
  directio and enabling caches when the server doesn't support
  qid.version.

  There was one late-breaking bug report that has also been included as
  its own patch where I forgot to propagate an embarassing bit-logic fix
  to the various variations of open"

* tag '9p-6.4-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ericvh/v9fs:
  fs/9p: Fix bit operation logic error
  fs/9p: Rework cache modes and add new options to Documentation
  fs/9p: remove writeback fid and fix per-file modes
  fs/9p: Add new mount modes
  9p: Add additional debug flags and open modes
  fs/9p: allow disable of xattr support on mount
  fs/9p: Remove unnecessary superblock flags
  fs/9p: Consolidate file operations and add readahead and writeback
parents a1fd058b 21e26d5e
......@@ -78,19 +78,39 @@ Options
offering several exported file systems.
cache=mode specifies a caching policy. By default, no caches are used.
none
default no cache policy, metadata and data
alike are synchronous.
loose
no attempts are made at consistency,
intended for exclusive, read-only mounts
fscache
use FS-Cache for a persistent, read-only
cache backend.
mmap
minimal cache that is only used for read-write
mmap. Nothing else is cached, like cache=none
The mode can be specified as a bitmask or by using one of the
preexisting common 'shortcuts'.
The bitmask is described below: (unspecified bits are reserved)
========== ====================================================
0b00000000 all caches disabled, mmap disabled
0b00000001 file caches enabled
0b00000010 meta-data caches enabled
0b00000100 writeback behavior (as opposed to writethrough)
0b00001000 loose caches (no explicit consistency with server)
0b10000000 fscache enabled for persistent caching
========== ====================================================
The current shortcuts and their associated bitmask are:
========= ====================================================
none 0b00000000 (no caching)
readahead 0b00000001 (only read-ahead file caching)
mmap 0b00000101 (read-ahead + writeback file cache)
loose 0b00001111 (non-coherent file and meta-data caches)
fscache 0b10001111 (persistent loose cache)
========= ====================================================
NOTE: only these shortcuts are tested modes of operation at the
moment, so using other combinations of bit-patterns is not
known to work. Work on better cache support is in progress.
IMPORTANT: loose caches (and by extension at the moment fscache)
do not necessarily validate cached values on the server. In other
words, changes on the server are not guaranteed to be reflected
on the client system. Only use this mode of operation if you
have an exclusive mount and the server will not modify the
filesystem underneath you.
debug=n specifies debug level. The debug level is a bitmask.
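The cache= bitmask above composes the same way: each shortcut is simply a
pre-built OR of the individual cache bits this series defines in
fs/9p/v9fs.h (CACHE_FILE, CACHE_META, CACHE_WRITEBACK, CACHE_LOOSE,
CACHE_FSCACHE). A minimal userspace C sketch, for illustration only, that
checks the decomposition using the bit values from the patch:

	#include <assert.h>
	#include <stdio.h>

	/* Cache bits as defined by enum p9_cache_bits in this series. */
	enum {
		CACHE_FILE      = 1 << 0,
		CACHE_META      = 1 << 1,
		CACHE_WRITEBACK = 1 << 2,
		CACHE_LOOSE     = 1 << 3,
		CACHE_FSCACHE   = 1 << 7,
	};

	int main(void)
	{
		/* Each documented shortcut is an OR of the bits above. */
		assert(0x01 == CACHE_FILE);                              /* cache=readahead */
		assert(0x05 == (CACHE_FILE | CACHE_WRITEBACK));          /* cache=mmap      */
		assert(0x0f == (CACHE_FILE | CACHE_META |
				CACHE_WRITEBACK | CACHE_LOOSE));         /* cache=loose     */
		assert(0x8f == (CACHE_FILE | CACHE_META | CACHE_WRITEBACK |
				CACHE_LOOSE | CACHE_FSCACHE));           /* cache=fscache   */
		printf("shortcut bit patterns check out\n");
		return 0;
	}

Note that get_cache_mode() in this series also falls back to kstrtoint()
with base 0, so a raw value such as cache=0x05 is accepted as well, though
per the NOTE above only the named shortcuts are tested.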
......@@ -137,6 +157,12 @@ Options
This can be used to share devices/named pipes/sockets between
hosts. This functionality will be expanded in later versions.
directio bypass page cache on all read/write operations
ignoreqv ignore qid.version==0 as a marker to ignore cache
noxattr do not offer xattr functions on this mount.
access there are four access modes.
user
if a user tries to access a file on v9fs
......
......@@ -8,9 +8,8 @@
#ifndef _9P_CACHE_H
#define _9P_CACHE_H
#include <linux/fscache.h>
#ifdef CONFIG_9P_FSCACHE
#include <linux/fscache.h>
extern int v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses,
const char *dev_name);
......
......@@ -41,14 +41,24 @@ void v9fs_fid_add(struct dentry *dentry, struct p9_fid **pfid)
*pfid = NULL;
}
static bool v9fs_is_writeable(int mode)
{
if (mode & (P9_OWRITE|P9_ORDWR))
return true;
else
return false;
}
/**
* v9fs_fid_find_inode - search for an open fid off of the inode list
* @inode: return a fid pointing to a specific inode
* @want_writeable: only consider fids which are writeable
* @uid: return a fid belonging to the specified user
* @any: ignore uid as a selection criteria
*
*/
static struct p9_fid *v9fs_fid_find_inode(struct inode *inode, kuid_t uid)
struct p9_fid *v9fs_fid_find_inode(struct inode *inode, bool want_writeable,
kuid_t uid, bool any)
{
struct hlist_head *h;
struct p9_fid *fid, *ret = NULL;
......@@ -58,7 +68,12 @@ static struct p9_fid *v9fs_fid_find_inode(struct inode *inode, kuid_t uid)
spin_lock(&inode->i_lock);
h = (struct hlist_head *)&inode->i_private;
hlist_for_each_entry(fid, h, ilist) {
if (uid_eq(fid->uid, uid)) {
if (any || uid_eq(fid->uid, uid)) {
if (want_writeable && !v9fs_is_writeable(fid->mode)) {
p9_debug(P9_DEBUG_VFS, " mode: %x not writeable?\n",
fid->mode);
continue;
}
p9_fid_get(fid);
ret = fid;
break;
......@@ -118,7 +133,7 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any)
spin_unlock(&dentry->d_lock);
} else {
if (dentry->d_inode)
ret = v9fs_fid_find_inode(dentry->d_inode, uid);
ret = v9fs_fid_find_inode(dentry->d_inode, false, uid, any);
}
return ret;
......@@ -299,28 +314,3 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
return v9fs_fid_lookup_with_uid(dentry, uid, any);
}
struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
{
int err;
struct p9_fid *fid, *ofid;
ofid = v9fs_fid_lookup_with_uid(dentry, GLOBAL_ROOT_UID, 0);
fid = clone_fid(ofid);
if (IS_ERR(fid))
goto error_out;
p9_fid_put(ofid);
/*
* writeback fid will only be used to write back the
* dirty pages. We always request for the open fid in read-write
* mode so that a partial page write which result in page
* read can work.
*/
err = p9_client_open(fid, O_RDWR);
if (err < 0) {
p9_fid_put(fid);
fid = ERR_PTR(err);
goto error_out;
}
error_out:
return fid;
}
......@@ -7,14 +7,16 @@
#ifndef FS_9P_FID_H
#define FS_9P_FID_H
#include <linux/list.h>
#include "v9fs.h"
struct p9_fid *v9fs_fid_find_inode(struct inode *inode, bool want_writeable,
kuid_t uid, bool any);
struct p9_fid *v9fs_fid_lookup(struct dentry *dentry);
static inline struct p9_fid *v9fs_parent_fid(struct dentry *dentry)
{
return v9fs_fid_lookup(dentry->d_parent);
}
void v9fs_fid_add(struct dentry *dentry, struct p9_fid **fid);
struct p9_fid *v9fs_writeback_fid(struct dentry *dentry);
void v9fs_open_fid_add(struct inode *inode, struct p9_fid **fid);
static inline struct p9_fid *clone_fid(struct p9_fid *fid)
{
......@@ -32,4 +34,31 @@ static inline struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
p9_fid_put(fid);
return nfid;
}
/**
* v9fs_fid_addmodes - add cache flags to fid mode (for client use only)
* @fid: fid to augment
* @s_flags: session info mount flags
* @s_cache: session info cache flags
* @f_flags: unix open flags
*
* make sure mode reflects flags of underlying mounts
* also qid.version == 0 reflects a synthetic or legacy file system
* NOTE: these are set after open so only reflect 9p client not
* underlying file system on server.
*/
static inline void v9fs_fid_add_modes(struct p9_fid *fid, int s_flags,
int s_cache, unsigned int f_flags)
{
if (fid->qid.type != P9_QTFILE)
return;
if ((!s_cache) ||
((fid->qid.version == 0) && !(s_flags & V9FS_IGNORE_QV)) ||
(s_flags & V9FS_DIRECT_IO) || (f_flags & O_DIRECT)) {
fid->mode |= P9L_DIRECT; /* no read or write cache */
} else if ((!(s_cache & CACHE_WRITEBACK)) ||
(f_flags & O_DSYNC) | (s_flags & V9FS_SYNC)) {
fid->mode |= P9L_NOWRITECACHE;
}
}
#endif
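For orientation only: a rough, self-contained userspace sketch that mirrors
the decision order of v9fs_fid_add_modes() above for a regular file, using
the flag values this series introduces (V9FS_* in v9fs.h, P9L_* in
include/net/9p/9p.h). It illustrates the precedence of the checks; it is
not the kernel function itself:

	#define _GNU_SOURCE /* for O_DIRECT */
	#include <fcntl.h>
	#include <stdio.h>

	/* Flag values taken from the hunks in this series. */
	#define V9FS_IGNORE_QV   0x80
	#define V9FS_DIRECT_IO   0x100
	#define V9FS_SYNC        0x200
	#define CACHE_WRITEBACK  0x04
	#define P9L_DIRECT       0x2000
	#define P9L_NOWRITECACHE 0x4000

	/*
	 * Same ordering as v9fs_fid_add_modes(): no session cache, a zero
	 * qid.version without ignoreqv, a directio mount or O_DIRECT all
	 * force direct I/O; otherwise a missing writeback cache, O_DSYNC
	 * or a sync mount only disables the write cache.
	 */
	static int fid_extra_mode(int s_flags, int s_cache, unsigned int f_flags,
				  unsigned int qid_version)
	{
		if (!s_cache ||
		    (qid_version == 0 && !(s_flags & V9FS_IGNORE_QV)) ||
		    (s_flags & V9FS_DIRECT_IO) || (f_flags & O_DIRECT))
			return P9L_DIRECT;
		if (!(s_cache & CACHE_WRITEBACK) ||
		    (f_flags & O_DSYNC) || (s_flags & V9FS_SYNC))
			return P9L_NOWRITECACHE;
		return 0;
	}

	int main(void)
	{
		/* cache=readahead (0x01): no CACHE_WRITEBACK bit, so writes bypass the cache */
		printf("readahead, O_RDWR    -> %#x\n", fid_extra_mode(0, 0x01, O_RDWR, 1));
		/* cache=loose (0x0f), but the server reports qid.version == 0 */
		printf("loose, qid.version=0 -> %#x\n", fid_extra_mode(0, 0x0f, O_RDWR, 0));
		/* cache=loose plus the new ignoreqv option: full caching is kept */
		printf("loose + ignoreqv     -> %#x\n",
		       fid_extra_mode(V9FS_IGNORE_QV, 0x0f, O_RDWR, 0));
		return 0;
	}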
......@@ -38,9 +38,7 @@ enum {
/* String options */
Opt_uname, Opt_remotename, Opt_cache, Opt_cachetag,
/* Options that take no arguments */
Opt_nodevmap,
/* Cache options */
Opt_cache_loose, Opt_fscache, Opt_mmap,
Opt_nodevmap, Opt_noxattr, Opt_directio, Opt_ignoreqv,
/* Access options */
Opt_access, Opt_posixacl,
/* Lock timeout option */
......@@ -57,10 +55,10 @@ static const match_table_t tokens = {
{Opt_uname, "uname=%s"},
{Opt_remotename, "aname=%s"},
{Opt_nodevmap, "nodevmap"},
{Opt_noxattr, "noxattr"},
{Opt_directio, "directio"},
{Opt_ignoreqv, "ignoreqv"},
{Opt_cache, "cache=%s"},
{Opt_cache_loose, "loose"},
{Opt_fscache, "fscache"},
{Opt_mmap, "mmap"},
{Opt_cachetag, "cachetag=%s"},
{Opt_access, "access=%s"},
{Opt_posixacl, "posixacl"},
......@@ -68,32 +66,30 @@ static const match_table_t tokens = {
{Opt_err, NULL}
};
static const char *const v9fs_cache_modes[nr__p9_cache_modes] = {
[CACHE_NONE] = "none",
[CACHE_MMAP] = "mmap",
[CACHE_LOOSE] = "loose",
[CACHE_FSCACHE] = "fscache",
};
/* Interpret mount options for cache mode */
static int get_cache_mode(char *s)
{
int version = -EINVAL;
if (!strcmp(s, "loose")) {
version = CACHE_LOOSE;
version = CACHE_SC_LOOSE;
p9_debug(P9_DEBUG_9P, "Cache mode: loose\n");
} else if (!strcmp(s, "fscache")) {
version = CACHE_FSCACHE;
version = CACHE_SC_FSCACHE;
p9_debug(P9_DEBUG_9P, "Cache mode: fscache\n");
} else if (!strcmp(s, "mmap")) {
version = CACHE_MMAP;
version = CACHE_SC_MMAP;
p9_debug(P9_DEBUG_9P, "Cache mode: mmap\n");
} else if (!strcmp(s, "readahead")) {
version = CACHE_SC_READAHEAD;
p9_debug(P9_DEBUG_9P, "Cache mode: readahead\n");
} else if (!strcmp(s, "none")) {
version = CACHE_NONE;
version = CACHE_SC_NONE;
p9_debug(P9_DEBUG_9P, "Cache mode: none\n");
} else
pr_info("Unknown Cache mode %s\n", s);
} else if (kstrtoint(s, 0, &version) != 0) {
version = -EINVAL;
pr_info("Unknown Cache mode or invalid value %s\n", s);
}
return version;
}
......@@ -121,9 +117,9 @@ int v9fs_show_options(struct seq_file *m, struct dentry *root)
if (v9ses->nodev)
seq_puts(m, ",nodevmap");
if (v9ses->cache)
seq_printf(m, ",%s", v9fs_cache_modes[v9ses->cache]);
seq_printf(m, ",cache=%x", v9ses->cache);
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cachetag && v9ses->cache == CACHE_FSCACHE)
if (v9ses->cachetag && (v9ses->cache & CACHE_FSCACHE))
seq_printf(m, ",cachetag=%s", v9ses->cachetag);
#endif
......@@ -143,9 +139,16 @@ int v9fs_show_options(struct seq_file *m, struct dentry *root)
break;
}
if (v9ses->flags & V9FS_IGNORE_QV)
seq_puts(m, ",ignoreqv");
if (v9ses->flags & V9FS_DIRECT_IO)
seq_puts(m, ",directio");
if (v9ses->flags & V9FS_POSIX_ACL)
seq_puts(m, ",posixacl");
if (v9ses->flags & V9FS_NO_XATTR)
seq_puts(m, ",noxattr");
return p9_show_client_options(m, v9ses->clnt);
}
......@@ -266,14 +269,14 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
case Opt_nodevmap:
v9ses->nodev = 1;
break;
case Opt_cache_loose:
v9ses->cache = CACHE_LOOSE;
case Opt_noxattr:
v9ses->flags |= V9FS_NO_XATTR;
break;
case Opt_fscache:
v9ses->cache = CACHE_FSCACHE;
case Opt_directio:
v9ses->flags |= V9FS_DIRECT_IO;
break;
case Opt_mmap:
v9ses->cache = CACHE_MMAP;
case Opt_ignoreqv:
v9ses->flags |= V9FS_IGNORE_QV;
break;
case Opt_cachetag:
#ifdef CONFIG_9P_FSCACHE
......@@ -468,7 +471,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
#ifdef CONFIG_9P_FSCACHE
/* register the session for caching */
if (v9ses->cache == CACHE_FSCACHE) {
if (v9ses->cache & CACHE_FSCACHE) {
rc = v9fs_cache_session_get_cookie(v9ses, dev_name);
if (rc < 0)
goto err_clnt;
......
......@@ -31,29 +31,54 @@
#define V9FS_ACL_MASK V9FS_POSIX_ACL
enum p9_session_flags {
V9FS_PROTO_2000U = 0x01,
V9FS_PROTO_2000L = 0x02,
V9FS_ACCESS_SINGLE = 0x04,
V9FS_ACCESS_USER = 0x08,
V9FS_ACCESS_CLIENT = 0x10,
V9FS_POSIX_ACL = 0x20
V9FS_PROTO_2000U = 0x01,
V9FS_PROTO_2000L = 0x02,
V9FS_ACCESS_SINGLE = 0x04,
V9FS_ACCESS_USER = 0x08,
V9FS_ACCESS_CLIENT = 0x10,
V9FS_POSIX_ACL = 0x20,
V9FS_NO_XATTR = 0x40,
V9FS_IGNORE_QV = 0x80, /* ignore qid.version for cache hints */
V9FS_DIRECT_IO = 0x100,
V9FS_SYNC = 0x200
};
/* possible values of ->cache */
/**
* enum p9_cache_modes - user specified cache preferences
* @CACHE_NONE: do not cache data, dentries, or directory contents (default)
* @CACHE_LOOSE: cache data, dentries, and directory contents w/no consistency
* enum p9_cache_shortcuts - human readable cache preferences
* @CACHE_SC_NONE: disable all caches
* @CACHE_SC_READAHEAD: only provide caching for readahead
* @CACHE_SC_MMAP: provide caching to enable mmap
* @CACHE_SC_LOOSE: non-coherent caching for files and meta data
* @CACHE_SC_FSCACHE: persistent non-coherent caching for files and meta-data
*
* eventually support loose, tight, time, session, default always none
*/
enum p9_cache_modes {
CACHE_NONE,
CACHE_MMAP,
CACHE_LOOSE,
CACHE_FSCACHE,
nr__p9_cache_modes
enum p9_cache_shortcuts {
CACHE_SC_NONE = 0b00000000,
CACHE_SC_READAHEAD = 0b00000001,
CACHE_SC_MMAP = 0b00000101,
CACHE_SC_LOOSE = 0b00001111,
CACHE_SC_FSCACHE = 0b10001111,
};
/**
* enum p9_cache_bits - possible values of ->cache
* @CACHE_NONE: caches disabled
* @CACHE_FILE: file caching (open to close)
* @CACHE_META: meta-data and directory caching
* @CACHE_WRITEBACK: write-back caching for files
* @CACHE_LOOSE: don't check cache consistency
* @CACHE_FSCACHE: local persistent caches
*
*/
enum p9_cache_bits {
CACHE_NONE = 0b00000000,
CACHE_FILE = 0b00000001,
CACHE_META = 0b00000010,
CACHE_WRITEBACK = 0b00000100,
CACHE_LOOSE = 0b00001000,
CACHE_FSCACHE = 0b10000000,
};
/**
......@@ -62,7 +87,7 @@ enum p9_cache_modes {
* @nodev: set to 1 to disable device mapping
* @debug: debug level
* @afid: authentication handle
* @cache: cache mode of type &p9_cache_modes
* @cache: cache mode of type &p9_cache_bits
* @cachetag: the tag of the cache associated with this session
* @fscache: session cookie associated with FS-Cache
* @uname: string user name to mount hierarchy as
......@@ -112,7 +137,6 @@ struct v9fs_inode {
struct netfs_inode netfs; /* Netfslib context and vfs inode */
struct p9_qid qid;
unsigned int cache_validity;
struct p9_fid *writeback_fid;
struct mutex v_mutex;
};
......
......@@ -36,10 +36,6 @@ extern const struct file_operations v9fs_dir_operations;
extern const struct file_operations v9fs_dir_operations_dotl;
extern const struct dentry_operations v9fs_dentry_operations;
extern const struct dentry_operations v9fs_cached_dentry_operations;
extern const struct file_operations v9fs_cached_file_operations;
extern const struct file_operations v9fs_cached_file_operations_dotl;
extern const struct file_operations v9fs_mmap_file_operations;
extern const struct file_operations v9fs_mmap_file_operations_dotl;
extern struct kmem_cache *v9fs_inode_cache;
struct inode *v9fs_alloc_inode(struct super_block *sb);
......
......@@ -57,8 +57,6 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
*/
static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
struct inode *inode = file_inode(file);
struct v9fs_inode *v9inode = V9FS_I(inode);
struct p9_fid *fid = file->private_data;
BUG_ON(!fid);
......@@ -66,11 +64,8 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
/* we might need to read from a fid that was opened write-only
* for read-modify-write of page cache, use the writeback fid
* for that */
if (rreq->origin == NETFS_READ_FOR_WRITE &&
(fid->mode & O_ACCMODE) == O_WRONLY) {
fid = v9inode->writeback_fid;
BUG_ON(!fid);
}
WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE &&
!(fid->mode & P9_ORDWR));
p9_fid_get(fid);
rreq->netfs_priv = fid;
......@@ -120,8 +115,6 @@ const struct netfs_request_ops v9fs_req_ops = {
static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
{
struct inode *inode = folio_inode(folio);
if (folio_test_private(folio))
return false;
#ifdef CONFIG_9P_FSCACHE
......@@ -130,8 +123,8 @@ static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
return false;
folio_wait_fscache(folio);
}
fscache_note_page_release(v9fs_inode_cookie(V9FS_I(folio_inode(folio))));
#endif
fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
return true;
}
......@@ -141,6 +134,7 @@ static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
folio_wait_fscache(folio);
}
#ifdef CONFIG_9P_FSCACHE
static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
bool was_async)
{
......@@ -154,17 +148,19 @@ static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
i_size_read(&v9inode->netfs.inode), 0);
}
}
#endif
static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
struct inode *inode = folio_inode(folio);
struct v9fs_inode *v9inode = V9FS_I(inode);
struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
loff_t start = folio_pos(folio);
loff_t i_size = i_size_read(inode);
struct iov_iter from;
size_t len = folio_size(folio);
struct p9_fid *writeback_fid;
int err;
struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode);
struct fscache_cookie __maybe_unused *cookie = v9fs_inode_cookie(v9inode);
if (start >= i_size)
return 0; /* Simultaneous truncation occurred */
......@@ -173,25 +169,33 @@ static int v9fs_vfs_write_folio_locked(struct folio *folio)
iov_iter_xarray(&from, ITER_SOURCE, &folio_mapping(folio)->i_pages, start, len);
/* We should have writeback_fid always set */
BUG_ON(!v9inode->writeback_fid);
writeback_fid = v9fs_fid_find_inode(inode, true, INVALID_UID, true);
if (!writeback_fid) {
WARN_ONCE(1, "folio expected an open fid inode->i_private=%p\n",
inode->i_private);
return -EINVAL;
}
folio_wait_fscache(folio);
folio_start_writeback(folio);
p9_client_write(v9inode->writeback_fid, start, &from, &err);
p9_client_write(writeback_fid, start, &from, &err);
#ifdef CONFIG_9P_FSCACHE
if (err == 0 &&
fscache_cookie_enabled(cookie) &&
test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
fscache_cookie_enabled(cookie) &&
test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
folio_start_fscache(folio);
fscache_write_to_cache(v9fs_inode_cookie(v9inode),
folio_mapping(folio), start, len, i_size,
v9fs_write_to_cache_done, v9inode,
true);
folio_mapping(folio), start, len, i_size,
v9fs_write_to_cache_done, v9inode,
true);
}
#endif
folio_end_writeback(folio);
p9_fid_put(writeback_fid);
return err;
}
......@@ -298,7 +302,6 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
loff_t last_pos = pos + copied;
struct folio *folio = page_folio(subpage);
struct inode *inode = mapping->host;
struct v9fs_inode *v9inode = V9FS_I(inode);
p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
......@@ -318,7 +321,10 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
if (last_pos > inode->i_size) {
inode_add_bytes(inode, last_pos - inode->i_size);
i_size_write(inode, last_pos);
fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
#ifdef CONFIG_9P_FSCACHE
fscache_update_cookie(v9fs_inode_cookie(V9FS_I(inode)), NULL,
&last_pos);
#endif
}
folio_mark_dirty(folio);
out:
......
......@@ -197,9 +197,9 @@ static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx)
/**
* v9fs_dir_release - called on a close of a file or directory
* @inode: inode of the directory
* @filp: file pointer to a directory
* v9fs_dir_release - close a directory or a file
* @inode: inode of the directory or file
* @filp: file pointer to a directory or file
*
*/
......@@ -214,7 +214,11 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
fid = filp->private_data;
p9_debug(P9_DEBUG_VFS, "inode: %p filp: %p fid: %d\n",
inode, filp, fid ? fid->fid : -1);
if (fid) {
if ((S_ISREG(inode->i_mode)) && (filp->f_mode & FMODE_WRITE))
retval = filemap_fdatawrite(inode->i_mapping);
spin_lock(&inode->i_lock);
hlist_del(&fid->ilist);
spin_unlock(&inode->i_lock);
......
......@@ -29,7 +29,6 @@
#include "fid.h"
#include "cache.h"
static const struct vm_operations_struct v9fs_file_vm_ops;
static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
/**
......@@ -42,13 +41,11 @@ static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
int v9fs_file_open(struct inode *inode, struct file *file)
{
int err;
struct v9fs_inode *v9inode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid, *writeback_fid;
struct p9_fid *fid;
int omode;
p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
v9inode = V9FS_I(inode);
v9ses = v9fs_inode2v9ses(inode);
if (v9fs_proto_dotl(v9ses))
omode = v9fs_open_to_dotl_flags(file->f_flags);
......@@ -61,7 +58,19 @@ int v9fs_file_open(struct inode *inode, struct file *file)
if (IS_ERR(fid))
return PTR_ERR(fid);
err = p9_client_open(fid, omode);
if ((v9ses->cache & CACHE_WRITEBACK) && (omode & P9_OWRITE)) {
int writeback_omode = (omode & ~P9_OWRITE) | P9_ORDWR;
p9_debug(P9_DEBUG_CACHE, "write-only file with writeback enabled, try opening O_RDWR\n");
err = p9_client_open(fid, writeback_omode);
if (err < 0) {
p9_debug(P9_DEBUG_CACHE, "could not open O_RDWR, disabling caches\n");
err = p9_client_open(fid, omode);
fid->mode |= P9L_DIRECT;
}
} else {
err = p9_client_open(fid, omode);
}
if (err < 0) {
p9_fid_put(fid);
return err;
......@@ -73,36 +82,14 @@ int v9fs_file_open(struct inode *inode, struct file *file)
file->private_data = fid;
}
mutex_lock(&v9inode->v_mutex);
if ((v9ses->cache) && !v9inode->writeback_fid &&
((file->f_flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
* we do it during open time instead of
* page dirty time via write_begin/page_mkwrite
* because we want write after unlink usecase
* to work.
*/
writeback_fid = v9fs_writeback_fid(file_dentry(file));
if (IS_ERR(writeback_fid)) {
err = PTR_ERR(writeback_fid);
mutex_unlock(&v9inode->v_mutex);
goto out_error;
}
v9inode->writeback_fid = (void *) writeback_fid;
}
mutex_unlock(&v9inode->v_mutex);
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache == CACHE_FSCACHE)
fscache_use_cookie(v9fs_inode_cookie(v9inode),
if (v9ses->cache & CACHE_FSCACHE)
fscache_use_cookie(v9fs_inode_cookie(V9FS_I(inode)),
file->f_mode & FMODE_WRITE);
#endif
v9fs_fid_add_modes(fid, v9ses->flags, v9ses->cache, file->f_flags);
v9fs_open_fid_add(inode, &fid);
return 0;
out_error:
p9_fid_put(file->private_data);
file->private_data = NULL;
return err;
}
/**
......@@ -369,8 +356,13 @@ v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct p9_fid *fid = iocb->ki_filp->private_data;
int ret, err = 0;
p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
iov_iter_count(to), iocb->ki_pos);
p9_debug(P9_DEBUG_VFS, "fid %d count %zu offset %lld\n",
fid->fid, iov_iter_count(to), iocb->ki_pos);
if (!(fid->mode & P9L_DIRECT)) {
p9_debug(P9_DEBUG_VFS, "(cached)\n");
return generic_file_read_iter(iocb, to);
}
if (iocb->ki_filp->f_flags & O_NONBLOCK)
ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
......@@ -393,10 +385,18 @@ static ssize_t
v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct p9_fid *fid = file->private_data;
ssize_t retval;
loff_t origin;
int err = 0;
p9_debug(P9_DEBUG_VFS, "fid %d\n", fid->fid);
if (!(fid->mode & (P9L_DIRECT | P9L_NOWRITECACHE))) {
p9_debug(P9_DEBUG_CACHE, "(cached)\n");
return generic_file_write_iter(iocb, from);
}
retval = generic_write_checks(iocb, from);
if (retval <= 0)
return retval;
......@@ -478,45 +478,18 @@ static int
v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
int retval;
struct inode *inode = file_inode(filp);
struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
p9_debug(P9_DEBUG_MMAP, "filp :%p\n", filp);
retval = generic_file_mmap(filp, vma);
if (!retval)
vma->vm_ops = &v9fs_file_vm_ops;
return retval;
}
static int
v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
int retval;
struct inode *inode;
struct v9fs_inode *v9inode;
struct p9_fid *fid;
inode = file_inode(filp);
v9inode = V9FS_I(inode);
mutex_lock(&v9inode->v_mutex);
if (!v9inode->writeback_fid &&
(vma->vm_flags & VM_SHARED) &&
(vma->vm_flags & VM_WRITE)) {
/*
* clone a fid and add it to writeback_fid
* we do it during mmap instead of
* page dirty time via write_begin/page_mkwrite
* because we want write after unlink usecase
* to work.
*/
fid = v9fs_writeback_fid(file_dentry(filp));
if (IS_ERR(fid)) {
retval = PTR_ERR(fid);
mutex_unlock(&v9inode->v_mutex);
return retval;
}
v9inode->writeback_fid = (void *) fid;
if (!(v9ses->cache & CACHE_WRITEBACK)) {
p9_debug(P9_DEBUG_CACHE, "(no mmap mode)");
if (vma->vm_flags & VM_MAYSHARE)
return -ENODEV;
invalidate_inode_pages2(filp->f_mapping);
return generic_file_readonly_mmap(filp, vma);
}
mutex_unlock(&v9inode->v_mutex);
retval = generic_file_mmap(filp, vma);
if (!retval)
......@@ -528,7 +501,6 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
struct v9fs_inode *v9inode;
struct folio *folio = page_folio(vmf->page);
struct file *filp = vmf->vma->vm_file;
struct inode *inode = file_inode(filp);
......@@ -537,8 +509,6 @@ v9fs_vm_page_mkwrite(struct vm_fault *vmf)
p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
folio, (unsigned long)filp->private_data);
v9inode = V9FS_I(inode);
/* Wait for the page to be written to the cache before we allow it to
* be modified. We then assume the entire page will need writing back.
*/
......@@ -551,7 +521,6 @@ v9fs_vm_page_mkwrite(struct vm_fault *vmf)
/* Update file times before taking page lock */
file_update_time(filp);
BUG_ON(!v9inode->writeback_fid);
if (folio_lock_killable(folio) < 0)
return VM_FAULT_RETRY;
if (folio_mapping(folio) != inode->i_mapping)
......@@ -564,35 +533,6 @@ v9fs_vm_page_mkwrite(struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
/**
* v9fs_mmap_file_read_iter - read from a file
* @iocb: The operation parameters
* @to: The buffer to read into
*
*/
static ssize_t
v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
/* TODO: Check if there are dirty pages */
return v9fs_file_read_iter(iocb, to);
}
/**
* v9fs_mmap_file_write_iter - write to a file
* @iocb: The operation parameters
* @from: The data to write
*
*/
static ssize_t
v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
/*
* TODO: invalidate mmaps on filp's inode between
* offset and offset+count
*/
return v9fs_file_write_iter(iocb, from);
}
static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
struct inode *inode;
......@@ -615,13 +555,6 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
filemap_fdatawrite_wbc(inode->i_mapping, &wbc);
}
static const struct vm_operations_struct v9fs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = v9fs_vm_page_mkwrite,
};
static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
.close = v9fs_mmap_vm_close,
.fault = filemap_fault,
......@@ -629,34 +562,6 @@ static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
.page_mkwrite = v9fs_vm_page_mkwrite,
};
const struct file_operations v9fs_cached_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
.mmap = v9fs_file_mmap,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync,
};
const struct file_operations v9fs_cached_file_operations_dotl = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock_dotl,
.flock = v9fs_file_flock_dotl,
.mmap = v9fs_file_mmap,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync_dotl,
};
const struct file_operations v9fs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = v9fs_file_read_iter,
......@@ -678,34 +583,7 @@ const struct file_operations v9fs_file_operations_dotl = {
.release = v9fs_dir_release,
.lock = v9fs_file_lock_dotl,
.flock = v9fs_file_flock_dotl,
.mmap = generic_file_readonly_mmap,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync_dotl,
};
const struct file_operations v9fs_mmap_file_operations = {
.llseek = generic_file_llseek,
.read_iter = v9fs_mmap_file_read_iter,
.write_iter = v9fs_mmap_file_write_iter,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
.mmap = v9fs_mmap_file_mmap,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync,
};
const struct file_operations v9fs_mmap_file_operations_dotl = {
.llseek = generic_file_llseek,
.read_iter = v9fs_mmap_file_read_iter,
.write_iter = v9fs_mmap_file_write_iter,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock_dotl,
.flock = v9fs_file_flock_dotl,
.mmap = v9fs_mmap_file_mmap,
.mmap = v9fs_file_mmap,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync_dotl,
......
......@@ -230,7 +230,6 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
v9inode = alloc_inode_sb(sb, v9fs_inode_cache, GFP_KERNEL);
if (!v9inode)
return NULL;
v9inode->writeback_fid = NULL;
v9inode->cache_validity = 0;
mutex_init(&v9inode->v_mutex);
return &v9inode->netfs.inode;
......@@ -287,24 +286,10 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
case S_IFREG:
if (v9fs_proto_dotl(v9ses)) {
inode->i_op = &v9fs_file_inode_operations_dotl;
if (v9ses->cache == CACHE_LOOSE ||
v9ses->cache == CACHE_FSCACHE)
inode->i_fop =
&v9fs_cached_file_operations_dotl;
else if (v9ses->cache == CACHE_MMAP)
inode->i_fop = &v9fs_mmap_file_operations_dotl;
else
inode->i_fop = &v9fs_file_operations_dotl;
inode->i_fop = &v9fs_file_operations_dotl;
} else {
inode->i_op = &v9fs_file_inode_operations;
if (v9ses->cache == CACHE_LOOSE ||
v9ses->cache == CACHE_FSCACHE)
inode->i_fop =
&v9fs_cached_file_operations;
else if (v9ses->cache == CACHE_MMAP)
inode->i_fop = &v9fs_mmap_file_operations;
else
inode->i_fop = &v9fs_file_operations;
inode->i_fop = &v9fs_file_operations;
}
break;
......@@ -386,20 +371,23 @@ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
*/
void v9fs_evict_inode(struct inode *inode)
{
struct v9fs_inode *v9inode = V9FS_I(inode);
__le32 version;
struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode);
__le32 __maybe_unused version;
truncate_inode_pages_final(&inode->i_data);
#ifdef CONFIG_9P_FSCACHE
version = cpu_to_le32(v9inode->qid.version);
fscache_clear_inode_writeback(v9fs_inode_cookie(v9inode), inode,
&version);
#endif
clear_inode(inode);
filemap_fdatawrite(&inode->i_data);
#ifdef CONFIG_9P_FSCACHE
fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
/* clunk the fid stashed in writeback_fid */
p9_fid_put(v9inode->writeback_fid);
v9inode->writeback_fid = NULL;
#endif
}
static int v9fs_test_inode(struct inode *inode, void *data)
......@@ -779,7 +767,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
inode = NULL;
else if (IS_ERR(fid))
inode = ERR_CAST(fid);
else if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
else if (v9ses->cache & (CACHE_META|CACHE_LOOSE))
inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
else
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
......@@ -808,11 +796,12 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
{
int err;
u32 perm;
struct v9fs_inode *v9inode;
struct v9fs_inode __maybe_unused *v9inode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid, *inode_fid;
struct p9_fid *fid;
struct dentry *res = NULL;
struct inode *inode;
int p9_omode;
if (d_in_lookup(dentry)) {
res = v9fs_vfs_lookup(dir, dentry, 0);
......@@ -831,9 +820,14 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
v9ses = v9fs_inode2v9ses(dir);
perm = unixmode2p9mode(v9ses, mode);
fid = v9fs_create(v9ses, dir, dentry, NULL, perm,
v9fs_uflags2omode(flags,
v9fs_proto_dotu(v9ses)));
p9_omode = v9fs_uflags2omode(flags, v9fs_proto_dotu(v9ses));
if ((v9ses->cache & CACHE_WRITEBACK) && (p9_omode & P9_OWRITE)) {
p9_omode = (p9_omode & ~P9_OWRITE) | P9_ORDWR;
p9_debug(P9_DEBUG_CACHE,
"write-only file with writeback enabled, creating w/ O_RDWR\n");
}
fid = v9fs_create(v9ses, dir, dentry, NULL, perm, p9_omode);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
goto error;
......@@ -842,33 +836,18 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
v9fs_invalidate_inode_attr(dir);
inode = d_inode(dentry);
v9inode = V9FS_I(inode);
mutex_lock(&v9inode->v_mutex);
if ((v9ses->cache) && !v9inode->writeback_fid &&
((flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
* we do it during open time instead of
* page dirty time via write_begin/page_mkwrite
* because we want write after unlink usecase
* to work.
*/
inode_fid = v9fs_writeback_fid(dentry);
if (IS_ERR(inode_fid)) {
err = PTR_ERR(inode_fid);
mutex_unlock(&v9inode->v_mutex);
goto error;
}
v9inode->writeback_fid = (void *) inode_fid;
}
mutex_unlock(&v9inode->v_mutex);
err = finish_open(file, dentry, generic_file_open);
if (err)
goto error;
file->private_data = fid;
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache & CACHE_FSCACHE)
fscache_use_cookie(v9fs_inode_cookie(v9inode),
file->f_mode & FMODE_WRITE);
#endif
v9fs_fid_add_modes(fid, v9ses->flags, v9ses->cache, file->f_flags);
v9fs_open_fid_add(inode, &fid);
file->f_mode |= FMODE_CREATED;
......@@ -1030,15 +1009,24 @@ v9fs_vfs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
struct inode *inode = d_inode(dentry);
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
struct p9_wstat *st;
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
generic_fillattr(&nop_mnt_idmap, d_inode(dentry), stat);
if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
generic_fillattr(&nop_mnt_idmap, inode, stat);
return 0;
} else if (v9ses->cache & CACHE_WRITEBACK) {
if (S_ISREG(inode->i_mode)) {
int retval = filemap_fdatawrite(inode->i_mapping);
if (retval)
p9_debug(P9_DEBUG_ERROR,
"flushing writeback during getattr returned %d\n", retval);
}
}
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
......@@ -1070,7 +1058,6 @@ static int v9fs_vfs_setattr(struct mnt_idmap *idmap,
{
int retval, use_dentry = 0;
struct inode *inode = d_inode(dentry);
struct v9fs_inode *v9inode = V9FS_I(inode);
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL;
struct p9_wstat wstat;
......@@ -1115,8 +1102,12 @@ static int v9fs_vfs_setattr(struct mnt_idmap *idmap,
}
/* Write all dirty data */
if (d_is_reg(dentry))
filemap_write_and_wait(inode->i_mapping);
if (d_is_reg(dentry)) {
retval = filemap_fdatawrite(inode->i_mapping);
if (retval)
p9_debug(P9_DEBUG_ERROR,
"flushing writeback during setattr returned %d\n", retval);
}
retval = p9_client_wstat(fid, &wstat);
......@@ -1127,9 +1118,17 @@ static int v9fs_vfs_setattr(struct mnt_idmap *idmap,
return retval;
if ((iattr->ia_valid & ATTR_SIZE) &&
iattr->ia_size != i_size_read(inode)) {
iattr->ia_size != i_size_read(inode)) {
truncate_setsize(inode, iattr->ia_size);
fscache_resize_cookie(v9fs_inode_cookie(v9inode), iattr->ia_size);
truncate_pagecache(inode, iattr->ia_size);
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache & CACHE_FSCACHE) {
struct v9fs_inode *v9inode = V9FS_I(inode);
fscache_resize_cookie(v9fs_inode_cookie(v9inode), iattr->ia_size);
}
#endif
}
v9fs_invalidate_inode_attr(inode);
......@@ -1413,7 +1412,7 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
flags = (v9ses->cache & CACHE_LOOSE) ?
V9FS_STAT2INODE_KEEP_ISIZE : 0;
v9fs_stat2inode(st, inode, inode->i_sb, flags);
out:
......
......@@ -232,12 +232,12 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
int err = 0;
kgid_t gid;
umode_t mode;
int p9_omode = v9fs_open_to_dotl_flags(flags);
const unsigned char *name = NULL;
struct p9_qid qid;
struct inode *inode;
struct p9_fid *fid = NULL;
struct v9fs_inode *v9inode;
struct p9_fid *dfid = NULL, *ofid = NULL, *inode_fid = NULL;
struct p9_fid *dfid = NULL, *ofid = NULL;
struct v9fs_session_info *v9ses;
struct posix_acl *pacl = NULL, *dacl = NULL;
struct dentry *res = NULL;
......@@ -282,14 +282,19 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
/* Update mode based on ACL value */
err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
if (err) {
p9_debug(P9_DEBUG_VFS, "Failed to get acl values in creat %d\n",
p9_debug(P9_DEBUG_VFS, "Failed to get acl values in create %d\n",
err);
goto out;
}
err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
mode, gid, &qid);
if ((v9ses->cache & CACHE_WRITEBACK) && (p9_omode & P9_OWRITE)) {
p9_omode = (p9_omode & ~P9_OWRITE) | P9_ORDWR;
p9_debug(P9_DEBUG_CACHE,
"write-only file with writeback enabled, creating w/ O_RDWR\n");
}
err = p9_client_create_dotl(ofid, name, p9_omode, mode, gid, &qid);
if (err < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n",
p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in create %d\n",
err);
goto out;
}
......@@ -314,36 +319,19 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
v9fs_fid_add(dentry, &fid);
d_instantiate(dentry, inode);
v9inode = V9FS_I(inode);
mutex_lock(&v9inode->v_mutex);
if ((v9ses->cache) && !v9inode->writeback_fid &&
((flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
* we do it during open time instead of
* page dirty time via write_begin/page_mkwrite
* because we want write after unlink usecase
* to work.
*/
inode_fid = v9fs_writeback_fid(dentry);
if (IS_ERR(inode_fid)) {
err = PTR_ERR(inode_fid);
mutex_unlock(&v9inode->v_mutex);
goto out;
}
v9inode->writeback_fid = (void *) inode_fid;
}
mutex_unlock(&v9inode->v_mutex);
/* Since we are opening a file, assign the open fid to the file */
err = finish_open(file, dentry, generic_file_open);
if (err)
goto out;
file->private_data = ofid;
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache == CACHE_FSCACHE)
if (v9ses->cache & CACHE_FSCACHE) {
struct v9fs_inode *v9inode = V9FS_I(inode);
fscache_use_cookie(v9fs_inode_cookie(v9inode),
file->f_mode & FMODE_WRITE);
}
#endif
v9fs_fid_add_modes(ofid, v9ses->flags, v9ses->cache, flags);
v9fs_open_fid_add(inode, &ofid);
file->f_mode |= FMODE_CREATED;
out:
......@@ -415,7 +403,7 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
}
/* instantiate inode and assign the unopened fid to the dentry */
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
......@@ -458,13 +446,22 @@ v9fs_vfs_getattr_dotl(struct mnt_idmap *idmap,
struct dentry *dentry = path->dentry;
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
struct inode *inode = d_inode(dentry);
struct p9_stat_dotl *st;
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
generic_fillattr(&nop_mnt_idmap, d_inode(dentry), stat);
if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
generic_fillattr(&nop_mnt_idmap, inode, stat);
return 0;
} else if (v9ses->cache) {
if (S_ISREG(inode->i_mode)) {
int retval = filemap_fdatawrite(inode->i_mapping);
if (retval)
p9_debug(P9_DEBUG_ERROR,
"flushing writeback during getattr returned %d\n", retval);
}
}
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
......@@ -540,12 +537,13 @@ int v9fs_vfs_setattr_dotl(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *iattr)
{
int retval, use_dentry = 0;
struct inode *inode = d_inode(dentry);
struct v9fs_session_info __maybe_unused *v9ses;
struct p9_fid *fid = NULL;
struct p9_iattr_dotl p9attr = {
.uid = INVALID_UID,
.gid = INVALID_GID,
};
struct inode *inode = d_inode(dentry);
p9_debug(P9_DEBUG_VFS, "\n");
......@@ -553,6 +551,8 @@ int v9fs_vfs_setattr_dotl(struct mnt_idmap *idmap,
if (retval)
return retval;
v9ses = v9fs_dentry2v9ses(dentry);
p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid);
if (iattr->ia_valid & ATTR_MODE)
p9attr.mode = iattr->ia_mode;
......@@ -583,8 +583,12 @@ int v9fs_vfs_setattr_dotl(struct mnt_idmap *idmap,
return PTR_ERR(fid);
/* Write all dirty data */
if (S_ISREG(inode->i_mode))
filemap_write_and_wait(inode->i_mapping);
if (S_ISREG(inode->i_mode)) {
retval = filemap_fdatawrite(inode->i_mapping);
if (retval < 0)
p9_debug(P9_DEBUG_ERROR,
"Flushing file prior to setattr failed: %d\n", retval);
}
retval = p9_client_setattr(fid, &p9attr);
if (retval < 0) {
......@@ -593,9 +597,17 @@ int v9fs_vfs_setattr_dotl(struct mnt_idmap *idmap,
return retval;
}
if ((iattr->ia_valid & ATTR_SIZE) &&
iattr->ia_size != i_size_read(inode))
if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size !=
i_size_read(inode)) {
truncate_setsize(inode, iattr->ia_size);
truncate_pagecache(inode, iattr->ia_size);
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache & CACHE_FSCACHE)
fscache_resize_cookie(v9fs_inode_cookie(V9FS_I(inode)),
iattr->ia_size);
#endif
}
v9fs_invalidate_inode_attr(inode);
setattr_copy(&nop_mnt_idmap, inode, iattr);
......@@ -722,7 +734,7 @@ v9fs_vfs_symlink_dotl(struct mnt_idmap *idmap, struct inode *dir,
}
v9fs_invalidate_inode_attr(dir);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
/* Now walk from the parent so we can get an unopened fid. */
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
......@@ -799,7 +811,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
}
v9fs_invalidate_inode_attr(dir);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
/* Get the latest stat info from server. */
struct p9_fid *fid;
......@@ -876,7 +888,7 @@ v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir,
}
/* instantiate inode and assign the unopened fid to the dentry */
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
......@@ -961,7 +973,7 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
flags = (v9ses->cache & CACHE_LOOSE) ?
V9FS_STAT2INODE_KEEP_ISIZE : 0;
v9fs_stat2inode_dotl(st, inode, flags);
out:
......
......@@ -64,7 +64,8 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
sb->s_magic = V9FS_MAGIC;
if (v9fs_proto_dotl(v9ses)) {
sb->s_op = &v9fs_super_ops_dotl;
sb->s_xattr = v9fs_xattr_handlers;
if (!(v9ses->flags & V9FS_NO_XATTR))
sb->s_xattr = v9fs_xattr_handlers;
} else {
sb->s_op = &v9fs_super_ops;
sb->s_time_max = U32_MAX;
......@@ -84,9 +85,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
sb->s_bdi->io_pages = v9ses->maxdata >> PAGE_SHIFT;
}
sb->s_flags |= SB_ACTIVE | SB_DIRSYNC;
if (!v9ses->cache)
sb->s_flags |= SB_SYNCHRONOUS;
sb->s_flags |= SB_ACTIVE;
#ifdef CONFIG_9P_FS_POSIX_ACL
if ((v9ses->flags & V9FS_ACL_MASK) == V9FS_POSIX_ACL)
......@@ -137,7 +136,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
if (retval)
goto release_sb;
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
if (v9ses->cache & (CACHE_META|CACHE_LOOSE))
sb->s_d_op = &v9fs_cached_dentry_operations;
else
sb->s_d_op = &v9fs_dentry_operations;
......@@ -278,7 +277,7 @@ static int v9fs_drop_inode(struct inode *inode)
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(inode);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
if (v9ses->cache & (CACHE_META|CACHE_LOOSE))
return generic_drop_inode(inode);
/*
* in case of non cached mode always drop the
......@@ -291,49 +290,30 @@ static int v9fs_drop_inode(struct inode *inode)
static int v9fs_write_inode(struct inode *inode,
struct writeback_control *wbc)
{
int ret;
struct p9_wstat wstat;
struct v9fs_inode *v9inode;
/*
* send an fsync request to server irrespective of
* wbc->sync_mode.
*/
p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
v9inode = V9FS_I(inode);
if (!v9inode->writeback_fid)
return 0;
v9fs_blank_wstat(&wstat);
ret = p9_client_wstat(v9inode->writeback_fid, &wstat);
if (ret < 0) {
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
return ret;
}
v9inode = V9FS_I(inode);
fscache_unpin_writeback(wbc, v9fs_inode_cookie(v9inode));
return 0;
}
static int v9fs_write_inode_dotl(struct inode *inode,
struct writeback_control *wbc)
{
int ret;
struct v9fs_inode *v9inode;
/*
* send an fsync request to server irrespective of
* wbc->sync_mode.
*/
v9inode = V9FS_I(inode);
p9_debug(P9_DEBUG_VFS, "%s: inode %p, writeback_fid %p\n",
__func__, inode, v9inode->writeback_fid);
if (!v9inode->writeback_fid)
return 0;
ret = p9_client_fsync(v9inode->writeback_fid, 0);
if (ret < 0) {
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
return ret;
}
p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
fscache_unpin_writeback(wbc, v9fs_inode_cookie(v9inode));
return 0;
}
......
......@@ -42,6 +42,8 @@ enum p9_debug_flags {
P9_DEBUG_PKT = (1<<10),
P9_DEBUG_FSC = (1<<11),
P9_DEBUG_VPKT = (1<<12),
P9_DEBUG_CACHE = (1<<13),
P9_DEBUG_MMAP = (1<<14),
};
#ifdef CONFIG_NET_9P_DEBUG
......@@ -213,6 +215,10 @@ enum p9_open_mode_t {
P9_ORCLOSE = 0x40,
P9_OAPPEND = 0x80,
P9_OEXCL = 0x1000,
P9L_MODE_MASK = 0x1FFF, /* don't send anything under this to server */
P9L_DIRECT = 0x2000, /* cache disabled */
P9L_NOWRITECACHE = 0x4000, /* no write caching */
P9L_LOOSE = 0x8000, /* loose cache */
};
/**
......
......@@ -1230,9 +1230,9 @@ int p9_client_open(struct p9_fid *fid, int mode)
return -EINVAL;
if (p9_is_proto_dotl(clnt))
req = p9_client_rpc(clnt, P9_TLOPEN, "dd", fid->fid, mode);
req = p9_client_rpc(clnt, P9_TLOPEN, "dd", fid->fid, mode & P9L_MODE_MASK);
else
req = p9_client_rpc(clnt, P9_TOPEN, "db", fid->fid, mode);
req = p9_client_rpc(clnt, P9_TOPEN, "db", fid->fid, mode & P9L_MODE_MASK);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
......@@ -1277,7 +1277,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags,
return -EINVAL;
req = p9_client_rpc(clnt, P9_TLCREATE, "dsddg", ofid->fid, name, flags,
mode, gid);
mode & P9L_MODE_MASK, gid);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
......@@ -1321,7 +1321,7 @@ int p9_client_fcreate(struct p9_fid *fid, const char *name, u32 perm, int mode,
return -EINVAL;
req = p9_client_rpc(clnt, P9_TCREATE, "dsdb?s", fid->fid, name, perm,
mode, extension);
mode & P9L_MODE_MASK, extension);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
......