Commit 1c75950b authored by Trond Myklebust

NFS: cleanup of nfs_sync_inode_wait()

Allow callers to directly pass it a struct writeback_control.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 3f442547
...@@ -307,11 +307,14 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse ...@@ -307,11 +307,14 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse
static void nfs_invalidate_page(struct page *page, unsigned long offset) static void nfs_invalidate_page(struct page *page, unsigned long offset)
{ {
struct inode *inode = page->mapping->host; loff_t range_start, range_end;
if (offset != 0)
return;
/* Cancel any unstarted writes on this page */ /* Cancel any unstarted writes on this page */
if (offset == 0) range_start = page_offset(page);
nfs_sync_inode_wait(inode, page->index, 1, FLUSH_INVALIDATE); range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
nfs_sync_mapping_range(page->mapping, range_start, range_end, FLUSH_INVALIDATE);
} }
static int nfs_release_page(struct page *page, gfp_t gfp) static int nfs_release_page(struct page *page, gfp_t gfp)
......
...@@ -422,7 +422,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) ...@@ -422,7 +422,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
int err; int err;
/* Flush out writes to the server in order to update c/mtime */ /* Flush out writes to the server in order to update c/mtime */
nfs_sync_inode_wait(inode, 0, 0, FLUSH_NOCOMMIT); nfs_sync_mapping_range(inode->i_mapping, 0, 0, FLUSH_NOCOMMIT);
/* /*
* We may force a getattr if the user cares about atime. * We may force a getattr if the user cares about atime.
......
...@@ -80,6 +80,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context*, ...@@ -80,6 +80,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context*,
static int nfs_wait_on_write_congestion(struct address_space *, int); static int nfs_wait_on_write_congestion(struct address_space *, int);
static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int); static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how); static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
static int nfs_wb_page_priority(struct inode *inode, struct page *page, int how);
static const struct rpc_call_ops nfs_write_partial_ops; static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops; static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops; static const struct rpc_call_ops nfs_commit_ops;
...@@ -1476,29 +1477,38 @@ int nfs_commit_inode(struct inode *inode, int how) ...@@ -1476,29 +1477,38 @@ int nfs_commit_inode(struct inode *inode, int how)
} }
#endif #endif
long nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start, long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
unsigned int npages, int how)
{ {
struct inode *inode = mapping->host;
struct nfs_inode *nfsi = NFS_I(inode); struct nfs_inode *nfsi = NFS_I(inode);
struct address_space *mapping = inode->i_mapping; unsigned long idx_start, idx_end;
struct writeback_control wbc = { unsigned int npages = 0;
.bdi = mapping->backing_dev_info,
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
.range_start = ((loff_t)idx_start) << PAGE_CACHE_SHIFT,
.range_end = ((loff_t)(idx_start + npages - 1)) << PAGE_CACHE_SHIFT,
};
LIST_HEAD(head); LIST_HEAD(head);
int nocommit = how & FLUSH_NOCOMMIT; int nocommit = how & FLUSH_NOCOMMIT;
long pages, ret; long pages, ret;
/* FIXME */
if (wbc->range_cyclic)
idx_start = 0;
else {
idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
if (idx_end > idx_start) {
unsigned long l_npages = 1 + idx_end - idx_start;
npages = l_npages;
if (sizeof(npages) != sizeof(l_npages) &&
(unsigned long)npages != l_npages)
npages = 0;
}
}
how &= ~FLUSH_NOCOMMIT; how &= ~FLUSH_NOCOMMIT;
spin_lock(&nfsi->req_lock); spin_lock(&nfsi->req_lock);
do { do {
wbc->pages_skipped = 0;
ret = nfs_wait_on_requests_locked(inode, idx_start, npages); ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
if (ret != 0) if (ret != 0)
continue; continue;
pages = nfs_scan_dirty(mapping, &wbc, &head); pages = nfs_scan_dirty(mapping, wbc, &head);
if (pages != 0) { if (pages != 0) {
spin_unlock(&nfsi->req_lock); spin_unlock(&nfsi->req_lock);
if (how & FLUSH_INVALIDATE) { if (how & FLUSH_INVALIDATE) {
...@@ -1509,11 +1519,16 @@ long nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start, ...@@ -1509,11 +1519,16 @@ long nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
spin_lock(&nfsi->req_lock); spin_lock(&nfsi->req_lock);
continue; continue;
} }
if (wbc->pages_skipped != 0)
continue;
if (nocommit) if (nocommit)
break; break;
pages = nfs_scan_commit(inode, &head, idx_start, npages); pages = nfs_scan_commit(inode, &head, idx_start, npages);
if (pages == 0) if (pages == 0) {
if (wbc->pages_skipped != 0)
continue;
break; break;
}
if (how & FLUSH_INVALIDATE) { if (how & FLUSH_INVALIDATE) {
spin_unlock(&nfsi->req_lock); spin_unlock(&nfsi->req_lock);
nfs_cancel_commit_list(&head); nfs_cancel_commit_list(&head);
...@@ -1530,6 +1545,60 @@ long nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start, ...@@ -1530,6 +1545,60 @@ long nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
return ret; return ret;
} }
/*
 * Flush every dirty page of the inode to the server and wait for the
 * writes to complete.  nfs_sync_mapping_wait() returns a page count on
 * success; collapse that to 0 and pass negative errors straight through.
 */
int nfs_wb_all(struct inode *inode)
{
	struct writeback_control wbc = {
		.bdi = inode->i_mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_cyclic = 1,	/* whole mapping, not a byte range */
	};
	int err = nfs_sync_mapping_wait(inode->i_mapping, &wbc, 0);

	return err < 0 ? err : 0;
}
/*
 * Synchronously write out and wait on the byte range
 * [range_start, range_end] of @mapping.  @how carries FLUSH_* flags
 * for nfs_sync_mapping_wait().  Returns 0 on success or a negative
 * error code.
 */
int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int err = nfs_sync_mapping_wait(mapping, &wbc, how);

	return err < 0 ? err : 0;
}
/*
 * Flush any outstanding writes covering a single page.  FLUSH_STABLE
 * is always ORed into the caller-supplied flags before the range is
 * handed to nfs_sync_mapping_range().
 */
static int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
	loff_t start = page_offset(page);

	return nfs_sync_mapping_range(inode->i_mapping, start,
			start + (loff_t)(PAGE_CACHE_SIZE - 1),
			how | FLUSH_STABLE);
}
/*
 * Write back all requests on one page - we do this before reading it.
 * Thin wrapper around nfs_wb_page_priority() with no extra flush flags
 * (FLUSH_STABLE is added inside the helper).  Returns 0 on success or
 * a negative error code.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
	return nfs_wb_page_priority(inode, page, 0);
}
int __init nfs_init_writepagecache(void) int __init nfs_init_writepagecache(void)
{ {
nfs_wdata_cachep = kmem_cache_create("nfs_write_data", nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
......
...@@ -432,7 +432,10 @@ extern void nfs_writedata_release(void *); ...@@ -432,7 +432,10 @@ extern void nfs_writedata_release(void *);
* Try to write back everything synchronously (but check the * Try to write back everything synchronously (but check the
* return value!) * return value!)
*/ */
extern long nfs_sync_inode_wait(struct inode *, unsigned long, unsigned int, int); extern long nfs_sync_mapping_wait(struct address_space *, struct writeback_control *, int);
extern int nfs_sync_mapping_range(struct address_space *, loff_t, loff_t, int);
extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_page(struct inode *inode, struct page* page);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
extern int nfs_commit_inode(struct inode *, int); extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_write_data *nfs_commit_alloc(void); extern struct nfs_write_data *nfs_commit_alloc(void);
...@@ -452,28 +455,6 @@ nfs_have_writebacks(struct inode *inode) ...@@ -452,28 +455,6 @@ nfs_have_writebacks(struct inode *inode)
return NFS_I(inode)->npages != 0; return NFS_I(inode)->npages != 0;
} }
/*
 * Flush all dirty pages of @inode to the server and wait.
 * Returns 0 on success, a negative error code on failure
 * (positive page counts from nfs_sync_inode_wait() map to 0).
 */
static inline int
nfs_wb_all(struct inode *inode)
{
	int ret;

	ret = nfs_sync_inode_wait(inode, 0, 0, 0);
	if (ret < 0)
		return ret;
	return 0;
}
/*
 * Write back all requests on one page - we do this before reading it.
 * FLUSH_STABLE is always ORed into the caller-supplied flags.
 * Returns 0 on success or a negative error code.
 */
static inline int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
	int ret = nfs_sync_inode_wait(inode, page->index, 1,
			how | FLUSH_STABLE);

	return ret < 0 ? ret : 0;
}
/*
 * Flush all requests on one page with no extra flush flags
 * (FLUSH_STABLE is added by nfs_wb_page_priority()).
 */
static inline int nfs_wb_page(struct inode *inode, struct page* page)
{
	return nfs_wb_page_priority(inode, page, 0);
}
/* /*
* Allocate nfs_write_data structures * Allocate nfs_write_data structures
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment