Commit 1a54533e authored by Trond Myklebust

NFS: Add nfs_set_page_dirty()

We will want to allow nfs_writepage() to distinguish between pages that
have been marked as dirty by the VM, and those that have been marked as
dirty by nfs_updatepage().
In the former case, the entire page will want to be written out, and so any
requests that were pending need to be flushed out first.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 200baa21
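
As a quick orientation before the diff, the sketch below is a standalone userspace C illustration (the mock_* structs and helpers are simplified stand-ins, not the kernel's definitions) of the decision this patch introduces: when the VM dirties a page, any outstanding nfs_page write request is tagged with PG_NEED_FLUSH, and nfs_writepage() flushes a pending request only when that tag is set, while pages dirtied solely via nfs_updatepage() skip the extra flush.

/*
 * Standalone illustration of the PG_NEED_FLUSH logic added by this
 * commit.  The structs and helpers below are simplified stand-ins,
 * not the real kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define PG_NEED_FLUSH 3                 /* same bit number the patch defines */

struct mock_nfs_page {
        unsigned long wb_flags;         /* stands in for struct nfs_page::wb_flags */
};

/* set_page_dirty path: mark any existing write request as needing a flush */
static void mock_set_page_dirty(struct mock_nfs_page *req)
{
        if (req)
                req->wb_flags |= (1UL << PG_NEED_FLUSH);
        /* ...the kernel then falls through to __set_page_dirty_nobuffers() */
}

/* writepage path: only flush a pending request that the VM dirtied */
static bool mock_writepage_must_flush(const struct mock_nfs_page *req)
{
        return req && (req->wb_flags & (1UL << PG_NEED_FLUSH));
}

int main(void)
{
        struct mock_nfs_page partial_write = { .wb_flags = 0 };

        /* Page dirtied only by nfs_updatepage(): no flush needed. */
        printf("flush before VM dirty: %d\n", mock_writepage_must_flush(&partial_write));

        /* VM marks the page dirty: the pending request must be flushed first. */
        mock_set_page_dirty(&partial_write);
        printf("flush after VM dirty:  %d\n", mock_writepage_must_flush(&partial_write));
        return 0;
}
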
@@ -331,7 +331,7 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
 const struct address_space_operations nfs_file_aops = {
         .readpage = nfs_readpage,
         .readpages = nfs_readpages,
-        .set_page_dirty = __set_page_dirty_nobuffers,
+        .set_page_dirty = nfs_set_page_dirty,
         .writepage = nfs_writepage,
         .writepages = nfs_writepages,
         .prepare_write = nfs_prepare_write,
...
@@ -251,16 +251,23 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
 {
         struct nfs_open_context *ctx;
         struct inode *inode = page->mapping->host;
+        struct nfs_page *req;
         unsigned offset;
-        int err;
+        int err = 0;

         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
         nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

-        /* Ensure we've flushed out any previous writes */
-        nfs_wb_page_priority(inode, page, wb_priority(wbc));
+        req = nfs_page_find_request(page);
+        if (req != NULL) {
+                int flushme = test_bit(PG_NEED_FLUSH, &req->wb_flags);
+                nfs_release_request(req);
+                if (!flushme)
+                        goto out;
+                /* Ensure we've flushed out the invalid write */
+                nfs_wb_page_priority(inode, page, wb_priority(wbc));
+        }

-        err = 0;
         offset = nfs_page_length(page);
         if (!offset)
                 goto out;
@@ -655,7 +662,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 {
         struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
         struct nfs_page *req;
-        int status = 0;
+        int do_flush, status;
         /*
          * Look for a request corresponding to this page. If there
          * is one, and it belongs to another file, we flush it out
@@ -664,15 +671,18 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
          * Also do the same if we find a request from an existing
          * dropped page.
          */
-        req = nfs_page_find_request(page);
-        if (req != NULL) {
-                int do_flush = req->wb_page != page || req->wb_context != ctx;
-
+        do {
+                req = nfs_page_find_request(page);
+                if (req == NULL)
+                        return 0;
+                do_flush = req->wb_page != page || req->wb_context != ctx
+                        || test_bit(PG_NEED_FLUSH, &req->wb_flags);
                 nfs_release_request(req);
-                if (do_flush)
-                        status = nfs_wb_page(page->mapping->host, page);
-        }
-        return (status < 0) ? status : 0;
+                if (!do_flush)
+                        return 0;
+                status = nfs_wb_page(page->mapping->host, page);
+        } while (status == 0);
+        return status;
 }

 /*
@@ -1437,6 +1447,19 @@ int nfs_wb_page(struct inode *inode, struct page* page)
         return nfs_wb_page_priority(inode, page, 0);
 }

+int nfs_set_page_dirty(struct page *page)
+{
+        struct nfs_page *req;
+
+        req = nfs_page_find_request(page);
+        if (req != NULL) {
+                /* Mark any existing write requests for flushing */
+                set_bit(PG_NEED_FLUSH, &req->wb_flags);
+                nfs_release_request(req);
+        }
+        return __set_page_dirty_nobuffers(page);
+}
+
 int __init nfs_init_writepagecache(void)
 {
...
@@ -427,6 +427,7 @@ extern int nfs_flush_incompatible(struct file *file, struct page *page);
 extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
 extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
 extern void nfs_writedata_release(void *);
+extern int nfs_set_page_dirty(struct page *);

 /*
  * Try to write back everything synchronously (but check the
...
@@ -30,6 +30,7 @@
 #define PG_BUSY          0
 #define PG_NEED_COMMIT   1
 #define PG_NEED_RESCHED  2
+#define PG_NEED_FLUSH    3

 struct nfs_inode;
 struct nfs_page {
...