Commit e468bae9 authored by Trond Myklebust

NFS: Allow redirtying of a completed unstable write.

Currently, if an unstable write completes, we cannot redirty the page in
order to reflect a new change in the page data until after we've sent a
COMMIT request.

This patch allows a page rewrite to proceed without the unnecessary COMMIT
step, putting it immediately back onto the dirty page list, undoing the
VM unstable write accounting, and removing the NFS_PAGE_TAG_COMMIT tag from
the NFS radix tree.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent e7d39069
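To make the accounting described above concrete, here is a minimal, self-contained userspace sketch of the idea. This is not kernel code: the names below (unstable_pages, dirty_pages, struct page_model, mark_commit(), clear_commit(), redirty()) are hypothetical stand-ins for NR_UNSTABLE_NFS/BDI_RECLAIMABLE, struct nfs_page, nfs_mark_request_commit(), nfs_clear_request_commit() and the VM dirty-list handling. It only models how redirtying a completed unstable write can roll back the commit accounting instead of forcing a COMMIT round trip.

/*
 * Simplified userspace model of the accounting change described above.
 * NOT kernel code: all names here are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stdbool.h>

static int unstable_pages;	/* models NR_UNSTABLE_NFS / BDI_RECLAIMABLE */
static int dirty_pages;		/* models the VM dirty page list */

struct page_model {
	bool clean;		/* models PG_CLEAN: written, awaiting COMMIT */
	bool commit_tagged;	/* models NFS_PAGE_TAG_COMMIT in the radix tree */
};

/* Models nfs_mark_request_commit(): unstable write done, wait for COMMIT. */
static void mark_commit(struct page_model *p)
{
	p->clean = true;
	p->commit_tagged = true;
	unstable_pages++;
}

/* Models nfs_clear_request_commit(): undo the unstable-write accounting. */
static int clear_commit(struct page_model *p)
{
	if (p->clean) {
		p->clean = false;
		unstable_pages--;
		return 1;
	}
	return 0;
}

/* Models the new redirty path: no COMMIT round trip is needed. */
static void redirty(struct page_model *p)
{
	if (clear_commit(p))
		p->commit_tagged = false;	/* drop the commit tag */
	dirty_pages++;				/* straight back on the dirty list */
}

int main(void)
{
	struct page_model p = { 0 };

	mark_commit(&p);	/* unstable write completed */
	redirty(&p);		/* page data changed again before COMMIT */

	printf("unstable=%d dirty=%d tagged=%d\n",
	       unstable_pages, dirty_pages, p.commit_tagged);
	return 0;
}

The value of centralizing the rollback in one helper is visible in the diff below: the redirty path, the commit-completion path and the commit-cancel path can all share the same "clear the clean bit and drop the unstable counters" step.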
fs/nfs/write.c
@@ -242,12 +242,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 			return ret;
 		spin_lock(&inode->i_lock);
 	}
-	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
-		/* This request is marked for commit */
+	if (test_bit(PG_CLEAN, &req->wb_flags)) {
 		spin_unlock(&inode->i_lock);
-		nfs_clear_page_tag_locked(req);
-		nfs_pageio_complete(pgio);
-		return 0;
+		BUG();
 	}
 	if (nfs_set_page_writeback(page) != 0) {
 		spin_unlock(&inode->i_lock);
@@ -391,19 +388,6 @@ nfs_mark_request_dirty(struct nfs_page *req)
 	__set_page_dirty_nobuffers(req->wb_page);
 }
 
-/*
- * Check if a request is dirty
- */
-static inline int
-nfs_dirty_request(struct nfs_page *req)
-{
-	struct page *page = req->wb_page;
-
-	if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
-		return 0;
-	return !PageWriteback(page);
-}
-
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 /*
  * Add a request to the inode's commit list.
@@ -416,7 +400,7 @@ nfs_mark_request_commit(struct nfs_page *req)
 
 	spin_lock(&inode->i_lock);
 	nfsi->ncommit++;
-	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
+	set_bit(PG_CLEAN, &(req)->wb_flags);
 	radix_tree_tag_set(&nfsi->nfs_page_tree,
 			req->wb_index,
 			NFS_PAGE_TAG_COMMIT);
@@ -426,6 +410,19 @@ nfs_mark_request_commit(struct nfs_page *req)
 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
 
+static int
+nfs_clear_request_commit(struct nfs_page *req)
+{
+	struct page *page = req->wb_page;
+
+	if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
+		dec_zone_page_state(page, NR_UNSTABLE_NFS);
+		dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
+		return 1;
+	}
+	return 0;
+}
+
 static inline
 int nfs_write_need_commit(struct nfs_write_data *data)
 {
@@ -435,7 +432,7 @@ int nfs_write_need_commit(struct nfs_write_data *data)
 static inline
 int nfs_reschedule_unstable_write(struct nfs_page *req)
 {
-	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+	if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
 		nfs_mark_request_commit(req);
 		return 1;
 	}
@@ -451,6 +448,12 @@ nfs_mark_request_commit(struct nfs_page *req)
 {
 }
 
+static inline int
+nfs_clear_request_commit(struct nfs_page *req)
+{
+	return 0;
+}
+
 static inline
 int nfs_write_need_commit(struct nfs_write_data *data)
 {
@@ -508,11 +511,8 @@ static void nfs_cancel_commit_list(struct list_head *head)
 
 	while(!list_empty(head)) {
 		req = nfs_list_entry(head->next);
-		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
-				BDI_RECLAIMABLE);
 		nfs_list_remove_request(req);
-		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
+		nfs_clear_request_commit(req);
 		nfs_inode_remove_request(req);
 		nfs_unlock_request(req);
 	}
@@ -584,8 +584,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
 		 * Note: nfs_flush_incompatible() will already
 		 * have flushed out requests having wrong owners.
 		 */
-		if (!nfs_dirty_request(req)
-		    || offset > rqend
+		if (offset > rqend
 		    || end < req->wb_offset)
			goto out_flushme;
 
@@ -601,6 +600,10 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
 		spin_lock(&inode->i_lock);
 	}
 
+	if (nfs_clear_request_commit(req))
+		radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
+				req->wb_index, NFS_PAGE_TAG_COMMIT);
+
 	/* Okay, the request matches. Update the region */
 	if (offset < req->wb_offset) {
 		req->wb_offset = offset;
@@ -682,8 +685,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 		req = nfs_page_find_request(page);
 		if (req == NULL)
 			return 0;
-		do_flush = req->wb_page != page || req->wb_context != ctx
-			|| !nfs_dirty_request(req);
+		do_flush = req->wb_page != page || req->wb_context != ctx;
 		nfs_release_request(req);
 		if (!do_flush)
 			return 0;
@@ -1288,10 +1290,7 @@ static void nfs_commit_release(void *calldata)
 	while (!list_empty(&data->pages)) {
 		req = nfs_list_entry(data->pages.next);
 		nfs_list_remove_request(req);
-		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
-		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
-				BDI_RECLAIMABLE);
+		nfs_clear_request_commit(req);
 
 		dprintk("NFS: commit (%s/%lld %d@%lld)",
 			req->wb_context->path.dentry->d_inode->i_sb->s_id,
@@ -1467,7 +1466,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 		req = nfs_page_find_request(page);
 		if (req == NULL)
 			goto out;
-		if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+		if (test_bit(PG_CLEAN, &req->wb_flags)) {
 			nfs_release_request(req);
 			break;
 		}
include/linux/nfs_page.h
@@ -27,9 +27,12 @@
 /*
  * Valid flags for a dirty buffer
  */
-#define PG_BUSY			0
-#define PG_NEED_COMMIT		1
-#define PG_NEED_RESCHED		2
+enum {
+	PG_BUSY = 0,
+	PG_CLEAN,
+	PG_NEED_COMMIT,
+	PG_NEED_RESCHED,
+};
 
 struct nfs_inode;
 struct nfs_page {