Commit 27852596 authored by Nick Piggin, committed by Trond Myklebust

nfs: use GFP_NOFS preloads for radix-tree insertion

NFS should use GFP_NOFS-mode radix-tree preloads rather than GFP_ATOMIC
allocations at radix-tree insertion time.  This is important to reduce the
atomic memory requirement.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 8d042218
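
For context, the pattern the patch switches to is sketched below. This is an illustrative sketch only, not code from the patch: insert_with_preload(), my_tree and my_lock are hypothetical stand-ins for the NFS-specific nfsi->nfs_page_tree and inode->i_lock. The idea is to preload the radix tree with GFP_NOFS before taking the spinlock, perform the insertion under the lock where no allocation may sleep, and release the preload afterwards.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

/*
 * Illustrative sketch (not part of this patch): insert `item` at `index`
 * using a GFP_NOFS preload so that no GFP_ATOMIC allocation is needed
 * while the spinlock is held.  `my_tree` and `my_lock` are hypothetical
 * stand-ins for nfsi->nfs_page_tree and inode->i_lock.
 */
static int insert_with_preload(struct radix_tree_root *my_tree,
			       spinlock_t *my_lock,
			       unsigned long index, void *item)
{
	int error;

	/* May sleep and reclaim memory; must be called outside the lock. */
	error = radix_tree_preload(GFP_NOFS);
	if (error)
		return error;		/* -ENOMEM */

	spin_lock(my_lock);
	/* Uses the preallocated nodes; cannot fail with -ENOMEM here. */
	error = radix_tree_insert(my_tree, index, item);
	spin_unlock(my_lock);

	/* Drop the per-CPU preload (re-enables preemption). */
	radix_tree_preload_end();

	return error;
}

This mirrors the new nfs_update_request() flow in the diff below: radix_tree_preload(GFP_NOFS) runs before spin_lock(&inode->i_lock), and radix_tree_preload_end() runs after the lock is dropped on every path that preloaded.
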
@@ -360,15 +360,13 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 /*
  * Insert a write request into an inode
  */
-static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
+static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
 	int error;
 
 	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
-	BUG_ON(error == -EEXIST);
-	if (error)
-		return error;
+	BUG_ON(error);
 	if (!nfsi->npages) {
 		igrab(inode);
 		if (nfs_have_delegation(inode, FMODE_WRITE))
@@ -378,8 +376,8 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 	set_page_private(req->wb_page, (unsigned long)req);
 	nfsi->npages++;
 	kref_get(&req->wb_kref);
-	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
-	return 0;
+	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
+				NFS_PAGE_TAG_LOCKED);
 }
 
 /*
@@ -591,6 +589,13 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 		/* Loop over all inode entries and see if we find
		 * A request for the page we wish to update
		 */
+		if (new) {
+			if (radix_tree_preload(GFP_NOFS)) {
+				nfs_release_request(new);
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+
 		spin_lock(&inode->i_lock);
 		req = nfs_page_find_request_locked(page);
 		if (req) {
@@ -601,28 +606,27 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 				error = nfs_wait_on_request(req);
 				nfs_release_request(req);
 				if (error < 0) {
-					if (new)
+					if (new) {
+						radix_tree_preload_end();
 						nfs_release_request(new);
+					}
 					return ERR_PTR(error);
 				}
 				continue;
 			}
 			spin_unlock(&inode->i_lock);
-			if (new)
+			if (new) {
+				radix_tree_preload_end();
 				nfs_release_request(new);
+			}
 			break;
 		}
 
 		if (new) {
-			int error;
 			nfs_lock_request_dontget(new);
-			error = nfs_inode_add_request(inode, new);
-			if (error) {
-				spin_unlock(&inode->i_lock);
-				nfs_unlock_request(new);
-				return ERR_PTR(error);
-			}
+			nfs_inode_add_request(inode, new);
 			spin_unlock(&inode->i_lock);
+			radix_tree_preload_end();
 			req = new;
 			goto zero_page;
 		}