Commit 1344b7ea authored by Trond Myklebust

NFS: Remove unused parameter from nfs_page_group_lock()

nfs_page_group_lock() is now always called with the 'nonblock'
parameter set to 'false'.
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
parent dee83046
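
What survives after this patch is the unconditional slow path: try the lock bit with test_and_set_bit(), and if it is already held, flag the lock as contended and sleep in wait_on_bit_lock() until the bit can be taken. The sketch below is a minimal userspace analogue of that pattern using C11 atomics; the struct, bit names, and the yielding busy-wait are illustrative stand-ins for the kernel primitives (the kernel actually sleeps on the bit and wakes waiters via wake_up_bit() when PG_CONTENDED1 is set), not the NFS code itself.

#include <stdatomic.h>
#include <sched.h>

#define PG_HEADLOCK   0UL	/* bit 0: the page-group head lock */
#define PG_CONTENDED1 1UL	/* bit 1: someone is waiting for the lock */

struct page_group {
	atomic_ulong wb_flags;	/* stand-in for head->wb_flags */
};

/* Analogue of nfs_page_group_lock() after this commit: always blocks. */
static int page_group_lock(struct page_group *g)
{
	/* Fast path: try to take the lock bit without waiting. */
	if (!(atomic_fetch_or(&g->wb_flags, 1UL << PG_HEADLOCK)
	      & (1UL << PG_HEADLOCK)))
		return 0;

	/* Slow path: advertise contention, then keep retrying the bit.
	 * The kernel sleeps in wait_on_bit_lock(..., TASK_UNINTERRUPTIBLE);
	 * yielding here merely keeps the example self-contained. */
	atomic_fetch_or(&g->wb_flags, 1UL << PG_CONTENDED1);
	while (atomic_fetch_or(&g->wb_flags, 1UL << PG_HEADLOCK)
	       & (1UL << PG_HEADLOCK))
		sched_yield();
	return 0;
}

static void page_group_unlock(struct page_group *g)
{
	/* The kernel clears the bit and, if PG_CONTENDED1 is set, wakes
	 * sleepers with wake_up_bit(); for the spinning waiters above,
	 * clearing the bit is enough. */
	atomic_fetch_and(&g->wb_flags, ~(1UL << PG_HEADLOCK));
}

int main(void)
{
	struct page_group g = { .wb_flags = 0 };

	page_group_lock(&g);	/* uncontended: fast path */
	page_group_unlock(&g);
	return 0;
}

Since every remaining caller passed nonblock=false, the -EAGAIN branch in the kernel function was dead code; dropping the parameter removes it and, as the diff below shows, shortens every call site.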
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -134,19 +134,14 @@ EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
 /*
  * nfs_page_group_lock - lock the head of the page group
  * @req - request in group that is to be locked
- * @nonblock - if true don't block waiting for lock
  *
- * this lock must be held if modifying the page group list
+ * this lock must be held when traversing or modifying the page
+ * group list
  *
- * return 0 on success, < 0 on error: -EDELAY if nonblocking or the
- * result from wait_on_bit_lock
- *
- * NOTE: calling with nonblock=false should always have set the
- * lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
- * with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
+ * return 0 on success, < 0 on error
  */
 int
-nfs_page_group_lock(struct nfs_page *req, bool nonblock)
+nfs_page_group_lock(struct nfs_page *req)
 {
 	struct nfs_page *head = req->wb_head;

@@ -155,14 +150,10 @@ nfs_page_group_lock(struct nfs_page *req, bool nonblock)
 	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
 		return 0;

-	if (!nonblock) {
-		set_bit(PG_CONTENDED1, &head->wb_flags);
-		smp_mb__after_atomic();
-		return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+	set_bit(PG_CONTENDED1, &head->wb_flags);
+	smp_mb__after_atomic();
+	return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
 				TASK_UNINTERRUPTIBLE);
-	}
-
-	return -EAGAIN;
 }

 /*
@@ -225,7 +216,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
 {
 	bool ret;

-	nfs_page_group_lock(req, false);
+	nfs_page_group_lock(req);
 	ret = nfs_page_group_sync_on_bit_locked(req, bit);
 	nfs_page_group_unlock(req);

@@ -1016,7 +1007,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 	unsigned int bytes_left = 0;
 	unsigned int offset, pgbase;

-	nfs_page_group_lock(req, false);
+	nfs_page_group_lock(req);

 	subreq = req;
 	bytes_left = subreq->wb_bytes;
@@ -1038,7 +1029,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 			if (mirror->pg_recoalesce)
 				return 0;
 			/* retry add_request for this subreq */
-			nfs_page_group_lock(req, false);
+			nfs_page_group_lock(req);
 			continue;
 		}
@@ -1135,7 +1126,7 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
 		if (midx) {
-			nfs_page_group_lock(req, false);
+			nfs_page_group_lock(req);

 			/* find the last request */
 			for (lastreq = req->wb_head;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -271,7 +271,7 @@ static bool nfs_page_group_covers_page(struct nfs_page *req)
 	unsigned int pos = 0;
 	unsigned int len = nfs_page_length(req->wb_page);

-	nfs_page_group_lock(req, false);
+	nfs_page_group_lock(req);

 	do {
 		tmp = nfs_page_group_search_locked(req->wb_head, pos);
@@ -480,7 +480,7 @@ nfs_lock_and_join_requests(struct page *page)
 	}
 	spin_unlock(&inode->i_lock);

-	ret = nfs_page_group_lock(head, false);
+	ret = nfs_page_group_lock(head);
 	if (ret < 0) {
 		nfs_unlock_and_release_request(head);
 		return ERR_PTR(ret);
@@ -501,7 +501,7 @@ nfs_lock_and_join_requests(struct page *page)
 		nfs_page_group_unlock(head);
 		ret = nfs_wait_on_request(subreq);
 		if (!ret)
-			ret = nfs_page_group_lock(head, false);
+			ret = nfs_page_group_lock(head);
 		if (ret < 0) {
 			nfs_unroll_locks(inode, head, subreq);
 			nfs_release_request(subreq);
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -139,7 +139,7 @@ extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
 extern int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
 extern void nfs_unlock_and_release_request(struct nfs_page *);
-extern int nfs_page_group_lock(struct nfs_page *, bool);
+extern int nfs_page_group_lock(struct nfs_page *);
 extern void nfs_page_group_unlock(struct nfs_page *);
 extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
 extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);