Commit 63185942 authored by Bryan Schumaker, committed by Trond Myklebust

lockd: Remove BKL from the client

This patch removes all calls to lock_kernel() from the lockd client. The global nlm_blocked list is now protected by a new spinlock (nlm_blocked_lock), and the per-host list of granted locks by the host's h_lock. It should be applied after the "fs/lock.c prepare for BKL removal" patch submitted
by Arnd Bergmann on September 18.
Signed-off-by: Bryan Schumaker <bjschuma@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent b4687da7
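
The conversion applied throughout the diff below follows a common BKL-removal pattern: the shared lists are protected by dedicated spinlocks, and lock_kernel()/unlock_kernel() pairs that protected nothing else are simply dropped. A minimal sketch of that pattern, using hypothetical demo_* names rather than anything from the patch:

/* Illustrative sketch only: the demo_* names are hypothetical, not from the patch. */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/slab.h>

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);	/* takes over from lock_kernel()/unlock_kernel() */

struct demo_entry {
	struct list_head	d_list;
	wait_queue_head_t	d_wait;
	int			d_status;
};

/* Link a new entry into the shared list under the spinlock. */
static struct demo_entry *demo_add(int status)
{
	struct demo_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (entry != NULL) {
		entry->d_status = status;
		init_waitqueue_head(&entry->d_wait);
		spin_lock(&demo_lock);		/* was: lock_kernel(); */
		list_add(&entry->d_list, &demo_list);
		spin_unlock(&demo_lock);	/* was: unlock_kernel(); */
	}
	return entry;
}

/* Unlink and free an entry; only the list manipulation needs the lock. */
static void demo_del(struct demo_entry *entry)
{
	spin_lock(&demo_lock);
	list_del(&entry->d_list);
	spin_unlock(&demo_lock);
	kfree(entry);
}

A spinlock is sufficient here because every critical section only links or unlinks list entries, updates a status field, or calls wake_up(), none of which can sleep.
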
fs/lockd/clntlock.c
@@ -42,6 +42,7 @@ struct nlm_wait {
 };
 static LIST_HEAD(nlm_blocked);
+static DEFINE_SPINLOCK(nlm_blocked_lock);
 /**
  * nlmclnt_init - Set up per-NFS mount point lockd data structures
@@ -97,7 +98,10 @@ struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *
 		block->b_lock = fl;
 		init_waitqueue_head(&block->b_wait);
 		block->b_status = nlm_lck_blocked;
+		spin_lock(&nlm_blocked_lock);
 		list_add(&block->b_list, &nlm_blocked);
+		spin_unlock(&nlm_blocked_lock);
 	}
 	return block;
 }
@@ -106,7 +110,9 @@ void nlmclnt_finish_block(struct nlm_wait *block)
 {
 	if (block == NULL)
 		return;
+	spin_lock(&nlm_blocked_lock);
 	list_del(&block->b_list);
+	spin_unlock(&nlm_blocked_lock);
 	kfree(block);
 }
@@ -154,6 +160,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
 	 * Look up blocked request based on arguments.
 	 * Warning: must not use cookie to match it!
 	 */
+	spin_lock(&nlm_blocked_lock);
 	list_for_each_entry(block, &nlm_blocked, b_list) {
 		struct file_lock *fl_blocked = block->b_lock;
@@ -178,6 +185,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
 		wake_up(&block->b_wait);
 		res = nlm_granted;
 	}
+	spin_unlock(&nlm_blocked_lock);
 	return res;
 }
@@ -216,10 +224,6 @@ reclaimer(void *ptr)
 	allow_signal(SIGKILL);
 	down_write(&host->h_rwsem);
-	/* This one ensures that our parent doesn't terminate while the
-	 * reclaim is in progress */
-	lock_kernel();
 	lockd_up();	/* note: this cannot fail as lockd is already running */
 	dprintk("lockd: reclaiming locks for host %s\n", host->h_name);
@@ -260,16 +264,17 @@ reclaimer(void *ptr)
 	dprintk("NLM: done reclaiming locks for host %s\n", host->h_name);
 	/* Now, wake up all processes that sleep on a blocked lock */
+	spin_lock(&nlm_blocked_lock);
 	list_for_each_entry(block, &nlm_blocked, b_list) {
 		if (block->b_host == host) {
 			block->b_status = nlm_lck_denied_grace_period;
 			wake_up(&block->b_wait);
 		}
 	}
+	spin_unlock(&nlm_blocked_lock);
 	/* Release host handle after use */
 	nlm_release_host(host);
 	lockd_down();
-	unlock_kernel();
 	return 0;
 }
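
Both nlmclnt_grant() and the reclaimer above now walk nlm_blocked entirely under nlm_blocked_lock and call wake_up() with the lock held, which is safe because wake_up() does not sleep. A rough sketch of that scan-and-wake shape, reusing the hypothetical demo_* names from the earlier sketch:

/* Scan the shared list under the spinlock and wake matching waiters.
 * Purely illustrative; the match test stands in for the real lock comparison. */
static int demo_grant(int match, int new_status)
{
	struct demo_entry *entry;
	int res = -ENOENT;

	spin_lock(&demo_lock);
	list_for_each_entry(entry, &demo_list, d_list) {
		if (entry->d_status != match)
			continue;
		entry->d_status = new_status;
		wake_up(&entry->d_wait);	/* safe under a spinlock: wake_up() never sleeps */
		res = 0;
	}
	spin_unlock(&demo_lock);
	return res;
}
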
fs/lockd/clntproc.c
@@ -166,7 +166,6 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
 	/* Set up the argument struct */
 	nlmclnt_setlockargs(call, fl);
-	lock_kernel();
 	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
 		if (fl->fl_type != F_UNLCK) {
 			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
@@ -177,10 +176,8 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
 		status = nlmclnt_test(call, fl);
 	else
 		status = -EINVAL;
 	fl->fl_ops->fl_release_private(fl);
 	fl->fl_ops = NULL;
-	unlock_kernel();
 	dprintk("lockd: clnt proc returns %d\n", status);
 	return status;
@@ -226,9 +223,7 @@ void nlm_release_call(struct nlm_rqst *call)
 static void nlmclnt_rpc_release(void *data)
 {
-	lock_kernel();
 	nlm_release_call(data);
-	unlock_kernel();
 }
 static int nlm_wait_on_grace(wait_queue_head_t *queue)
@@ -448,14 +443,18 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
 static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
 {
+	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
 	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
 	new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
 	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
+	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
 }
 static void nlmclnt_locks_release_private(struct file_lock *fl)
 {
+	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
 	list_del(&fl->fl_u.nfs_fl.list);
+	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
 	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
 }
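
The two hunks above protect the per-host h_granted list with the host's own h_lock rather than a global lock. A sketch of that per-object locking shape, again with hypothetical demo_* names (demo_entry is the one defined in the first sketch):

/* Per-object locking: the list lives in a shared object and is guarded by
 * that object's own spinlock. demo_host and its helpers are illustrative. */
struct demo_host {
	spinlock_t		h_lock;
	struct list_head	h_granted;
};

static void demo_host_init(struct demo_host *host)
{
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
}

static void demo_track(struct demo_host *host, struct demo_entry *entry)
{
	spin_lock(&host->h_lock);
	list_add_tail(&entry->d_list, &host->h_granted);
	spin_unlock(&host->h_lock);
}

static void demo_untrack(struct demo_host *host, struct demo_entry *entry)
{
	spin_lock(&host->h_lock);
	list_del(&entry->d_list);
	spin_unlock(&host->h_lock);
}

Scoping the lock to the object that owns the list keeps unrelated hosts from contending on a single global lock.
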
@@ -721,9 +720,7 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
die:
 	return;
retry_rebind:
-	lock_kernel();
 	nlm_rebind_host(req->a_host);
-	unlock_kernel();
retry_unlock:
 	rpc_restart_call(task);
 }
@@ -801,9 +798,7 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
 	/* Don't ever retry more than 3 times */
 	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
 		goto die;
-	lock_kernel();
 	nlm_rebind_host(req->a_host);
-	unlock_kernel();
 	rpc_restart_call(task);
 	rpc_delay(task, 30 * HZ);
 }