Commit 578acf9a authored by Alexander Aring, committed by David Teigland

dlm: use spin_lock_bh for message processing

Use spin_lock_bh for all spinlocks involved in message processing,
in preparation for softirq message processing.  DLM lock requests
from user space involve dlm processing in user context, in addition
to the standard kernel context, necessitating bh variants.
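
As a rough illustration (not from this patch; the names example_lock,
example_user_path and example_softirq_path are made up), the rule being
applied is: a spinlock that softirq message processing will take must be
taken with the _bh variant from process context, so a softirq raised on
the same CPU cannot interrupt the holder and spin on a lock it already owns.

  /* hypothetical lock shared by user-context requests and softirq receive */
  static DEFINE_SPINLOCK(example_lock);

  /* process context: e.g. a dlm request arriving from user space */
  static void example_user_path(void)
  {
          spin_lock_bh(&example_lock);    /* also disables softirqs on this CPU */
          /* ... touch state shared with message processing ... */
          spin_unlock_bh(&example_lock);
  }

  /* softirq context: message processing after the planned conversion */
  static void example_softirq_path(void)
  {
          spin_lock(&example_lock);       /* softirqs already disabled here */
          /* ... handle an incoming dlm message ... */
          spin_unlock(&example_lock);
  }
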
Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
parent 308533b4
...@@ -142,12 +142,12 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, ...@@ -142,12 +142,12 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
cb->astparam = lkb->lkb_astparam; cb->astparam = lkb->lkb_astparam;
INIT_WORK(&cb->work, dlm_callback_work); INIT_WORK(&cb->work, dlm_callback_work);
spin_lock(&ls->ls_cb_lock); spin_lock_bh(&ls->ls_cb_lock);
if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) if (test_bit(LSFL_CB_DELAY, &ls->ls_flags))
list_add(&cb->list, &ls->ls_cb_delay); list_add(&cb->list, &ls->ls_cb_delay);
else else
queue_work(ls->ls_callback_wq, &cb->work); queue_work(ls->ls_callback_wq, &cb->work);
spin_unlock(&ls->ls_cb_lock); spin_unlock_bh(&ls->ls_cb_lock);
break; break;
case DLM_ENQUEUE_CALLBACK_SUCCESS: case DLM_ENQUEUE_CALLBACK_SUCCESS:
break; break;
...@@ -179,9 +179,9 @@ void dlm_callback_stop(struct dlm_ls *ls) ...@@ -179,9 +179,9 @@ void dlm_callback_stop(struct dlm_ls *ls)
void dlm_callback_suspend(struct dlm_ls *ls) void dlm_callback_suspend(struct dlm_ls *ls)
{ {
if (ls->ls_callback_wq) { if (ls->ls_callback_wq) {
spin_lock(&ls->ls_cb_lock); spin_lock_bh(&ls->ls_cb_lock);
set_bit(LSFL_CB_DELAY, &ls->ls_flags); set_bit(LSFL_CB_DELAY, &ls->ls_flags);
spin_unlock(&ls->ls_cb_lock); spin_unlock_bh(&ls->ls_cb_lock);
flush_workqueue(ls->ls_callback_wq); flush_workqueue(ls->ls_callback_wq);
} }
...@@ -199,7 +199,7 @@ void dlm_callback_resume(struct dlm_ls *ls) ...@@ -199,7 +199,7 @@ void dlm_callback_resume(struct dlm_ls *ls)
return; return;
more: more:
spin_lock(&ls->ls_cb_lock); spin_lock_bh(&ls->ls_cb_lock);
list_for_each_entry_safe(cb, safe, &ls->ls_cb_delay, list) { list_for_each_entry_safe(cb, safe, &ls->ls_cb_delay, list) {
list_del(&cb->list); list_del(&cb->list);
queue_work(ls->ls_callback_wq, &cb->work); queue_work(ls->ls_callback_wq, &cb->work);
...@@ -210,7 +210,7 @@ void dlm_callback_resume(struct dlm_ls *ls) ...@@ -210,7 +210,7 @@ void dlm_callback_resume(struct dlm_ls *ls)
empty = list_empty(&ls->ls_cb_delay); empty = list_empty(&ls->ls_cb_delay);
if (empty) if (empty)
clear_bit(LSFL_CB_DELAY, &ls->ls_flags); clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
spin_unlock(&ls->ls_cb_lock); spin_unlock_bh(&ls->ls_cb_lock);
sum += count; sum += count;
if (!empty) { if (!empty) {
......
...@@ -452,7 +452,7 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos) ...@@ -452,7 +452,7 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
spin_lock(&ls->ls_rsbtbl[bucket].lock); spin_lock_bh(&ls->ls_rsbtbl[bucket].lock);
if (!RB_EMPTY_ROOT(tree)) { if (!RB_EMPTY_ROOT(tree)) {
for (node = rb_first(tree); node; node = rb_next(node)) { for (node = rb_first(tree); node; node = rb_next(node)) {
r = rb_entry(node, struct dlm_rsb, res_hashnode); r = rb_entry(node, struct dlm_rsb, res_hashnode);
...@@ -460,12 +460,12 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos) ...@@ -460,12 +460,12 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
dlm_hold_rsb(r); dlm_hold_rsb(r);
ri->rsb = r; ri->rsb = r;
ri->bucket = bucket; ri->bucket = bucket;
spin_unlock(&ls->ls_rsbtbl[bucket].lock); spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
return ri; return ri;
} }
} }
} }
spin_unlock(&ls->ls_rsbtbl[bucket].lock); spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
/* /*
* move to the first rsb in the next non-empty bucket * move to the first rsb in the next non-empty bucket
...@@ -484,18 +484,18 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos) ...@@ -484,18 +484,18 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
} }
tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
spin_lock(&ls->ls_rsbtbl[bucket].lock); spin_lock_bh(&ls->ls_rsbtbl[bucket].lock);
if (!RB_EMPTY_ROOT(tree)) { if (!RB_EMPTY_ROOT(tree)) {
node = rb_first(tree); node = rb_first(tree);
r = rb_entry(node, struct dlm_rsb, res_hashnode); r = rb_entry(node, struct dlm_rsb, res_hashnode);
dlm_hold_rsb(r); dlm_hold_rsb(r);
ri->rsb = r; ri->rsb = r;
ri->bucket = bucket; ri->bucket = bucket;
spin_unlock(&ls->ls_rsbtbl[bucket].lock); spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
*pos = n; *pos = n;
return ri; return ri;
} }
spin_unlock(&ls->ls_rsbtbl[bucket].lock); spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
} }
} }
...@@ -516,7 +516,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) ...@@ -516,7 +516,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
* move to the next rsb in the same bucket * move to the next rsb in the same bucket
*/ */
spin_lock(&ls->ls_rsbtbl[bucket].lock); spin_lock_bh(&ls->ls_rsbtbl[bucket].lock);
rp = ri->rsb; rp = ri->rsb;
next = rb_next(&rp->res_hashnode); next = rb_next(&rp->res_hashnode);
...@@ -524,12 +524,12 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) ...@@ -524,12 +524,12 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
r = rb_entry(next, struct dlm_rsb, res_hashnode); r = rb_entry(next, struct dlm_rsb, res_hashnode);
dlm_hold_rsb(r); dlm_hold_rsb(r);
ri->rsb = r; ri->rsb = r;
spin_unlock(&ls->ls_rsbtbl[bucket].lock); spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
dlm_put_rsb(rp); dlm_put_rsb(rp);
++*pos; ++*pos;
return ri; return ri;
} }
spin_unlock(&ls->ls_rsbtbl[bucket].lock); spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
dlm_put_rsb(rp); dlm_put_rsb(rp);
/* /*
...@@ -550,18 +550,18 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) ...@@ -550,18 +550,18 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
} }
tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
spin_lock(&ls->ls_rsbtbl[bucket].lock); spin_lock_bh(&ls->ls_rsbtbl[bucket].lock);
if (!RB_EMPTY_ROOT(tree)) { if (!RB_EMPTY_ROOT(tree)) {
next = rb_first(tree); next = rb_first(tree);
r = rb_entry(next, struct dlm_rsb, res_hashnode); r = rb_entry(next, struct dlm_rsb, res_hashnode);
dlm_hold_rsb(r); dlm_hold_rsb(r);
ri->rsb = r; ri->rsb = r;
ri->bucket = bucket; ri->bucket = bucket;
spin_unlock(&ls->ls_rsbtbl[bucket].lock); spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
*pos = n; *pos = n;
return ri; return ri;
} }
spin_unlock(&ls->ls_rsbtbl[bucket].lock); spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
} }
} }
...@@ -743,7 +743,7 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf, ...@@ -743,7 +743,7 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf,
goto out; goto out;
} }
spin_lock(&ls->ls_waiters_lock); spin_lock_bh(&ls->ls_waiters_lock);
memset(debug_buf, 0, sizeof(debug_buf)); memset(debug_buf, 0, sizeof(debug_buf));
list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
...@@ -754,7 +754,7 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf, ...@@ -754,7 +754,7 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf,
break; break;
pos += ret; pos += ret;
} }
spin_unlock(&ls->ls_waiters_lock); spin_unlock_bh(&ls->ls_waiters_lock);
dlm_unlock_recovery(ls); dlm_unlock_recovery(ls);
rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos); rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos);
......
...@@ -204,12 +204,12 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name, ...@@ -204,12 +204,12 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name,
hash = jhash(name, len, 0); hash = jhash(name, len, 0);
bucket = hash & (ls->ls_rsbtbl_size - 1); bucket = hash & (ls->ls_rsbtbl_size - 1);
spin_lock(&ls->ls_rsbtbl[bucket].lock); spin_lock_bh(&ls->ls_rsbtbl[bucket].lock);
rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r);
if (rv) if (rv)
rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss, rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss,
name, len, &r); name, len, &r);
spin_unlock(&ls->ls_rsbtbl[bucket].lock); spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
if (!rv) if (!rv)
return r; return r;
...@@ -245,7 +245,7 @@ static void drop_dir_ctx(struct dlm_ls *ls, int nodeid) ...@@ -245,7 +245,7 @@ static void drop_dir_ctx(struct dlm_ls *ls, int nodeid)
{ {
struct dlm_dir_dump *dd, *safe; struct dlm_dir_dump *dd, *safe;
write_lock(&ls->ls_dir_dump_lock); write_lock_bh(&ls->ls_dir_dump_lock);
list_for_each_entry_safe(dd, safe, &ls->ls_dir_dump_list, list) { list_for_each_entry_safe(dd, safe, &ls->ls_dir_dump_list, list) {
if (dd->nodeid_init == nodeid) { if (dd->nodeid_init == nodeid) {
log_error(ls, "drop dump seq %llu", log_error(ls, "drop dump seq %llu",
...@@ -254,21 +254,21 @@ static void drop_dir_ctx(struct dlm_ls *ls, int nodeid) ...@@ -254,21 +254,21 @@ static void drop_dir_ctx(struct dlm_ls *ls, int nodeid)
kfree(dd); kfree(dd);
} }
} }
write_unlock(&ls->ls_dir_dump_lock); write_unlock_bh(&ls->ls_dir_dump_lock);
} }
static struct dlm_dir_dump *lookup_dir_dump(struct dlm_ls *ls, int nodeid) static struct dlm_dir_dump *lookup_dir_dump(struct dlm_ls *ls, int nodeid)
{ {
struct dlm_dir_dump *iter, *dd = NULL; struct dlm_dir_dump *iter, *dd = NULL;
read_lock(&ls->ls_dir_dump_lock); read_lock_bh(&ls->ls_dir_dump_lock);
list_for_each_entry(iter, &ls->ls_dir_dump_list, list) { list_for_each_entry(iter, &ls->ls_dir_dump_list, list) {
if (iter->nodeid_init == nodeid) { if (iter->nodeid_init == nodeid) {
dd = iter; dd = iter;
break; break;
} }
} }
read_unlock(&ls->ls_dir_dump_lock); read_unlock_bh(&ls->ls_dir_dump_lock);
return dd; return dd;
} }
...@@ -291,9 +291,9 @@ static struct dlm_dir_dump *init_dir_dump(struct dlm_ls *ls, int nodeid) ...@@ -291,9 +291,9 @@ static struct dlm_dir_dump *init_dir_dump(struct dlm_ls *ls, int nodeid)
dd->seq_init = ls->ls_recover_seq; dd->seq_init = ls->ls_recover_seq;
dd->nodeid_init = nodeid; dd->nodeid_init = nodeid;
write_lock(&ls->ls_dir_dump_lock); write_lock_bh(&ls->ls_dir_dump_lock);
list_add(&dd->list, &ls->ls_dir_dump_list); list_add(&dd->list, &ls->ls_dir_dump_list);
write_unlock(&ls->ls_dir_dump_lock); write_unlock_bh(&ls->ls_dir_dump_lock);
return dd; return dd;
} }
...@@ -311,7 +311,7 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, ...@@ -311,7 +311,7 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen,
struct dlm_dir_dump *dd; struct dlm_dir_dump *dd;
__be16 be_namelen; __be16 be_namelen;
read_lock(&ls->ls_masters_lock); read_lock_bh(&ls->ls_masters_lock);
if (inlen > 1) { if (inlen > 1) {
dd = lookup_dir_dump(ls, nodeid); dd = lookup_dir_dump(ls, nodeid);
...@@ -397,12 +397,12 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, ...@@ -397,12 +397,12 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen,
log_rinfo(ls, "dlm_recover_directory nodeid %d sent %u res out %u messages", log_rinfo(ls, "dlm_recover_directory nodeid %d sent %u res out %u messages",
nodeid, dd->sent_res, dd->sent_msg); nodeid, dd->sent_res, dd->sent_msg);
write_lock(&ls->ls_dir_dump_lock); write_lock_bh(&ls->ls_dir_dump_lock);
list_del_init(&dd->list); list_del_init(&dd->list);
write_unlock(&ls->ls_dir_dump_lock); write_unlock_bh(&ls->ls_dir_dump_lock);
kfree(dd); kfree(dd);
} }
out: out:
read_unlock(&ls->ls_masters_lock); read_unlock_bh(&ls->ls_masters_lock);
} }
...@@ -333,6 +333,36 @@ void dlm_hold_rsb(struct dlm_rsb *r) ...@@ -333,6 +333,36 @@ void dlm_hold_rsb(struct dlm_rsb *r)
hold_rsb(r); hold_rsb(r);
} }
/* TODO move this to lib/refcount.c */
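/* bh-safe counterpart of refcount_dec_and_lock(): drop one reference and,
 * only if it was the last one, return true with the lock held (taken with
 * spin_lock_bh() so it can nest with softirq users of the same lock).
 */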
static __must_check bool
dlm_refcount_dec_and_lock_bh(refcount_t *r, spinlock_t *lock)
__cond_acquires(lock)
{
if (refcount_dec_not_one(r))
return false;
spin_lock_bh(lock);
if (!refcount_dec_and_test(r)) {
spin_unlock_bh(lock);
return false;
}
return true;
}
/* TODO move this to include/linux/kref.h */
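/* bh-safe counterpart of kref_put_lock(): if this put drops the last
 * reference, call release() and return 1 with the lock still held (the
 * caller is expected to unlock); otherwise return 0.
 */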
static inline int dlm_kref_put_lock_bh(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
{
if (dlm_refcount_dec_and_lock_bh(&kref->refcount, lock)) {
release(kref);
return 1;
}
return 0;
}
/* When all references to the rsb are gone it's transferred to /* When all references to the rsb are gone it's transferred to
the tossed list for later disposal. */ the tossed list for later disposal. */
...@@ -342,10 +372,10 @@ static void put_rsb(struct dlm_rsb *r) ...@@ -342,10 +372,10 @@ static void put_rsb(struct dlm_rsb *r)
uint32_t bucket = r->res_bucket; uint32_t bucket = r->res_bucket;
int rv; int rv;
rv = kref_put_lock(&r->res_ref, toss_rsb, rv = dlm_kref_put_lock_bh(&r->res_ref, toss_rsb,
&ls->ls_rsbtbl[bucket].lock); &ls->ls_rsbtbl[bucket].lock);
if (rv) if (rv)
spin_unlock(&ls->ls_rsbtbl[bucket].lock); spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
} }
void dlm_put_rsb(struct dlm_rsb *r) void dlm_put_rsb(struct dlm_rsb *r)
...@@ -358,17 +388,17 @@ static int pre_rsb_struct(struct dlm_ls *ls) ...@@ -358,17 +388,17 @@ static int pre_rsb_struct(struct dlm_ls *ls)
struct dlm_rsb *r1, *r2; struct dlm_rsb *r1, *r2;
int count = 0; int count = 0;
spin_lock(&ls->ls_new_rsb_spin); spin_lock_bh(&ls->ls_new_rsb_spin);
if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) { if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
spin_unlock(&ls->ls_new_rsb_spin); spin_unlock_bh(&ls->ls_new_rsb_spin);
return 0; return 0;
} }
spin_unlock(&ls->ls_new_rsb_spin); spin_unlock_bh(&ls->ls_new_rsb_spin);
r1 = dlm_allocate_rsb(ls); r1 = dlm_allocate_rsb(ls);
r2 = dlm_allocate_rsb(ls); r2 = dlm_allocate_rsb(ls);
spin_lock(&ls->ls_new_rsb_spin); spin_lock_bh(&ls->ls_new_rsb_spin);
if (r1) { if (r1) {
list_add(&r1->res_hashchain, &ls->ls_new_rsb); list_add(&r1->res_hashchain, &ls->ls_new_rsb);
ls->ls_new_rsb_count++; ls->ls_new_rsb_count++;
...@@ -378,7 +408,7 @@ static int pre_rsb_struct(struct dlm_ls *ls) ...@@ -378,7 +408,7 @@ static int pre_rsb_struct(struct dlm_ls *ls)
ls->ls_new_rsb_count++; ls->ls_new_rsb_count++;
} }
count = ls->ls_new_rsb_count; count = ls->ls_new_rsb_count;
spin_unlock(&ls->ls_new_rsb_spin); spin_unlock_bh(&ls->ls_new_rsb_spin);
if (!count) if (!count)
return -ENOMEM; return -ENOMEM;
...@@ -395,10 +425,10 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len, ...@@ -395,10 +425,10 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
struct dlm_rsb *r; struct dlm_rsb *r;
int count; int count;
spin_lock(&ls->ls_new_rsb_spin); spin_lock_bh(&ls->ls_new_rsb_spin);
if (list_empty(&ls->ls_new_rsb)) { if (list_empty(&ls->ls_new_rsb)) {
count = ls->ls_new_rsb_count; count = ls->ls_new_rsb_count;
spin_unlock(&ls->ls_new_rsb_spin); spin_unlock_bh(&ls->ls_new_rsb_spin);
log_debug(ls, "find_rsb retry %d %d %s", log_debug(ls, "find_rsb retry %d %d %s",
count, dlm_config.ci_new_rsb_count, count, dlm_config.ci_new_rsb_count,
(const char *)name); (const char *)name);
...@@ -410,7 +440,7 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len, ...@@ -410,7 +440,7 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
/* Convert the empty list_head to a NULL rb_node for tree usage: */ /* Convert the empty list_head to a NULL rb_node for tree usage: */
memset(&r->res_hashnode, 0, sizeof(struct rb_node)); memset(&r->res_hashnode, 0, sizeof(struct rb_node));
ls->ls_new_rsb_count--; ls->ls_new_rsb_count--;
spin_unlock(&ls->ls_new_rsb_spin); spin_unlock_bh(&ls->ls_new_rsb_spin);
r->res_ls = ls; r->res_ls = ls;
r->res_length = len; r->res_length = len;
...@@ -585,7 +615,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, ...@@ -585,7 +615,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
goto out; goto out;
} }
spin_lock(&ls->ls_rsbtbl[b].lock); spin_lock_bh(&ls->ls_rsbtbl[b].lock);
error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
if (error) if (error)
...@@ -655,7 +685,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, ...@@ -655,7 +685,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
error = get_rsb_struct(ls, name, len, &r); error = get_rsb_struct(ls, name, len, &r);
if (error == -EAGAIN) { if (error == -EAGAIN) {
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
goto retry; goto retry;
} }
if (error) if (error)
...@@ -704,7 +734,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, ...@@ -704,7 +734,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
out_add: out_add:
error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
out_unlock: out_unlock:
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
out: out:
*r_ret = r; *r_ret = r;
return error; return error;
...@@ -729,7 +759,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, ...@@ -729,7 +759,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
if (error < 0) if (error < 0)
goto out; goto out;
spin_lock(&ls->ls_rsbtbl[b].lock); spin_lock_bh(&ls->ls_rsbtbl[b].lock);
error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
if (error) if (error)
...@@ -787,7 +817,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, ...@@ -787,7 +817,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
error = get_rsb_struct(ls, name, len, &r); error = get_rsb_struct(ls, name, len, &r);
if (error == -EAGAIN) { if (error == -EAGAIN) {
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
goto retry; goto retry;
} }
if (error) if (error)
...@@ -802,7 +832,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, ...@@ -802,7 +832,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
out_unlock: out_unlock:
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
out: out:
*r_ret = r; *r_ret = r;
return error; return error;
...@@ -1019,7 +1049,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, ...@@ -1019,7 +1049,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
if (error < 0) if (error < 0)
return error; return error;
spin_lock(&ls->ls_rsbtbl[b].lock); spin_lock_bh(&ls->ls_rsbtbl[b].lock);
error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
if (!error) { if (!error) {
/* because the rsb is active, we need to lock_rsb before /* because the rsb is active, we need to lock_rsb before
...@@ -1027,7 +1057,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, ...@@ -1027,7 +1057,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
*/ */
hold_rsb(r); hold_rsb(r);
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
lock_rsb(r); lock_rsb(r);
__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false, __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false,
...@@ -1053,14 +1083,14 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, ...@@ -1053,14 +1083,14 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
r->res_toss_time = jiffies; r->res_toss_time = jiffies;
/* the rsb was inactive (on toss list) */ /* the rsb was inactive (on toss list) */
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
return 0; return 0;
not_found: not_found:
error = get_rsb_struct(ls, name, len, &r); error = get_rsb_struct(ls, name, len, &r);
if (error == -EAGAIN) { if (error == -EAGAIN) {
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
goto retry; goto retry;
} }
if (error) if (error)
...@@ -1078,7 +1108,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, ...@@ -1078,7 +1108,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
if (error) { if (error) {
/* should never happen */ /* should never happen */
dlm_free_rsb(r); dlm_free_rsb(r);
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
goto retry; goto retry;
} }
...@@ -1086,7 +1116,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, ...@@ -1086,7 +1116,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
*result = DLM_LU_ADD; *result = DLM_LU_ADD;
*r_nodeid = from_nodeid; *r_nodeid = from_nodeid;
out_unlock: out_unlock:
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
return error; return error;
} }
...@@ -1097,13 +1127,13 @@ static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash) ...@@ -1097,13 +1127,13 @@ static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
int i; int i;
for (i = 0; i < ls->ls_rsbtbl_size; i++) { for (i = 0; i < ls->ls_rsbtbl_size; i++) {
spin_lock(&ls->ls_rsbtbl[i].lock); spin_lock_bh(&ls->ls_rsbtbl[i].lock);
for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
r = rb_entry(n, struct dlm_rsb, res_hashnode); r = rb_entry(n, struct dlm_rsb, res_hashnode);
if (r->res_hash == hash) if (r->res_hash == hash)
dlm_dump_rsb(r); dlm_dump_rsb(r);
} }
spin_unlock(&ls->ls_rsbtbl[i].lock); spin_unlock_bh(&ls->ls_rsbtbl[i].lock);
} }
} }
...@@ -1116,7 +1146,7 @@ void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len) ...@@ -1116,7 +1146,7 @@ void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len)
hash = jhash(name, len, 0); hash = jhash(name, len, 0);
b = hash & (ls->ls_rsbtbl_size - 1); b = hash & (ls->ls_rsbtbl_size - 1);
spin_lock(&ls->ls_rsbtbl[b].lock); spin_lock_bh(&ls->ls_rsbtbl[b].lock);
error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
if (!error) if (!error)
goto out_dump; goto out_dump;
...@@ -1127,7 +1157,7 @@ void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len) ...@@ -1127,7 +1157,7 @@ void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len)
out_dump: out_dump:
dlm_dump_rsb(r); dlm_dump_rsb(r);
out: out:
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
} }
static void toss_rsb(struct kref *kref) static void toss_rsb(struct kref *kref)
...@@ -1208,11 +1238,11 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret, ...@@ -1208,11 +1238,11 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
INIT_LIST_HEAD(&lkb->lkb_ownqueue); INIT_LIST_HEAD(&lkb->lkb_ownqueue);
INIT_LIST_HEAD(&lkb->lkb_rsb_lookup); INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
spin_lock(&ls->ls_lkbidr_spin); spin_lock_bh(&ls->ls_lkbidr_spin);
rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT); rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT);
if (rv >= 0) if (rv >= 0)
lkb->lkb_id = rv; lkb->lkb_id = rv;
spin_unlock(&ls->ls_lkbidr_spin); spin_unlock_bh(&ls->ls_lkbidr_spin);
if (rv < 0) { if (rv < 0) {
log_error(ls, "create_lkb idr error %d", rv); log_error(ls, "create_lkb idr error %d", rv);
...@@ -1233,11 +1263,11 @@ static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret) ...@@ -1233,11 +1263,11 @@ static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{ {
struct dlm_lkb *lkb; struct dlm_lkb *lkb;
spin_lock(&ls->ls_lkbidr_spin); spin_lock_bh(&ls->ls_lkbidr_spin);
lkb = idr_find(&ls->ls_lkbidr, lkid); lkb = idr_find(&ls->ls_lkbidr, lkid);
if (lkb) if (lkb)
kref_get(&lkb->lkb_ref); kref_get(&lkb->lkb_ref);
spin_unlock(&ls->ls_lkbidr_spin); spin_unlock_bh(&ls->ls_lkbidr_spin);
*lkb_ret = lkb; *lkb_ret = lkb;
return lkb ? 0 : -ENOENT; return lkb ? 0 : -ENOENT;
...@@ -1261,11 +1291,11 @@ static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb) ...@@ -1261,11 +1291,11 @@ static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
uint32_t lkid = lkb->lkb_id; uint32_t lkid = lkb->lkb_id;
int rv; int rv;
rv = kref_put_lock(&lkb->lkb_ref, kill_lkb, rv = dlm_kref_put_lock_bh(&lkb->lkb_ref, kill_lkb,
&ls->ls_lkbidr_spin); &ls->ls_lkbidr_spin);
if (rv) { if (rv) {
idr_remove(&ls->ls_lkbidr, lkid); idr_remove(&ls->ls_lkbidr, lkid);
spin_unlock(&ls->ls_lkbidr_spin); spin_unlock_bh(&ls->ls_lkbidr_spin);
detach_lkb(lkb); detach_lkb(lkb);
...@@ -1406,7 +1436,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid) ...@@ -1406,7 +1436,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
struct dlm_ls *ls = lkb->lkb_resource->res_ls; struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int error = 0; int error = 0;
spin_lock(&ls->ls_waiters_lock); spin_lock_bh(&ls->ls_waiters_lock);
if (is_overlap_unlock(lkb) || if (is_overlap_unlock(lkb) ||
(is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) { (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
...@@ -1449,7 +1479,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid) ...@@ -1449,7 +1479,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
log_error(ls, "addwait error %x %d flags %x %d %d %s", log_error(ls, "addwait error %x %d flags %x %d %d %s",
lkb->lkb_id, error, dlm_iflags_val(lkb), mstype, lkb->lkb_id, error, dlm_iflags_val(lkb), mstype,
lkb->lkb_wait_type, lkb->lkb_resource->res_name); lkb->lkb_wait_type, lkb->lkb_resource->res_name);
spin_unlock(&ls->ls_waiters_lock); spin_unlock_bh(&ls->ls_waiters_lock);
return error; return error;
} }
...@@ -1549,9 +1579,9 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype) ...@@ -1549,9 +1579,9 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
struct dlm_ls *ls = lkb->lkb_resource->res_ls; struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int error; int error;
spin_lock(&ls->ls_waiters_lock); spin_lock_bh(&ls->ls_waiters_lock);
error = _remove_from_waiters(lkb, mstype, NULL); error = _remove_from_waiters(lkb, mstype, NULL);
spin_unlock(&ls->ls_waiters_lock); spin_unlock_bh(&ls->ls_waiters_lock);
return error; return error;
} }
...@@ -1569,13 +1599,13 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, ...@@ -1569,13 +1599,13 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb,
int error; int error;
if (!local) if (!local)
spin_lock(&ls->ls_waiters_lock); spin_lock_bh(&ls->ls_waiters_lock);
else else
WARN_ON_ONCE(!rwsem_is_locked(&ls->ls_in_recovery) || WARN_ON_ONCE(!rwsem_is_locked(&ls->ls_in_recovery) ||
!dlm_locking_stopped(ls)); !dlm_locking_stopped(ls));
error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms); error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms);
if (!local) if (!local)
spin_unlock(&ls->ls_waiters_lock); spin_unlock_bh(&ls->ls_waiters_lock);
return error; return error;
} }
...@@ -1591,10 +1621,10 @@ static void shrink_bucket(struct dlm_ls *ls, int b) ...@@ -1591,10 +1621,10 @@ static void shrink_bucket(struct dlm_ls *ls, int b)
memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX); memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);
spin_lock(&ls->ls_rsbtbl[b].lock); spin_lock_bh(&ls->ls_rsbtbl[b].lock);
if (!test_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags)) { if (!test_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags)) {
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
return; return;
} }
...@@ -1651,7 +1681,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b) ...@@ -1651,7 +1681,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b)
set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags); set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags);
else else
clear_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags); clear_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags);
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
/* /*
* While searching for rsb's to free, we found some that require * While searching for rsb's to free, we found some that require
...@@ -1666,16 +1696,16 @@ static void shrink_bucket(struct dlm_ls *ls, int b) ...@@ -1666,16 +1696,16 @@ static void shrink_bucket(struct dlm_ls *ls, int b)
name = ls->ls_remove_names[i]; name = ls->ls_remove_names[i];
len = ls->ls_remove_lens[i]; len = ls->ls_remove_lens[i];
spin_lock(&ls->ls_rsbtbl[b].lock); spin_lock_bh(&ls->ls_rsbtbl[b].lock);
rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
if (rv) { if (rv) {
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
log_debug(ls, "remove_name not toss %s", name); log_debug(ls, "remove_name not toss %s", name);
continue; continue;
} }
if (r->res_master_nodeid != our_nodeid) { if (r->res_master_nodeid != our_nodeid) {
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
log_debug(ls, "remove_name master %d dir %d our %d %s", log_debug(ls, "remove_name master %d dir %d our %d %s",
r->res_master_nodeid, r->res_dir_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
our_nodeid, name); our_nodeid, name);
...@@ -1684,7 +1714,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b) ...@@ -1684,7 +1714,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b)
if (r->res_dir_nodeid == our_nodeid) { if (r->res_dir_nodeid == our_nodeid) {
/* should never happen */ /* should never happen */
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
log_error(ls, "remove_name dir %d master %d our %d %s", log_error(ls, "remove_name dir %d master %d our %d %s",
r->res_dir_nodeid, r->res_master_nodeid, r->res_dir_nodeid, r->res_master_nodeid,
our_nodeid, name); our_nodeid, name);
...@@ -1693,21 +1723,21 @@ static void shrink_bucket(struct dlm_ls *ls, int b) ...@@ -1693,21 +1723,21 @@ static void shrink_bucket(struct dlm_ls *ls, int b)
if (!time_after_eq(jiffies, r->res_toss_time + if (!time_after_eq(jiffies, r->res_toss_time +
dlm_config.ci_toss_secs * HZ)) { dlm_config.ci_toss_secs * HZ)) {
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
log_debug(ls, "remove_name toss_time %lu now %lu %s", log_debug(ls, "remove_name toss_time %lu now %lu %s",
r->res_toss_time, jiffies, name); r->res_toss_time, jiffies, name);
continue; continue;
} }
if (!kref_put(&r->res_ref, kill_rsb)) { if (!kref_put(&r->res_ref, kill_rsb)) {
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
log_error(ls, "remove_name in use %s", name); log_error(ls, "remove_name in use %s", name);
continue; continue;
} }
rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
send_remove(r); send_remove(r);
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
dlm_free_rsb(r); dlm_free_rsb(r);
} }
...@@ -4171,7 +4201,7 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) ...@@ -4171,7 +4201,7 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms)
hash = jhash(name, len, 0); hash = jhash(name, len, 0);
b = hash & (ls->ls_rsbtbl_size - 1); b = hash & (ls->ls_rsbtbl_size - 1);
spin_lock(&ls->ls_rsbtbl[b].lock); spin_lock_bh(&ls->ls_rsbtbl[b].lock);
rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
if (rv) { if (rv) {
...@@ -4181,7 +4211,7 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) ...@@ -4181,7 +4211,7 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms)
/* should not happen */ /* should not happen */
log_error(ls, "receive_remove from %d not found %s", log_error(ls, "receive_remove from %d not found %s",
from_nodeid, name); from_nodeid, name);
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
return; return;
} }
if (r->res_master_nodeid != from_nodeid) { if (r->res_master_nodeid != from_nodeid) {
...@@ -4189,14 +4219,14 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) ...@@ -4189,14 +4219,14 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms)
log_error(ls, "receive_remove keep from %d master %d", log_error(ls, "receive_remove keep from %d master %d",
from_nodeid, r->res_master_nodeid); from_nodeid, r->res_master_nodeid);
dlm_print_rsb(r); dlm_print_rsb(r);
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
return; return;
} }
log_debug(ls, "receive_remove from %d master %d first %x %s", log_debug(ls, "receive_remove from %d master %d first %x %s",
from_nodeid, r->res_master_nodeid, r->res_first_lkid, from_nodeid, r->res_master_nodeid, r->res_first_lkid,
name); name);
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
return; return;
} }
...@@ -4204,19 +4234,19 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) ...@@ -4204,19 +4234,19 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms)
log_error(ls, "receive_remove toss from %d master %d", log_error(ls, "receive_remove toss from %d master %d",
from_nodeid, r->res_master_nodeid); from_nodeid, r->res_master_nodeid);
dlm_print_rsb(r); dlm_print_rsb(r);
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
return; return;
} }
if (kref_put(&r->res_ref, kill_rsb)) { if (kref_put(&r->res_ref, kill_rsb)) {
rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
dlm_free_rsb(r); dlm_free_rsb(r);
} else { } else {
log_error(ls, "receive_remove from %d rsb ref error", log_error(ls, "receive_remove from %d rsb ref error",
from_nodeid); from_nodeid);
dlm_print_rsb(r); dlm_print_rsb(r);
spin_unlock(&ls->ls_rsbtbl[b].lock); spin_unlock_bh(&ls->ls_rsbtbl[b].lock);
} }
} }
...@@ -4752,20 +4782,20 @@ static void dlm_receive_message(struct dlm_ls *ls, const struct dlm_message *ms, ...@@ -4752,20 +4782,20 @@ static void dlm_receive_message(struct dlm_ls *ls, const struct dlm_message *ms,
int nodeid) int nodeid)
{ {
try_again: try_again:
read_lock(&ls->ls_requestqueue_lock); read_lock_bh(&ls->ls_requestqueue_lock);
if (test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) { if (test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) {
/* If we were a member of this lockspace, left, and rejoined, /* If we were a member of this lockspace, left, and rejoined,
other nodes may still be sending us messages from the other nodes may still be sending us messages from the
lockspace generation before we left. */ lockspace generation before we left. */
if (WARN_ON_ONCE(!ls->ls_generation)) { if (WARN_ON_ONCE(!ls->ls_generation)) {
read_unlock(&ls->ls_requestqueue_lock); read_unlock_bh(&ls->ls_requestqueue_lock);
log_limit(ls, "receive %d from %d ignore old gen", log_limit(ls, "receive %d from %d ignore old gen",
le32_to_cpu(ms->m_type), nodeid); le32_to_cpu(ms->m_type), nodeid);
return; return;
} }
read_unlock(&ls->ls_requestqueue_lock); read_unlock_bh(&ls->ls_requestqueue_lock);
write_lock(&ls->ls_requestqueue_lock); write_lock_bh(&ls->ls_requestqueue_lock);
/* recheck because we hold writelock now */ /* recheck because we hold writelock now */
if (!test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) { if (!test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) {
write_unlock_bh(&ls->ls_requestqueue_lock); write_unlock_bh(&ls->ls_requestqueue_lock);
...@@ -4773,10 +4803,10 @@ static void dlm_receive_message(struct dlm_ls *ls, const struct dlm_message *ms, ...@@ -4773,10 +4803,10 @@ static void dlm_receive_message(struct dlm_ls *ls, const struct dlm_message *ms,
} }
dlm_add_requestqueue(ls, nodeid, ms); dlm_add_requestqueue(ls, nodeid, ms);
write_unlock(&ls->ls_requestqueue_lock); write_unlock_bh(&ls->ls_requestqueue_lock);
} else { } else {
_receive_message(ls, ms, 0); _receive_message(ls, ms, 0);
read_unlock(&ls->ls_requestqueue_lock); read_unlock_bh(&ls->ls_requestqueue_lock);
} }
} }
...@@ -4836,7 +4866,7 @@ void dlm_receive_buffer(const union dlm_packet *p, int nodeid) ...@@ -4836,7 +4866,7 @@ void dlm_receive_buffer(const union dlm_packet *p, int nodeid)
/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
be inactive (in this ls) before transitioning to recovery mode */ be inactive (in this ls) before transitioning to recovery mode */
read_lock(&ls->ls_recv_active); read_lock_bh(&ls->ls_recv_active);
if (hd->h_cmd == DLM_MSG) if (hd->h_cmd == DLM_MSG)
dlm_receive_message(ls, &p->message, nodeid); dlm_receive_message(ls, &p->message, nodeid);
else if (hd->h_cmd == DLM_RCOM) else if (hd->h_cmd == DLM_RCOM)
...@@ -4844,7 +4874,7 @@ void dlm_receive_buffer(const union dlm_packet *p, int nodeid) ...@@ -4844,7 +4874,7 @@ void dlm_receive_buffer(const union dlm_packet *p, int nodeid)
else else
log_error(ls, "invalid h_cmd %d from %d lockspace %x", log_error(ls, "invalid h_cmd %d from %d lockspace %x",
hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace)); hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace));
read_unlock(&ls->ls_recv_active); read_unlock_bh(&ls->ls_recv_active);
dlm_put_lockspace(ls); dlm_put_lockspace(ls);
} }
...@@ -5004,7 +5034,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls) ...@@ -5004,7 +5034,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
{ {
struct dlm_lkb *lkb = NULL, *iter; struct dlm_lkb *lkb = NULL, *iter;
spin_lock(&ls->ls_waiters_lock); spin_lock_bh(&ls->ls_waiters_lock);
list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) { list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) { if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) {
hold_lkb(iter); hold_lkb(iter);
...@@ -5012,7 +5042,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls) ...@@ -5012,7 +5042,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
break; break;
} }
} }
spin_unlock(&ls->ls_waiters_lock); spin_unlock_bh(&ls->ls_waiters_lock);
return lkb; return lkb;
} }
...@@ -5112,9 +5142,9 @@ int dlm_recover_waiters_post(struct dlm_ls *ls) ...@@ -5112,9 +5142,9 @@ int dlm_recover_waiters_post(struct dlm_ls *ls)
} }
/* Forcibly remove from waiters list */ /* Forcibly remove from waiters list */
spin_lock(&ls->ls_waiters_lock); spin_lock_bh(&ls->ls_waiters_lock);
list_del_init(&lkb->lkb_wait_reply); list_del_init(&lkb->lkb_wait_reply);
spin_unlock(&ls->ls_waiters_lock); spin_unlock_bh(&ls->ls_waiters_lock);
/* /*
* The lkb is now clear of all prior waiters state and can be * The lkb is now clear of all prior waiters state and can be
...@@ -5284,7 +5314,7 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) ...@@ -5284,7 +5314,7 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
struct rb_node *n; struct rb_node *n;
struct dlm_rsb *r; struct dlm_rsb *r;
spin_lock(&ls->ls_rsbtbl[bucket].lock); spin_lock_bh(&ls->ls_rsbtbl[bucket].lock);
for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
r = rb_entry(n, struct dlm_rsb, res_hashnode); r = rb_entry(n, struct dlm_rsb, res_hashnode);
...@@ -5295,10 +5325,10 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) ...@@ -5295,10 +5325,10 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
continue; continue;
} }
hold_rsb(r); hold_rsb(r);
spin_unlock(&ls->ls_rsbtbl[bucket].lock); spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
return r; return r;
} }
spin_unlock(&ls->ls_rsbtbl[bucket].lock); spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock);
return NULL; return NULL;
} }
...@@ -5642,10 +5672,10 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, ...@@ -5642,10 +5672,10 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
} }
/* add this new lkb to the per-process list of locks */ /* add this new lkb to the per-process list of locks */
spin_lock(&ua->proc->locks_spin); spin_lock_bh(&ua->proc->locks_spin);
hold_lkb(lkb); hold_lkb(lkb);
list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
spin_unlock(&ua->proc->locks_spin); spin_unlock_bh(&ua->proc->locks_spin);
do_put = false; do_put = false;
out_put: out_put:
trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false); trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false);
...@@ -5775,9 +5805,9 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, ...@@ -5775,9 +5805,9 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
* for the proc locks list. * for the proc locks list.
*/ */
spin_lock(&ua->proc->locks_spin); spin_lock_bh(&ua->proc->locks_spin);
list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
spin_unlock(&ua->proc->locks_spin); spin_unlock_bh(&ua->proc->locks_spin);
out: out:
kfree(ua_tmp); kfree(ua_tmp);
return rv; return rv;
...@@ -5821,11 +5851,11 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, ...@@ -5821,11 +5851,11 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (error) if (error)
goto out_put; goto out_put;
spin_lock(&ua->proc->locks_spin); spin_lock_bh(&ua->proc->locks_spin);
/* dlm_user_add_cb() may have already taken lkb off the proc list */ /* dlm_user_add_cb() may have already taken lkb off the proc list */
if (!list_empty(&lkb->lkb_ownqueue)) if (!list_empty(&lkb->lkb_ownqueue))
list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking); list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
spin_unlock(&ua->proc->locks_spin); spin_unlock_bh(&ua->proc->locks_spin);
out_put: out_put:
trace_dlm_unlock_end(ls, lkb, flags, error); trace_dlm_unlock_end(ls, lkb, flags, error);
dlm_put_lkb(lkb); dlm_put_lkb(lkb);
...@@ -5976,7 +6006,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls, ...@@ -5976,7 +6006,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
{ {
struct dlm_lkb *lkb = NULL; struct dlm_lkb *lkb = NULL;
spin_lock(&ls->ls_clear_proc_locks); spin_lock_bh(&ls->ls_clear_proc_locks);
if (list_empty(&proc->locks)) if (list_empty(&proc->locks))
goto out; goto out;
...@@ -5988,7 +6018,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls, ...@@ -5988,7 +6018,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
else else
set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
out: out:
spin_unlock(&ls->ls_clear_proc_locks); spin_unlock_bh(&ls->ls_clear_proc_locks);
return lkb; return lkb;
} }
...@@ -6025,7 +6055,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) ...@@ -6025,7 +6055,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
dlm_put_lkb(lkb); dlm_put_lkb(lkb);
} }
spin_lock(&ls->ls_clear_proc_locks); spin_lock_bh(&ls->ls_clear_proc_locks);
/* in-progress unlocks */ /* in-progress unlocks */
list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
...@@ -6039,7 +6069,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) ...@@ -6039,7 +6069,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
dlm_free_cb(cb); dlm_free_cb(cb);
} }
spin_unlock(&ls->ls_clear_proc_locks); spin_unlock_bh(&ls->ls_clear_proc_locks);
dlm_unlock_recovery(ls); dlm_unlock_recovery(ls);
} }
...@@ -6050,13 +6080,13 @@ static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) ...@@ -6050,13 +6080,13 @@ static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
while (1) { while (1) {
lkb = NULL; lkb = NULL;
spin_lock(&proc->locks_spin); spin_lock_bh(&proc->locks_spin);
if (!list_empty(&proc->locks)) { if (!list_empty(&proc->locks)) {
lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb = list_entry(proc->locks.next, struct dlm_lkb,
lkb_ownqueue); lkb_ownqueue);
list_del_init(&lkb->lkb_ownqueue); list_del_init(&lkb->lkb_ownqueue);
} }
spin_unlock(&proc->locks_spin); spin_unlock_bh(&proc->locks_spin);
if (!lkb) if (!lkb)
break; break;
...@@ -6066,20 +6096,20 @@ static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) ...@@ -6066,20 +6096,20 @@ static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
dlm_put_lkb(lkb); /* ref from proc->locks list */ dlm_put_lkb(lkb); /* ref from proc->locks list */
} }
spin_lock(&proc->locks_spin); spin_lock_bh(&proc->locks_spin);
list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
list_del_init(&lkb->lkb_ownqueue); list_del_init(&lkb->lkb_ownqueue);
set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
dlm_put_lkb(lkb); dlm_put_lkb(lkb);
} }
spin_unlock(&proc->locks_spin); spin_unlock_bh(&proc->locks_spin);
spin_lock(&proc->asts_spin); spin_lock_bh(&proc->asts_spin);
list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) { list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) {
list_del(&cb->list); list_del(&cb->list);
dlm_free_cb(cb); dlm_free_cb(cb);
} }
spin_unlock(&proc->asts_spin); spin_unlock_bh(&proc->asts_spin);
} }
/* pid of 0 means purge all orphans */ /* pid of 0 means purge all orphans */
......
...@@ -69,12 +69,12 @@ static inline int is_master(struct dlm_rsb *r) ...@@ -69,12 +69,12 @@ static inline int is_master(struct dlm_rsb *r)
static inline void lock_rsb(struct dlm_rsb *r) static inline void lock_rsb(struct dlm_rsb *r)
{ {
spin_lock(&r->res_lock); spin_lock_bh(&r->res_lock);
} }
static inline void unlock_rsb(struct dlm_rsb *r) static inline void unlock_rsb(struct dlm_rsb *r)
{ {
spin_unlock(&r->res_lock); spin_unlock_bh(&r->res_lock);
} }
#endif #endif
......
...@@ -251,15 +251,15 @@ static struct dlm_ls *find_ls_to_scan(void) ...@@ -251,15 +251,15 @@ static struct dlm_ls *find_ls_to_scan(void)
{ {
struct dlm_ls *ls; struct dlm_ls *ls;
spin_lock(&lslist_lock); spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) { list_for_each_entry(ls, &lslist, ls_list) {
if (time_after_eq(jiffies, ls->ls_scan_time + if (time_after_eq(jiffies, ls->ls_scan_time +
dlm_config.ci_scan_secs * HZ)) { dlm_config.ci_scan_secs * HZ)) {
spin_unlock(&lslist_lock); spin_unlock_bh(&lslist_lock);
return ls; return ls;
} }
} }
spin_unlock(&lslist_lock); spin_unlock_bh(&lslist_lock);
return NULL; return NULL;
} }
...@@ -306,7 +306,7 @@ struct dlm_ls *dlm_find_lockspace_global(uint32_t id) ...@@ -306,7 +306,7 @@ struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
{ {
struct dlm_ls *ls; struct dlm_ls *ls;
spin_lock(&lslist_lock); spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) { list_for_each_entry(ls, &lslist, ls_list) {
if (ls->ls_global_id == id) { if (ls->ls_global_id == id) {
...@@ -316,7 +316,7 @@ struct dlm_ls *dlm_find_lockspace_global(uint32_t id) ...@@ -316,7 +316,7 @@ struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
} }
ls = NULL; ls = NULL;
out: out:
spin_unlock(&lslist_lock); spin_unlock_bh(&lslist_lock);
return ls; return ls;
} }
...@@ -324,7 +324,7 @@ struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace) ...@@ -324,7 +324,7 @@ struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
{ {
struct dlm_ls *ls; struct dlm_ls *ls;
spin_lock(&lslist_lock); spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) { list_for_each_entry(ls, &lslist, ls_list) {
if (ls->ls_local_handle == lockspace) { if (ls->ls_local_handle == lockspace) {
atomic_inc(&ls->ls_count); atomic_inc(&ls->ls_count);
...@@ -333,7 +333,7 @@ struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace) ...@@ -333,7 +333,7 @@ struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
} }
ls = NULL; ls = NULL;
out: out:
spin_unlock(&lslist_lock); spin_unlock_bh(&lslist_lock);
return ls; return ls;
} }
...@@ -341,7 +341,7 @@ struct dlm_ls *dlm_find_lockspace_device(int minor) ...@@ -341,7 +341,7 @@ struct dlm_ls *dlm_find_lockspace_device(int minor)
{ {
struct dlm_ls *ls; struct dlm_ls *ls;
spin_lock(&lslist_lock); spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) { list_for_each_entry(ls, &lslist, ls_list) {
if (ls->ls_device.minor == minor) { if (ls->ls_device.minor == minor) {
atomic_inc(&ls->ls_count); atomic_inc(&ls->ls_count);
...@@ -350,7 +350,7 @@ struct dlm_ls *dlm_find_lockspace_device(int minor) ...@@ -350,7 +350,7 @@ struct dlm_ls *dlm_find_lockspace_device(int minor)
} }
ls = NULL; ls = NULL;
out: out:
spin_unlock(&lslist_lock); spin_unlock_bh(&lslist_lock);
return ls; return ls;
} }
...@@ -365,15 +365,15 @@ static void remove_lockspace(struct dlm_ls *ls) ...@@ -365,15 +365,15 @@ static void remove_lockspace(struct dlm_ls *ls)
retry: retry:
wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0); wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0);
spin_lock(&lslist_lock); spin_lock_bh(&lslist_lock);
if (atomic_read(&ls->ls_count) != 0) { if (atomic_read(&ls->ls_count) != 0) {
spin_unlock(&lslist_lock); spin_unlock_bh(&lslist_lock);
goto retry; goto retry;
} }
WARN_ON(ls->ls_create_count != 0); WARN_ON(ls->ls_create_count != 0);
list_del(&ls->ls_list); list_del(&ls->ls_list);
spin_unlock(&lslist_lock); spin_unlock_bh(&lslist_lock);
} }
static int threads_start(void) static int threads_start(void)
...@@ -448,7 +448,7 @@ static int new_lockspace(const char *name, const char *cluster, ...@@ -448,7 +448,7 @@ static int new_lockspace(const char *name, const char *cluster,
error = 0; error = 0;
spin_lock(&lslist_lock); spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) { list_for_each_entry(ls, &lslist, ls_list) {
WARN_ON(ls->ls_create_count <= 0); WARN_ON(ls->ls_create_count <= 0);
if (ls->ls_namelen != namelen) if (ls->ls_namelen != namelen)
...@@ -464,7 +464,7 @@ static int new_lockspace(const char *name, const char *cluster, ...@@ -464,7 +464,7 @@ static int new_lockspace(const char *name, const char *cluster,
error = 1; error = 1;
break; break;
} }
spin_unlock(&lslist_lock); spin_unlock_bh(&lslist_lock);
if (error) if (error)
goto out; goto out;
...@@ -583,10 +583,10 @@ static int new_lockspace(const char *name, const char *cluster, ...@@ -583,10 +583,10 @@ static int new_lockspace(const char *name, const char *cluster,
INIT_LIST_HEAD(&ls->ls_dir_dump_list); INIT_LIST_HEAD(&ls->ls_dir_dump_list);
rwlock_init(&ls->ls_dir_dump_lock); rwlock_init(&ls->ls_dir_dump_lock);
spin_lock(&lslist_lock); spin_lock_bh(&lslist_lock);
ls->ls_create_count = 1; ls->ls_create_count = 1;
list_add(&ls->ls_list, &lslist); list_add(&ls->ls_list, &lslist);
spin_unlock(&lslist_lock); spin_unlock_bh(&lslist_lock);
if (flags & DLM_LSFL_FS) { if (flags & DLM_LSFL_FS) {
error = dlm_callback_start(ls); error = dlm_callback_start(ls);
...@@ -655,9 +655,9 @@ static int new_lockspace(const char *name, const char *cluster, ...@@ -655,9 +655,9 @@ static int new_lockspace(const char *name, const char *cluster,
out_callback: out_callback:
dlm_callback_stop(ls); dlm_callback_stop(ls);
out_delist: out_delist:
spin_lock(&lslist_lock); spin_lock_bh(&lslist_lock);
list_del(&ls->ls_list); list_del(&ls->ls_list);
spin_unlock(&lslist_lock); spin_unlock_bh(&lslist_lock);
idr_destroy(&ls->ls_recover_idr); idr_destroy(&ls->ls_recover_idr);
kfree(ls->ls_recover_buf); kfree(ls->ls_recover_buf);
out_lkbidr: out_lkbidr:
...@@ -756,7 +756,7 @@ static int lockspace_busy(struct dlm_ls *ls, int force) ...@@ -756,7 +756,7 @@ static int lockspace_busy(struct dlm_ls *ls, int force)
{ {
int rv; int rv;
spin_lock(&ls->ls_lkbidr_spin); spin_lock_bh(&ls->ls_lkbidr_spin);
if (force == 0) { if (force == 0) {
rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls); rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
} else if (force == 1) { } else if (force == 1) {
...@@ -764,7 +764,7 @@ static int lockspace_busy(struct dlm_ls *ls, int force) ...@@ -764,7 +764,7 @@ static int lockspace_busy(struct dlm_ls *ls, int force)
} else { } else {
rv = 0; rv = 0;
} }
spin_unlock(&ls->ls_lkbidr_spin); spin_unlock_bh(&ls->ls_lkbidr_spin);
return rv; return rv;
} }
...@@ -776,7 +776,7 @@ static int release_lockspace(struct dlm_ls *ls, int force) ...@@ -776,7 +776,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
busy = lockspace_busy(ls, force); busy = lockspace_busy(ls, force);
spin_lock(&lslist_lock); spin_lock_bh(&lslist_lock);
if (ls->ls_create_count == 1) { if (ls->ls_create_count == 1) {
if (busy) { if (busy) {
rv = -EBUSY; rv = -EBUSY;
...@@ -790,7 +790,7 @@ static int release_lockspace(struct dlm_ls *ls, int force) ...@@ -790,7 +790,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
} else { } else {
rv = -EINVAL; rv = -EINVAL;
} }
spin_unlock(&lslist_lock); spin_unlock_bh(&lslist_lock);
if (rv) { if (rv) {
log_debug(ls, "release_lockspace no remove %d", rv); log_debug(ls, "release_lockspace no remove %d", rv);
...@@ -918,20 +918,19 @@ void dlm_stop_lockspaces(void) ...@@ -918,20 +918,19 @@ void dlm_stop_lockspaces(void)
restart: restart:
count = 0; count = 0;
spin_lock(&lslist_lock); spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) { list_for_each_entry(ls, &lslist, ls_list) {
if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) { if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) {
count++; count++;
continue; continue;
} }
spin_unlock(&lslist_lock); spin_unlock_bh(&lslist_lock);
log_error(ls, "no userland control daemon, stopping lockspace"); log_error(ls, "no userland control daemon, stopping lockspace");
dlm_ls_stop(ls); dlm_ls_stop(ls);
goto restart; goto restart;
} }
spin_unlock(&lslist_lock); spin_unlock_bh(&lslist_lock);
if (count) if (count)
log_print("dlm user daemon left %d lockspaces", count); log_print("dlm user daemon left %d lockspaces", count);
} }
...@@ -867,36 +867,36 @@ static void process_dlm_messages(struct work_struct *work) ...@@ -867,36 +867,36 @@ static void process_dlm_messages(struct work_struct *work)
{ {
struct processqueue_entry *pentry; struct processqueue_entry *pentry;
spin_lock(&processqueue_lock); spin_lock_bh(&processqueue_lock);
pentry = list_first_entry_or_null(&processqueue, pentry = list_first_entry_or_null(&processqueue,
struct processqueue_entry, list); struct processqueue_entry, list);
if (WARN_ON_ONCE(!pentry)) { if (WARN_ON_ONCE(!pentry)) {
process_dlm_messages_pending = false; process_dlm_messages_pending = false;
spin_unlock(&processqueue_lock); spin_unlock_bh(&processqueue_lock);
return; return;
} }
list_del(&pentry->list); list_del(&pentry->list);
atomic_dec(&processqueue_count); atomic_dec(&processqueue_count);
spin_unlock(&processqueue_lock); spin_unlock_bh(&processqueue_lock);
for (;;) { for (;;) {
dlm_process_incoming_buffer(pentry->nodeid, pentry->buf, dlm_process_incoming_buffer(pentry->nodeid, pentry->buf,
pentry->buflen); pentry->buflen);
free_processqueue_entry(pentry); free_processqueue_entry(pentry);
spin_lock(&processqueue_lock); spin_lock_bh(&processqueue_lock);
pentry = list_first_entry_or_null(&processqueue, pentry = list_first_entry_or_null(&processqueue,
struct processqueue_entry, list); struct processqueue_entry, list);
if (!pentry) { if (!pentry) {
process_dlm_messages_pending = false; process_dlm_messages_pending = false;
spin_unlock(&processqueue_lock); spin_unlock_bh(&processqueue_lock);
break; break;
} }
list_del(&pentry->list); list_del(&pentry->list);
atomic_dec(&processqueue_count); atomic_dec(&processqueue_count);
spin_unlock(&processqueue_lock); spin_unlock_bh(&processqueue_lock);
} }
} }
...@@ -966,14 +966,14 @@ static int receive_from_sock(struct connection *con, int buflen) ...@@ -966,14 +966,14 @@ static int receive_from_sock(struct connection *con, int buflen)
memmove(con->rx_leftover_buf, pentry->buf + ret, memmove(con->rx_leftover_buf, pentry->buf + ret,
con->rx_leftover); con->rx_leftover);
spin_lock(&processqueue_lock); spin_lock_bh(&processqueue_lock);
ret = atomic_inc_return(&processqueue_count); ret = atomic_inc_return(&processqueue_count);
list_add_tail(&pentry->list, &processqueue); list_add_tail(&pentry->list, &processqueue);
if (!process_dlm_messages_pending) { if (!process_dlm_messages_pending) {
process_dlm_messages_pending = true; process_dlm_messages_pending = true;
queue_work(process_workqueue, &process_work); queue_work(process_workqueue, &process_work);
} }
spin_unlock(&processqueue_lock); spin_unlock_bh(&processqueue_lock);
if (ret > DLM_MAX_PROCESS_BUFFERS) if (ret > DLM_MAX_PROCESS_BUFFERS)
return DLM_IO_FLUSH; return DLM_IO_FLUSH;
......
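The enqueue side in receive_from_sock() mirrors the same locking: the entry is appended and the pending flag checked under the bh-disabling lock, and the atomic counter provides backpressure (the caller gets DLM_IO_FLUSH above DLM_MAX_PROCESS_BUFFERS, presumably to throttle the socket reader until the worker catches up). In the hunk above the worker decrements processqueue_count at each list_del(); that bookkeeping is omitted from the drain sketch. A producer sketch reusing the queue, lock, pending flag and worker names from the drain-loop sketch, with an illustrative threshold and workqueue:

    #include <linux/atomic.h>

    static struct workqueue_struct *demo_wq;        /* assumed created at init */
    static DECLARE_WORK(demo_work, demo_worker);
    static atomic_t demo_queue_count = ATOMIC_INIT(0);
    #define DEMO_MAX_QUEUED 64                      /* illustrative threshold */

    /* returns true when the caller should throttle further receives */
    static bool demo_enqueue(struct demo_msg *msg)
    {
            int count;

            spin_lock_bh(&demo_queue_lock);
            count = atomic_inc_return(&demo_queue_count);
            list_add_tail(&msg->list, &demo_queue);
            if (!demo_work_pending) {
                    demo_work_pending = true;
                    queue_work(demo_wq, &demo_work);
            }
            spin_unlock_bh(&demo_queue_lock);

            return count > DEMO_MAX_QUEUED;
    }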
...@@ -630,7 +630,7 @@ int dlm_ls_stop(struct dlm_ls *ls) ...@@ -630,7 +630,7 @@ int dlm_ls_stop(struct dlm_ls *ls)
* message to the requestqueue without races. * message to the requestqueue without races.
*/ */
write_lock(&ls->ls_recv_active); write_lock_bh(&ls->ls_recv_active);
/* /*
* Abort any recovery that's in progress (see RECOVER_STOP, * Abort any recovery that's in progress (see RECOVER_STOP,
...@@ -638,23 +638,23 @@ int dlm_ls_stop(struct dlm_ls *ls) ...@@ -638,23 +638,23 @@ int dlm_ls_stop(struct dlm_ls *ls)
* dlm to quit any processing (see RUNNING, dlm_locking_stopped()). * dlm to quit any processing (see RUNNING, dlm_locking_stopped()).
*/ */
spin_lock(&ls->ls_recover_lock); spin_lock_bh(&ls->ls_recover_lock);
set_bit(LSFL_RECOVER_STOP, &ls->ls_flags); set_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags); new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags);
ls->ls_recover_seq++; ls->ls_recover_seq++;
/* activate requestqueue and stop processing */ /* activate requestqueue and stop processing */
write_lock(&ls->ls_requestqueue_lock); write_lock_bh(&ls->ls_requestqueue_lock);
set_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags); set_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags);
write_unlock(&ls->ls_requestqueue_lock); write_unlock_bh(&ls->ls_requestqueue_lock);
spin_unlock(&ls->ls_recover_lock); spin_unlock_bh(&ls->ls_recover_lock);
/* /*
* Let dlm_recv run again, now any normal messages will be saved on the * Let dlm_recv run again, now any normal messages will be saved on the
* requestqueue for later. * requestqueue for later.
*/ */
write_unlock(&ls->ls_recv_active); write_unlock_bh(&ls->ls_recv_active);
/* /*
* This in_recovery lock does two things: * This in_recovery lock does two things:
...@@ -679,13 +679,13 @@ int dlm_ls_stop(struct dlm_ls *ls) ...@@ -679,13 +679,13 @@ int dlm_ls_stop(struct dlm_ls *ls)
dlm_recoverd_suspend(ls); dlm_recoverd_suspend(ls);
spin_lock(&ls->ls_recover_lock); spin_lock_bh(&ls->ls_recover_lock);
kfree(ls->ls_slots); kfree(ls->ls_slots);
ls->ls_slots = NULL; ls->ls_slots = NULL;
ls->ls_num_slots = 0; ls->ls_num_slots = 0;
ls->ls_slots_size = 0; ls->ls_slots_size = 0;
ls->ls_recover_status = 0; ls->ls_recover_status = 0;
spin_unlock(&ls->ls_recover_lock); spin_unlock_bh(&ls->ls_recover_lock);
dlm_recoverd_resume(ls); dlm_recoverd_resume(ls);
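dlm_ls_stop() uses ls_recv_active as an outer writer lock to fence out the receive path while the RUNNING/RECV_MSG_BLOCKED flags are flipped under the inner locks; with softirq receive all of these become _bh variants, and nesting spin_lock_bh() inside write_lock_bh() is fine since the bottom-half disable simply stays in effect. A stripped-down sketch of that quiesce step (flag bits and names are illustrative, and the inner lock nesting is simplified):

    #include <linux/bitops.h>
    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(demo_recv_active);    /* readers: the receive path */
    static DEFINE_SPINLOCK(demo_state_lock);
    static unsigned long demo_flags;
    #define DEMO_RUNNING      0
    #define DEMO_MSG_BLOCKED  1

    static void demo_stop(void)
    {
            /* exclude every receiver while the flags change */
            write_lock_bh(&demo_recv_active);

            spin_lock_bh(&demo_state_lock);
            clear_bit(DEMO_RUNNING, &demo_flags);
            set_bit(DEMO_MSG_BLOCKED, &demo_flags);
            spin_unlock_bh(&demo_state_lock);

            /* receivers may run again; they now park messages for later */
            write_unlock_bh(&demo_recv_active);
    }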
...@@ -719,12 +719,12 @@ int dlm_ls_start(struct dlm_ls *ls) ...@@ -719,12 +719,12 @@ int dlm_ls_start(struct dlm_ls *ls)
if (error < 0) if (error < 0)
goto fail_rv; goto fail_rv;
spin_lock(&ls->ls_recover_lock); spin_lock_bh(&ls->ls_recover_lock);
/* the lockspace needs to be stopped before it can be started */ /* the lockspace needs to be stopped before it can be started */
if (!dlm_locking_stopped(ls)) { if (!dlm_locking_stopped(ls)) {
spin_unlock(&ls->ls_recover_lock); spin_unlock_bh(&ls->ls_recover_lock);
log_error(ls, "start ignored: lockspace running"); log_error(ls, "start ignored: lockspace running");
error = -EINVAL; error = -EINVAL;
goto fail; goto fail;
...@@ -735,7 +735,7 @@ int dlm_ls_start(struct dlm_ls *ls) ...@@ -735,7 +735,7 @@ int dlm_ls_start(struct dlm_ls *ls)
rv->seq = ++ls->ls_recover_seq; rv->seq = ++ls->ls_recover_seq;
rv_old = ls->ls_recover_args; rv_old = ls->ls_recover_args;
ls->ls_recover_args = rv; ls->ls_recover_args = rv;
spin_unlock(&ls->ls_recover_lock); spin_unlock_bh(&ls->ls_recover_lock);
if (rv_old) { if (rv_old) {
log_error(ls, "unused recovery %llx %d", log_error(ls, "unused recovery %llx %d",
......
...@@ -364,9 +364,9 @@ int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len) ...@@ -364,9 +364,9 @@ int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
node->users = 0; node->users = 0;
midcomms_node_reset(node); midcomms_node_reset(node);
spin_lock(&nodes_lock); spin_lock_bh(&nodes_lock);
hlist_add_head_rcu(&node->hlist, &node_hash[r]); hlist_add_head_rcu(&node->hlist, &node_hash[r]);
spin_unlock(&nodes_lock); spin_unlock_bh(&nodes_lock);
node->debugfs = dlm_create_debug_comms_file(nodeid, node); node->debugfs = dlm_create_debug_comms_file(nodeid, node);
return 0; return 0;
...@@ -477,7 +477,7 @@ static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq) ...@@ -477,7 +477,7 @@ static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq)
static void dlm_pas_fin_ack_rcv(struct midcomms_node *node) static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
{ {
spin_lock(&node->state_lock); spin_lock_bh(&node->state_lock);
pr_debug("receive passive fin ack from node %d with state %s\n", pr_debug("receive passive fin ack from node %d with state %s\n",
node->nodeid, dlm_state_str(node->state)); node->nodeid, dlm_state_str(node->state));
...@@ -491,13 +491,13 @@ static void dlm_pas_fin_ack_rcv(struct midcomms_node *node) ...@@ -491,13 +491,13 @@ static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
wake_up(&node->shutdown_wait); wake_up(&node->shutdown_wait);
break; break;
default: default:
spin_unlock(&node->state_lock); spin_unlock_bh(&node->state_lock);
log_print("%s: unexpected state: %d", log_print("%s: unexpected state: %d",
__func__, node->state); __func__, node->state);
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
return; return;
} }
spin_unlock(&node->state_lock); spin_unlock_bh(&node->state_lock);
} }
static void dlm_receive_buffer_3_2_trace(uint32_t seq, static void dlm_receive_buffer_3_2_trace(uint32_t seq,
...@@ -534,7 +534,7 @@ static void dlm_midcomms_receive_buffer(const union dlm_packet *p, ...@@ -534,7 +534,7 @@ static void dlm_midcomms_receive_buffer(const union dlm_packet *p,
if (is_expected_seq) { if (is_expected_seq) {
switch (p->header.h_cmd) { switch (p->header.h_cmd) {
case DLM_FIN: case DLM_FIN:
spin_lock(&node->state_lock); spin_lock_bh(&node->state_lock);
pr_debug("receive fin msg from node %d with state %s\n", pr_debug("receive fin msg from node %d with state %s\n",
node->nodeid, dlm_state_str(node->state)); node->nodeid, dlm_state_str(node->state));
...@@ -575,13 +575,13 @@ static void dlm_midcomms_receive_buffer(const union dlm_packet *p, ...@@ -575,13 +575,13 @@ static void dlm_midcomms_receive_buffer(const union dlm_packet *p,
/* probably remove_member caught it, do nothing */ /* probably remove_member caught it, do nothing */
break; break;
default: default:
spin_unlock(&node->state_lock); spin_unlock_bh(&node->state_lock);
log_print("%s: unexpected state: %d", log_print("%s: unexpected state: %d",
__func__, node->state); __func__, node->state);
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
return; return;
} }
spin_unlock(&node->state_lock); spin_unlock_bh(&node->state_lock);
break; break;
default: default:
WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags)); WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
...@@ -1182,7 +1182,7 @@ void dlm_midcomms_exit(void) ...@@ -1182,7 +1182,7 @@ void dlm_midcomms_exit(void)
static void dlm_act_fin_ack_rcv(struct midcomms_node *node) static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
{ {
spin_lock(&node->state_lock); spin_lock_bh(&node->state_lock);
pr_debug("receive active fin ack from node %d with state %s\n", pr_debug("receive active fin ack from node %d with state %s\n",
node->nodeid, dlm_state_str(node->state)); node->nodeid, dlm_state_str(node->state));
...@@ -1202,13 +1202,13 @@ static void dlm_act_fin_ack_rcv(struct midcomms_node *node) ...@@ -1202,13 +1202,13 @@ static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
wake_up(&node->shutdown_wait); wake_up(&node->shutdown_wait);
break; break;
default: default:
spin_unlock(&node->state_lock); spin_unlock_bh(&node->state_lock);
log_print("%s: unexpected state: %d", log_print("%s: unexpected state: %d",
__func__, node->state); __func__, node->state);
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
return; return;
} }
spin_unlock(&node->state_lock); spin_unlock_bh(&node->state_lock);
} }
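The midcomms handlers all follow the same shape: take node->state_lock with bottom halves disabled, switch on the connection-teardown state, and in the unexpected-state default case unlock before logging and warning. A compact sketch of that shape (the enum and names here are illustrative, not the real midcomms states):

    #include <linux/bug.h>
    #include <linux/printk.h>
    #include <linux/spinlock.h>

    enum demo_state { DEMO_ESTABLISHED, DEMO_FIN_WAIT, DEMO_CLOSED };

    struct demo_node {
            spinlock_t state_lock;
            enum demo_state state;
            int nodeid;
    };

    static void demo_fin_ack_rcv(struct demo_node *node)
    {
            spin_lock_bh(&node->state_lock);
            switch (node->state) {
            case DEMO_FIN_WAIT:
                    node->state = DEMO_CLOSED;
                    break;
            default:
                    /* unlock first, then complain about the bogus state */
                    spin_unlock_bh(&node->state_lock);
                    pr_warn("%s: unexpected state %d on node %d\n",
                            __func__, node->state, node->nodeid);
                    WARN_ON_ONCE(1);
                    return;
            }
            spin_unlock_bh(&node->state_lock);
    }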
void dlm_midcomms_add_member(int nodeid) void dlm_midcomms_add_member(int nodeid)
...@@ -1223,7 +1223,7 @@ void dlm_midcomms_add_member(int nodeid) ...@@ -1223,7 +1223,7 @@ void dlm_midcomms_add_member(int nodeid)
return; return;
} }
spin_lock(&node->state_lock); spin_lock_bh(&node->state_lock);
if (!node->users) { if (!node->users) {
pr_debug("receive add member from node %d with state %s\n", pr_debug("receive add member from node %d with state %s\n",
node->nodeid, dlm_state_str(node->state)); node->nodeid, dlm_state_str(node->state));
...@@ -1251,7 +1251,7 @@ void dlm_midcomms_add_member(int nodeid) ...@@ -1251,7 +1251,7 @@ void dlm_midcomms_add_member(int nodeid)
node->users++; node->users++;
pr_debug("node %d users inc count %d\n", nodeid, node->users); pr_debug("node %d users inc count %d\n", nodeid, node->users);
spin_unlock(&node->state_lock); spin_unlock_bh(&node->state_lock);
srcu_read_unlock(&nodes_srcu, idx); srcu_read_unlock(&nodes_srcu, idx);
} }
...@@ -1269,13 +1269,13 @@ void dlm_midcomms_remove_member(int nodeid) ...@@ -1269,13 +1269,13 @@ void dlm_midcomms_remove_member(int nodeid)
return; return;
} }
spin_lock(&node->state_lock); spin_lock_bh(&node->state_lock);
/* case of dlm_midcomms_addr() created node but /* case of dlm_midcomms_addr() created node but
* was not added before because dlm_midcomms_close() * was not added before because dlm_midcomms_close()
* removed the node * removed the node
*/ */
if (!node->users) { if (!node->users) {
spin_unlock(&node->state_lock); spin_unlock_bh(&node->state_lock);
srcu_read_unlock(&nodes_srcu, idx); srcu_read_unlock(&nodes_srcu, idx);
return; return;
} }
...@@ -1313,7 +1313,7 @@ void dlm_midcomms_remove_member(int nodeid) ...@@ -1313,7 +1313,7 @@ void dlm_midcomms_remove_member(int nodeid)
break; break;
} }
} }
spin_unlock(&node->state_lock); spin_unlock_bh(&node->state_lock);
srcu_read_unlock(&nodes_srcu, idx); srcu_read_unlock(&nodes_srcu, idx);
} }
...@@ -1351,7 +1351,7 @@ static void midcomms_shutdown(struct midcomms_node *node) ...@@ -1351,7 +1351,7 @@ static void midcomms_shutdown(struct midcomms_node *node)
return; return;
} }
spin_lock(&node->state_lock); spin_lock_bh(&node->state_lock);
pr_debug("receive active shutdown for node %d with state %s\n", pr_debug("receive active shutdown for node %d with state %s\n",
node->nodeid, dlm_state_str(node->state)); node->nodeid, dlm_state_str(node->state));
switch (node->state) { switch (node->state) {
...@@ -1370,7 +1370,7 @@ static void midcomms_shutdown(struct midcomms_node *node) ...@@ -1370,7 +1370,7 @@ static void midcomms_shutdown(struct midcomms_node *node)
*/ */
break; break;
} }
spin_unlock(&node->state_lock); spin_unlock_bh(&node->state_lock);
if (DLM_DEBUG_FENCE_TERMINATION) if (DLM_DEBUG_FENCE_TERMINATION)
msleep(5000); msleep(5000);
...@@ -1441,9 +1441,9 @@ int dlm_midcomms_close(int nodeid) ...@@ -1441,9 +1441,9 @@ int dlm_midcomms_close(int nodeid)
ret = dlm_lowcomms_close(nodeid); ret = dlm_lowcomms_close(nodeid);
dlm_delete_debug_comms_file(node->debugfs); dlm_delete_debug_comms_file(node->debugfs);
spin_lock(&nodes_lock); spin_lock_bh(&nodes_lock);
hlist_del_rcu(&node->hlist); hlist_del_rcu(&node->hlist);
spin_unlock(&nodes_lock); spin_unlock_bh(&nodes_lock);
srcu_read_unlock(&nodes_srcu, idx); srcu_read_unlock(&nodes_srcu, idx);
/* wait that all readers left until flush send queue */ /* wait that all readers left until flush send queue */
......
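nodes_lock only serializes writers of the node hash; lookups walk the RCU-protected hlist under an SRCU read lock (nodes_srcu) so they may sleep. Converting the writer side to spin_lock_bh() is enough because readers never take the spinlock at all. A rough sketch of that writer/reader split, using plain RCU for the reader to keep it short (the real code uses SRCU, and these names are illustrative):

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    #define DEMO_HASH_SIZE 16

    struct demo_hnode {
            struct hlist_node hlist;
            int nodeid;
    };

    static struct hlist_head demo_hash[DEMO_HASH_SIZE];
    static DEFINE_SPINLOCK(demo_nodes_lock);   /* serializes writers only */

    static void demo_node_add(struct demo_hnode *n)
    {
            spin_lock_bh(&demo_nodes_lock);
            hlist_add_head_rcu(&n->hlist, &demo_hash[n->nodeid % DEMO_HASH_SIZE]);
            spin_unlock_bh(&demo_nodes_lock);
    }

    static bool demo_node_exists(int nodeid)
    {
            struct demo_hnode *n;
            bool found = false;

            rcu_read_lock();
            hlist_for_each_entry_rcu(n, &demo_hash[nodeid % DEMO_HASH_SIZE], hlist) {
                    if (n->nodeid == nodeid) {
                            found = true;
                            break;
                    }
            }
            rcu_read_unlock();
            return found;
    }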
...@@ -143,18 +143,18 @@ static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) ...@@ -143,18 +143,18 @@ static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
static void allow_sync_reply(struct dlm_ls *ls, __le64 *new_seq) static void allow_sync_reply(struct dlm_ls *ls, __le64 *new_seq)
{ {
spin_lock(&ls->ls_rcom_spin); spin_lock_bh(&ls->ls_rcom_spin);
*new_seq = cpu_to_le64(++ls->ls_rcom_seq); *new_seq = cpu_to_le64(++ls->ls_rcom_seq);
set_bit(LSFL_RCOM_WAIT, &ls->ls_flags); set_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
spin_unlock(&ls->ls_rcom_spin); spin_unlock_bh(&ls->ls_rcom_spin);
} }
static void disallow_sync_reply(struct dlm_ls *ls) static void disallow_sync_reply(struct dlm_ls *ls)
{ {
spin_lock(&ls->ls_rcom_spin); spin_lock_bh(&ls->ls_rcom_spin);
clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags); clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
clear_bit(LSFL_RCOM_READY, &ls->ls_flags); clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
spin_unlock(&ls->ls_rcom_spin); spin_unlock_bh(&ls->ls_rcom_spin);
} }
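allow_sync_reply()/disallow_sync_reply() and receive_sync_reply() form a small handshake under ls_rcom_spin: the sender stamps the outgoing request with a fresh ls_rcom_seq and sets RCOM_WAIT, and the receive side only accepts a reply whose id matches the current sequence while the wait bit is still set (the real code then also wakes ls_wait_general). A boiled-down sketch of that gate with illustrative names:

    #include <linux/bitops.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(demo_rcom_spin);
    static u64 demo_rcom_seq;
    static unsigned long demo_rcom_flags;
    #define DEMO_RCOM_WAIT   0
    #define DEMO_RCOM_READY  1

    /* called before sending a request that expects a synchronous reply */
    static u64 demo_allow_sync_reply(void)
    {
            u64 seq;

            spin_lock_bh(&demo_rcom_spin);
            seq = ++demo_rcom_seq;                  /* id carried by the request */
            set_bit(DEMO_RCOM_WAIT, &demo_rcom_flags);
            spin_unlock_bh(&demo_rcom_spin);
            return seq;
    }

    /* called from the receive path when a reply with this id arrives */
    static void demo_receive_sync_reply(u64 reply_id)
    {
            spin_lock_bh(&demo_rcom_spin);
            if (!test_bit(DEMO_RCOM_WAIT, &demo_rcom_flags) ||
                reply_id != demo_rcom_seq) {
                    /* stale or unexpected reply, drop it */
                    spin_unlock_bh(&demo_rcom_spin);
                    return;
            }
            set_bit(DEMO_RCOM_READY, &demo_rcom_flags);
            clear_bit(DEMO_RCOM_WAIT, &demo_rcom_flags);
            spin_unlock_bh(&demo_rcom_spin);
    }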
/* /*
...@@ -245,10 +245,10 @@ static void receive_rcom_status(struct dlm_ls *ls, ...@@ -245,10 +245,10 @@ static void receive_rcom_status(struct dlm_ls *ls,
goto do_create; goto do_create;
} }
spin_lock(&ls->ls_recover_lock); spin_lock_bh(&ls->ls_recover_lock);
status = ls->ls_recover_status; status = ls->ls_recover_status;
num_slots = ls->ls_num_slots; num_slots = ls->ls_num_slots;
spin_unlock(&ls->ls_recover_lock); spin_unlock_bh(&ls->ls_recover_lock);
len += num_slots * sizeof(struct rcom_slot); len += num_slots * sizeof(struct rcom_slot);
do_create: do_create:
...@@ -266,9 +266,9 @@ static void receive_rcom_status(struct dlm_ls *ls, ...@@ -266,9 +266,9 @@ static void receive_rcom_status(struct dlm_ls *ls,
if (!num_slots) if (!num_slots)
goto do_send; goto do_send;
spin_lock(&ls->ls_recover_lock); spin_lock_bh(&ls->ls_recover_lock);
if (ls->ls_num_slots != num_slots) { if (ls->ls_num_slots != num_slots) {
spin_unlock(&ls->ls_recover_lock); spin_unlock_bh(&ls->ls_recover_lock);
log_debug(ls, "receive_rcom_status num_slots %d to %d", log_debug(ls, "receive_rcom_status num_slots %d to %d",
num_slots, ls->ls_num_slots); num_slots, ls->ls_num_slots);
rc->rc_result = 0; rc->rc_result = 0;
...@@ -277,7 +277,7 @@ static void receive_rcom_status(struct dlm_ls *ls, ...@@ -277,7 +277,7 @@ static void receive_rcom_status(struct dlm_ls *ls,
} }
dlm_slots_copy_out(ls, rc); dlm_slots_copy_out(ls, rc);
spin_unlock(&ls->ls_recover_lock); spin_unlock_bh(&ls->ls_recover_lock);
do_send: do_send:
send_rcom_stateless(msg, rc); send_rcom_stateless(msg, rc);
...@@ -285,7 +285,7 @@ static void receive_rcom_status(struct dlm_ls *ls, ...@@ -285,7 +285,7 @@ static void receive_rcom_status(struct dlm_ls *ls,
static void receive_sync_reply(struct dlm_ls *ls, const struct dlm_rcom *rc_in) static void receive_sync_reply(struct dlm_ls *ls, const struct dlm_rcom *rc_in)
{ {
spin_lock(&ls->ls_rcom_spin); spin_lock_bh(&ls->ls_rcom_spin);
if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) || if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) ||
le64_to_cpu(rc_in->rc_id) != ls->ls_rcom_seq) { le64_to_cpu(rc_in->rc_id) != ls->ls_rcom_seq) {
log_debug(ls, "reject reply %d from %d seq %llx expect %llx", log_debug(ls, "reject reply %d from %d seq %llx expect %llx",
...@@ -301,7 +301,7 @@ static void receive_sync_reply(struct dlm_ls *ls, const struct dlm_rcom *rc_in) ...@@ -301,7 +301,7 @@ static void receive_sync_reply(struct dlm_ls *ls, const struct dlm_rcom *rc_in)
clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags); clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
wake_up(&ls->ls_wait_general); wake_up(&ls->ls_wait_general);
out: out:
spin_unlock(&ls->ls_rcom_spin); spin_unlock_bh(&ls->ls_rcom_spin);
} }
int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,
...@@ -613,11 +613,11 @@ void dlm_receive_rcom(struct dlm_ls *ls, const struct dlm_rcom *rc, int nodeid) ...@@ -613,11 +613,11 @@ void dlm_receive_rcom(struct dlm_ls *ls, const struct dlm_rcom *rc, int nodeid)
break; break;
} }
spin_lock(&ls->ls_recover_lock); spin_lock_bh(&ls->ls_recover_lock);
status = ls->ls_recover_status; status = ls->ls_recover_status;
stop = dlm_recovery_stopped(ls); stop = dlm_recovery_stopped(ls);
seq = ls->ls_recover_seq; seq = ls->ls_recover_seq;
spin_unlock(&ls->ls_recover_lock); spin_unlock_bh(&ls->ls_recover_lock);
if (stop && (rc->rc_type != cpu_to_le32(DLM_RCOM_STATUS))) if (stop && (rc->rc_type != cpu_to_le32(DLM_RCOM_STATUS)))
goto ignore; goto ignore;
......
...@@ -74,9 +74,9 @@ int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls)) ...@@ -74,9 +74,9 @@ int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
uint32_t dlm_recover_status(struct dlm_ls *ls) uint32_t dlm_recover_status(struct dlm_ls *ls)
{ {
uint32_t status; uint32_t status;
spin_lock(&ls->ls_recover_lock); spin_lock_bh(&ls->ls_recover_lock);
status = ls->ls_recover_status; status = ls->ls_recover_status;
spin_unlock(&ls->ls_recover_lock); spin_unlock_bh(&ls->ls_recover_lock);
return status; return status;
} }
...@@ -87,9 +87,9 @@ static void _set_recover_status(struct dlm_ls *ls, uint32_t status) ...@@ -87,9 +87,9 @@ static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status) void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{ {
spin_lock(&ls->ls_recover_lock); spin_lock_bh(&ls->ls_recover_lock);
_set_recover_status(ls, status); _set_recover_status(ls, status);
spin_unlock(&ls->ls_recover_lock); spin_unlock_bh(&ls->ls_recover_lock);
} }
static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status, static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
...@@ -188,13 +188,13 @@ int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq) ...@@ -188,13 +188,13 @@ int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq)
rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen); rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
if (!rv) { if (!rv) {
spin_lock(&ls->ls_recover_lock); spin_lock_bh(&ls->ls_recover_lock);
_set_recover_status(ls, DLM_RS_NODES_ALL); _set_recover_status(ls, DLM_RS_NODES_ALL);
ls->ls_num_slots = num_slots; ls->ls_num_slots = num_slots;
ls->ls_slots_size = slots_size; ls->ls_slots_size = slots_size;
ls->ls_slots = slots; ls->ls_slots = slots;
ls->ls_generation = gen; ls->ls_generation = gen;
spin_unlock(&ls->ls_recover_lock); spin_unlock_bh(&ls->ls_recover_lock);
} else { } else {
dlm_set_recover_status(ls, DLM_RS_NODES_ALL); dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
} }
...@@ -241,9 +241,9 @@ static int recover_list_empty(struct dlm_ls *ls) ...@@ -241,9 +241,9 @@ static int recover_list_empty(struct dlm_ls *ls)
{ {
int empty; int empty;
spin_lock(&ls->ls_recover_list_lock); spin_lock_bh(&ls->ls_recover_list_lock);
empty = list_empty(&ls->ls_recover_list); empty = list_empty(&ls->ls_recover_list);
spin_unlock(&ls->ls_recover_list_lock); spin_unlock_bh(&ls->ls_recover_list_lock);
return empty; return empty;
} }
...@@ -252,23 +252,23 @@ static void recover_list_add(struct dlm_rsb *r) ...@@ -252,23 +252,23 @@ static void recover_list_add(struct dlm_rsb *r)
{ {
struct dlm_ls *ls = r->res_ls; struct dlm_ls *ls = r->res_ls;
spin_lock(&ls->ls_recover_list_lock); spin_lock_bh(&ls->ls_recover_list_lock);
if (list_empty(&r->res_recover_list)) { if (list_empty(&r->res_recover_list)) {
list_add_tail(&r->res_recover_list, &ls->ls_recover_list); list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
ls->ls_recover_list_count++; ls->ls_recover_list_count++;
dlm_hold_rsb(r); dlm_hold_rsb(r);
} }
spin_unlock(&ls->ls_recover_list_lock); spin_unlock_bh(&ls->ls_recover_list_lock);
} }
static void recover_list_del(struct dlm_rsb *r) static void recover_list_del(struct dlm_rsb *r)
{ {
struct dlm_ls *ls = r->res_ls; struct dlm_ls *ls = r->res_ls;
spin_lock(&ls->ls_recover_list_lock); spin_lock_bh(&ls->ls_recover_list_lock);
list_del_init(&r->res_recover_list); list_del_init(&r->res_recover_list);
ls->ls_recover_list_count--; ls->ls_recover_list_count--;
spin_unlock(&ls->ls_recover_list_lock); spin_unlock_bh(&ls->ls_recover_list_lock);
dlm_put_rsb(r); dlm_put_rsb(r);
} }
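recover_list_add()/recover_list_del() keep a count alongside the list and hold an rsb reference for as long as the entry is on it, dropping the reference only after the entry has been unlinked; the lock now disables bottom halves like everywhere else on the message path. A small sketch of that add/del bookkeeping, using a hypothetical kref in place of the rsb hold/put helpers:

    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_rsb {
            struct kref ref;                /* kref_init() where the rsb is created */
            struct list_head recover_list;
    };

    static LIST_HEAD(demo_recover_list);
    static DEFINE_SPINLOCK(demo_recover_list_lock);
    static int demo_recover_list_count;

    static void demo_rsb_release(struct kref *ref) { /* freed elsewhere */ }

    static void demo_recover_list_add(struct demo_rsb *r)
    {
            spin_lock_bh(&demo_recover_list_lock);
            if (list_empty(&r->recover_list)) {
                    list_add_tail(&r->recover_list, &demo_recover_list);
                    demo_recover_list_count++;
                    kref_get(&r->ref);      /* the list holds a reference */
            }
            spin_unlock_bh(&demo_recover_list_lock);
    }

    static void demo_recover_list_del(struct demo_rsb *r)
    {
            spin_lock_bh(&demo_recover_list_lock);
            list_del_init(&r->recover_list);
            demo_recover_list_count--;
            spin_unlock_bh(&demo_recover_list_lock);
            kref_put(&r->ref, demo_rsb_release);    /* after it is off the list */
    }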
...@@ -277,7 +277,7 @@ static void recover_list_clear(struct dlm_ls *ls) ...@@ -277,7 +277,7 @@ static void recover_list_clear(struct dlm_ls *ls)
{ {
struct dlm_rsb *r, *s; struct dlm_rsb *r, *s;
spin_lock(&ls->ls_recover_list_lock); spin_lock_bh(&ls->ls_recover_list_lock);
list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) { list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
list_del_init(&r->res_recover_list); list_del_init(&r->res_recover_list);
r->res_recover_locks_count = 0; r->res_recover_locks_count = 0;
...@@ -290,17 +290,17 @@ static void recover_list_clear(struct dlm_ls *ls) ...@@ -290,17 +290,17 @@ static void recover_list_clear(struct dlm_ls *ls)
ls->ls_recover_list_count); ls->ls_recover_list_count);
ls->ls_recover_list_count = 0; ls->ls_recover_list_count = 0;
} }
spin_unlock(&ls->ls_recover_list_lock); spin_unlock_bh(&ls->ls_recover_list_lock);
} }
static int recover_idr_empty(struct dlm_ls *ls) static int recover_idr_empty(struct dlm_ls *ls)
{ {
int empty = 1; int empty = 1;
spin_lock(&ls->ls_recover_idr_lock); spin_lock_bh(&ls->ls_recover_idr_lock);
if (ls->ls_recover_list_count) if (ls->ls_recover_list_count)
empty = 0; empty = 0;
spin_unlock(&ls->ls_recover_idr_lock); spin_unlock_bh(&ls->ls_recover_idr_lock);
return empty; return empty;
} }
...@@ -310,7 +310,7 @@ static int recover_idr_add(struct dlm_rsb *r) ...@@ -310,7 +310,7 @@ static int recover_idr_add(struct dlm_rsb *r)
struct dlm_ls *ls = r->res_ls; struct dlm_ls *ls = r->res_ls;
int rv; int rv;
spin_lock(&ls->ls_recover_idr_lock); spin_lock_bh(&ls->ls_recover_idr_lock);
if (r->res_id) { if (r->res_id) {
rv = -1; rv = -1;
goto out_unlock; goto out_unlock;
...@@ -324,7 +324,7 @@ static int recover_idr_add(struct dlm_rsb *r) ...@@ -324,7 +324,7 @@ static int recover_idr_add(struct dlm_rsb *r)
dlm_hold_rsb(r); dlm_hold_rsb(r);
rv = 0; rv = 0;
out_unlock: out_unlock:
spin_unlock(&ls->ls_recover_idr_lock); spin_unlock_bh(&ls->ls_recover_idr_lock);
return rv; return rv;
} }
...@@ -332,11 +332,11 @@ static void recover_idr_del(struct dlm_rsb *r) ...@@ -332,11 +332,11 @@ static void recover_idr_del(struct dlm_rsb *r)
{ {
struct dlm_ls *ls = r->res_ls; struct dlm_ls *ls = r->res_ls;
spin_lock(&ls->ls_recover_idr_lock); spin_lock_bh(&ls->ls_recover_idr_lock);
idr_remove(&ls->ls_recover_idr, r->res_id); idr_remove(&ls->ls_recover_idr, r->res_id);
r->res_id = 0; r->res_id = 0;
ls->ls_recover_list_count--; ls->ls_recover_list_count--;
spin_unlock(&ls->ls_recover_idr_lock); spin_unlock_bh(&ls->ls_recover_idr_lock);
dlm_put_rsb(r); dlm_put_rsb(r);
} }
...@@ -345,9 +345,9 @@ static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id) ...@@ -345,9 +345,9 @@ static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
{ {
struct dlm_rsb *r; struct dlm_rsb *r;
spin_lock(&ls->ls_recover_idr_lock); spin_lock_bh(&ls->ls_recover_idr_lock);
r = idr_find(&ls->ls_recover_idr, (int)id); r = idr_find(&ls->ls_recover_idr, (int)id);
spin_unlock(&ls->ls_recover_idr_lock); spin_unlock_bh(&ls->ls_recover_idr_lock);
return r; return r;
} }
...@@ -356,7 +356,7 @@ static void recover_idr_clear(struct dlm_ls *ls) ...@@ -356,7 +356,7 @@ static void recover_idr_clear(struct dlm_ls *ls)
struct dlm_rsb *r; struct dlm_rsb *r;
int id; int id;
spin_lock(&ls->ls_recover_idr_lock); spin_lock_bh(&ls->ls_recover_idr_lock);
idr_for_each_entry(&ls->ls_recover_idr, r, id) { idr_for_each_entry(&ls->ls_recover_idr, r, id) {
idr_remove(&ls->ls_recover_idr, id); idr_remove(&ls->ls_recover_idr, id);
...@@ -372,7 +372,7 @@ static void recover_idr_clear(struct dlm_ls *ls) ...@@ -372,7 +372,7 @@ static void recover_idr_clear(struct dlm_ls *ls)
ls->ls_recover_list_count); ls->ls_recover_list_count);
ls->ls_recover_list_count = 0; ls->ls_recover_list_count = 0;
} }
spin_unlock(&ls->ls_recover_idr_lock); spin_unlock_bh(&ls->ls_recover_idr_lock);
} }
...@@ -887,7 +887,7 @@ void dlm_clear_toss(struct dlm_ls *ls) ...@@ -887,7 +887,7 @@ void dlm_clear_toss(struct dlm_ls *ls)
int i; int i;
for (i = 0; i < ls->ls_rsbtbl_size; i++) { for (i = 0; i < ls->ls_rsbtbl_size; i++) {
spin_lock(&ls->ls_rsbtbl[i].lock); spin_lock_bh(&ls->ls_rsbtbl[i].lock);
for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) { for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
next = rb_next(n); next = rb_next(n);
r = rb_entry(n, struct dlm_rsb, res_hashnode); r = rb_entry(n, struct dlm_rsb, res_hashnode);
...@@ -895,7 +895,7 @@ void dlm_clear_toss(struct dlm_ls *ls) ...@@ -895,7 +895,7 @@ void dlm_clear_toss(struct dlm_ls *ls)
dlm_free_rsb(r); dlm_free_rsb(r);
count++; count++;
} }
spin_unlock(&ls->ls_rsbtbl[i].lock); spin_unlock_bh(&ls->ls_rsbtbl[i].lock);
} }
if (count) if (count)
......
...@@ -26,7 +26,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls) ...@@ -26,7 +26,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls)
struct dlm_rsb *r; struct dlm_rsb *r;
int i, error = 0; int i, error = 0;
write_lock(&ls->ls_masters_lock); write_lock_bh(&ls->ls_masters_lock);
if (!list_empty(&ls->ls_masters_list)) { if (!list_empty(&ls->ls_masters_list)) {
log_error(ls, "root list not empty"); log_error(ls, "root list not empty");
error = -EINVAL; error = -EINVAL;
...@@ -46,7 +46,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls) ...@@ -46,7 +46,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls)
spin_unlock_bh(&ls->ls_rsbtbl[i].lock); spin_unlock_bh(&ls->ls_rsbtbl[i].lock);
} }
out: out:
write_unlock(&ls->ls_masters_lock); write_unlock_bh(&ls->ls_masters_lock);
return error; return error;
} }
...@@ -54,12 +54,12 @@ static void dlm_release_masters_list(struct dlm_ls *ls) ...@@ -54,12 +54,12 @@ static void dlm_release_masters_list(struct dlm_ls *ls)
{ {
struct dlm_rsb *r, *safe; struct dlm_rsb *r, *safe;
write_lock(&ls->ls_masters_lock); write_lock_bh(&ls->ls_masters_lock);
list_for_each_entry_safe(r, safe, &ls->ls_masters_list, res_masters_list) { list_for_each_entry_safe(r, safe, &ls->ls_masters_list, res_masters_list) {
list_del_init(&r->res_masters_list); list_del_init(&r->res_masters_list);
dlm_put_rsb(r); dlm_put_rsb(r);
} }
write_unlock(&ls->ls_masters_lock); write_unlock_bh(&ls->ls_masters_lock);
} }
static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list) static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list)
...@@ -103,9 +103,9 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq) ...@@ -103,9 +103,9 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{ {
int error = -EINTR; int error = -EINTR;
write_lock(&ls->ls_recv_active); write_lock_bh(&ls->ls_recv_active);
spin_lock(&ls->ls_recover_lock); spin_lock_bh(&ls->ls_recover_lock);
if (ls->ls_recover_seq == seq) { if (ls->ls_recover_seq == seq) {
set_bit(LSFL_RUNNING, &ls->ls_flags); set_bit(LSFL_RUNNING, &ls->ls_flags);
/* unblocks processes waiting to enter the dlm */ /* unblocks processes waiting to enter the dlm */
...@@ -113,9 +113,9 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq) ...@@ -113,9 +113,9 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq)
clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags); clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
error = 0; error = 0;
} }
spin_unlock(&ls->ls_recover_lock); spin_unlock_bh(&ls->ls_recover_lock);
write_unlock(&ls->ls_recv_active); write_unlock_bh(&ls->ls_recv_active);
return error; return error;
} }
...@@ -349,12 +349,12 @@ static void do_ls_recovery(struct dlm_ls *ls) ...@@ -349,12 +349,12 @@ static void do_ls_recovery(struct dlm_ls *ls)
struct dlm_recover *rv = NULL; struct dlm_recover *rv = NULL;
int error; int error;
spin_lock(&ls->ls_recover_lock); spin_lock_bh(&ls->ls_recover_lock);
rv = ls->ls_recover_args; rv = ls->ls_recover_args;
ls->ls_recover_args = NULL; ls->ls_recover_args = NULL;
if (rv && ls->ls_recover_seq == rv->seq) if (rv && ls->ls_recover_seq == rv->seq)
clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags); clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
spin_unlock(&ls->ls_recover_lock); spin_unlock_bh(&ls->ls_recover_lock);
if (rv) { if (rv) {
error = ls_recover(ls, rv); error = ls_recover(ls, rv);
......
...@@ -68,7 +68,7 @@ int dlm_process_requestqueue(struct dlm_ls *ls) ...@@ -68,7 +68,7 @@ int dlm_process_requestqueue(struct dlm_ls *ls)
struct dlm_message *ms; struct dlm_message *ms;
int error = 0; int error = 0;
write_lock(&ls->ls_requestqueue_lock); write_lock_bh(&ls->ls_requestqueue_lock);
for (;;) { for (;;) {
if (list_empty(&ls->ls_requestqueue)) { if (list_empty(&ls->ls_requestqueue)) {
clear_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags); clear_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags);
...@@ -96,11 +96,11 @@ int dlm_process_requestqueue(struct dlm_ls *ls) ...@@ -96,11 +96,11 @@ int dlm_process_requestqueue(struct dlm_ls *ls)
error = -EINTR; error = -EINTR;
break; break;
} }
write_unlock(&ls->ls_requestqueue_lock); write_unlock_bh(&ls->ls_requestqueue_lock);
schedule(); schedule();
write_lock(&ls->ls_requestqueue_lock); write_lock_bh(&ls->ls_requestqueue_lock);
} }
write_unlock(&ls->ls_requestqueue_lock); write_unlock_bh(&ls->ls_requestqueue_lock);
return error; return error;
} }
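One consequence of the _bh conversion is visible in dlm_process_requestqueue(): bottom halves are disabled while ls_requestqueue_lock is held, and sleeping is not allowed in that state, so the lock is always dropped around schedule() and retaken afterwards. A sketch of that shape with illustrative names (the per-entry delivery is elided):

    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct demo_req {
            struct list_head list;
    };

    static DEFINE_RWLOCK(demo_rq_lock);
    static LIST_HEAD(demo_rq);

    static void demo_process_requestqueue(void)
    {
            struct demo_req *req;

            write_lock_bh(&demo_rq_lock);
            for (;;) {
                    req = list_first_entry_or_null(&demo_rq, struct demo_req, list);
                    if (!req)
                            break;
                    list_del(&req->list);

                    /* never sleep with bottom halves disabled: drop the
                     * lock around anything that might schedule */
                    write_unlock_bh(&demo_rq_lock);
                    /* ... deliver the saved request, may block ... */
                    kfree(req);
                    schedule();
                    write_lock_bh(&demo_rq_lock);
            }
            write_unlock_bh(&demo_rq_lock);
    }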
...@@ -135,7 +135,7 @@ void dlm_purge_requestqueue(struct dlm_ls *ls) ...@@ -135,7 +135,7 @@ void dlm_purge_requestqueue(struct dlm_ls *ls)
struct dlm_message *ms; struct dlm_message *ms;
struct rq_entry *e, *safe; struct rq_entry *e, *safe;
write_lock(&ls->ls_requestqueue_lock); write_lock_bh(&ls->ls_requestqueue_lock);
list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) { list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
ms = &e->request; ms = &e->request;
...@@ -144,6 +144,6 @@ void dlm_purge_requestqueue(struct dlm_ls *ls) ...@@ -144,6 +144,6 @@ void dlm_purge_requestqueue(struct dlm_ls *ls)
kfree(e); kfree(e);
} }
} }
write_unlock(&ls->ls_requestqueue_lock); write_unlock_bh(&ls->ls_requestqueue_lock);
} }
...@@ -189,7 +189,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, ...@@ -189,7 +189,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
return; return;
ls = lkb->lkb_resource->res_ls; ls = lkb->lkb_resource->res_ls;
spin_lock(&ls->ls_clear_proc_locks); spin_lock_bh(&ls->ls_clear_proc_locks);
/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed
...@@ -211,7 +211,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, ...@@ -211,7 +211,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status)) if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
set_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags); set_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags);
spin_lock(&proc->asts_spin); spin_lock_bh(&proc->asts_spin);
rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags, &cb); rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags, &cb);
switch (rv) { switch (rv) {
...@@ -232,23 +232,23 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, ...@@ -232,23 +232,23 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
case DLM_ENQUEUE_CALLBACK_FAILURE: case DLM_ENQUEUE_CALLBACK_FAILURE:
fallthrough; fallthrough;
default: default:
spin_unlock(&proc->asts_spin); spin_unlock_bh(&proc->asts_spin);
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
goto out; goto out;
} }
spin_unlock(&proc->asts_spin); spin_unlock_bh(&proc->asts_spin);
if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) { if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) {
/* N.B. spin_lock locks_spin, not asts_spin */ /* N.B. spin_lock locks_spin, not asts_spin */
spin_lock(&proc->locks_spin); spin_lock_bh(&proc->locks_spin);
if (!list_empty(&lkb->lkb_ownqueue)) { if (!list_empty(&lkb->lkb_ownqueue)) {
list_del_init(&lkb->lkb_ownqueue); list_del_init(&lkb->lkb_ownqueue);
dlm_put_lkb(lkb); dlm_put_lkb(lkb);
} }
spin_unlock(&proc->locks_spin); spin_unlock_bh(&proc->locks_spin);
} }
out: out:
spin_unlock(&ls->ls_clear_proc_locks); spin_unlock_bh(&ls->ls_clear_proc_locks);
} }
static int device_user_lock(struct dlm_user_proc *proc, static int device_user_lock(struct dlm_user_proc *proc,
...@@ -817,10 +817,10 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, ...@@ -817,10 +817,10 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags)) if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
return -EINVAL; return -EINVAL;
spin_lock(&proc->asts_spin); spin_lock_bh(&proc->asts_spin);
if (list_empty(&proc->asts)) { if (list_empty(&proc->asts)) {
if (file->f_flags & O_NONBLOCK) { if (file->f_flags & O_NONBLOCK) {
spin_unlock(&proc->asts_spin); spin_unlock_bh(&proc->asts_spin);
return -EAGAIN; return -EAGAIN;
} }
...@@ -829,16 +829,16 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, ...@@ -829,16 +829,16 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
repeat: repeat:
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
if (list_empty(&proc->asts) && !signal_pending(current)) { if (list_empty(&proc->asts) && !signal_pending(current)) {
spin_unlock(&proc->asts_spin); spin_unlock_bh(&proc->asts_spin);
schedule(); schedule();
spin_lock(&proc->asts_spin); spin_lock_bh(&proc->asts_spin);
goto repeat; goto repeat;
} }
set_current_state(TASK_RUNNING); set_current_state(TASK_RUNNING);
remove_wait_queue(&proc->wait, &wait); remove_wait_queue(&proc->wait, &wait);
if (signal_pending(current)) { if (signal_pending(current)) {
spin_unlock(&proc->asts_spin); spin_unlock_bh(&proc->asts_spin);
return -ERESTARTSYS; return -ERESTARTSYS;
} }
} }
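The same rule shows up in device_read(): the manual wait loop must release asts_spin (re-enabling bottom halves) before calling schedule(), and retake it before re-checking the list. A compressed sketch of that loop, assuming a hypothetical per-process struct mirroring the fields the hunk touches:

    #include <linux/list.h>
    #include <linux/sched/signal.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct demo_proc {
            spinlock_t asts_spin;
            struct list_head asts;
            wait_queue_head_t wait;
    };

    static int demo_wait_for_callback(struct demo_proc *proc)
    {
            DECLARE_WAITQUEUE(wait, current);

            spin_lock_bh(&proc->asts_spin);
            if (list_empty(&proc->asts)) {
                    add_wait_queue(&proc->wait, &wait);
            repeat:
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (list_empty(&proc->asts) && !signal_pending(current)) {
                            /* cannot sleep with bottom halves disabled */
                            spin_unlock_bh(&proc->asts_spin);
                            schedule();
                            spin_lock_bh(&proc->asts_spin);
                            goto repeat;
                    }
                    set_current_state(TASK_RUNNING);
                    remove_wait_queue(&proc->wait, &wait);
                    if (signal_pending(current)) {
                            spin_unlock_bh(&proc->asts_spin);
                            return -ERESTARTSYS;
                    }
            }
            /* ... dequeue the first callback here, then ... */
            spin_unlock_bh(&proc->asts_spin);
            return 0;
    }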
...@@ -849,7 +849,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, ...@@ -849,7 +849,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
cb = list_first_entry(&proc->asts, struct dlm_callback, list); cb = list_first_entry(&proc->asts, struct dlm_callback, list);
list_del(&cb->list); list_del(&cb->list);
spin_unlock(&proc->asts_spin); spin_unlock_bh(&proc->asts_spin);
if (cb->flags & DLM_CB_BAST) { if (cb->flags & DLM_CB_BAST) {
trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name, trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name,
...@@ -874,12 +874,12 @@ static __poll_t device_poll(struct file *file, poll_table *wait) ...@@ -874,12 +874,12 @@ static __poll_t device_poll(struct file *file, poll_table *wait)
poll_wait(file, &proc->wait, wait); poll_wait(file, &proc->wait, wait);
spin_lock(&proc->asts_spin); spin_lock_bh(&proc->asts_spin);
if (!list_empty(&proc->asts)) { if (!list_empty(&proc->asts)) {
spin_unlock(&proc->asts_spin); spin_unlock_bh(&proc->asts_spin);
return EPOLLIN | EPOLLRDNORM; return EPOLLIN | EPOLLRDNORM;
} }
spin_unlock(&proc->asts_spin); spin_unlock_bh(&proc->asts_spin);
return 0; return 0;
} }
......