Commit 5a9a80ba authored by Bruce Korb, committed by Greg Kroah-Hartman

staging: lustre: ldlm: use accessor macros for l_flags

Convert most of the ldlm lock's l_flags references from direct
bit twiddling to using bit-specific macros.  A few multi-bit
operations are left as an exercise for the reader.

The changes are mostly in ldlm, but also in llite, osc and quota.
Also add a multi-bit (mask) test.
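
For illustration, a minimal sketch of the conversion pattern (the
accessor names match the patch; the bit position and do_something()
below are assumptions for illustration only):

	/* before: direct bit twiddling on the 64-bit l_flags word */
	lock->l_flags |= LDLM_FL_CBPENDING;
	if (lock->l_flags & LDLM_FL_CBPENDING)
		do_something(lock);

	/* after: per-flag wrappers built from the base macros in
	 * lustre_dlm_flags.h, along the lines of (bit number assumed):
	 *   #define ldlm_is_cbpending(_l)  LDLM_TEST_FLAG((_l), 1ULL << 4)
	 *   #define ldlm_set_cbpending(_l) LDLM_SET_FLAG((_l), 1ULL << 4)
	 */
	ldlm_set_cbpending(lock);
	if (ldlm_is_cbpending(lock))
		do_something(lock);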
Signed-off-by: Bruce Korb <bruce.korb@gmail.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-2906
Reviewed-by: Keith Mannthey <Keith.Mannthey@intel.com>
Reviewed-on: http://review.whamcloud.com/7963
Reviewed-by: Doug Oucharek <doug.s.oucharek@intel.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0fdd2b8d
@@ -381,6 +381,9 @@
 /** test for ldlm_lock flag bit set */
 #define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
+/** multi-bit test: are any of mask bits set? */
+#define LDLM_HAVE_MASK(_l, _m) ((_l)->l_flags & LDLM_FL_##_m##_MASK)
 /** set a ldlm_lock flag bit */
 #define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
......
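The mask test works by token pasting: LDLM_HAVE_MASK(lock, GONE)
expands to ((lock)->l_flags & LDLM_FL_GONE_MASK). A hedged usage
sketch (the mask composition shown is an assumption, not the header's
exact definition):

	/* assuming a group mask built from related single-bit flags:
	 *   #define LDLM_FL_GONE_MASK (LDLM_FL_DESTROYED | LDLM_FL_FAILED)
	 */
	if (LDLM_HAVE_MASK(lock, GONE))	/* any "gone" bit set? */
		return;			/* the lock is already dead */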
@@ -54,7 +54,7 @@ struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
 lock_res(lock->l_resource);
-lock->l_flags |= LDLM_FL_RES_LOCKED;
+ldlm_set_res_locked(lock);
 return lock->l_resource;
 }
 EXPORT_SYMBOL(lock_res_and_lock);
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(lock_res_and_lock);
 void unlock_res_and_lock(struct ldlm_lock *lock)
 {
 /* on server-side resource of lock doesn't change */
-lock->l_flags &= ~LDLM_FL_RES_LOCKED;
+ldlm_clear_res_locked(lock);
 unlock_res(lock->l_resource);
 spin_unlock(&lock->l_lock);
......
@@ -75,12 +75,12 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
 * just after we finish and take our lock into account in its
 * calculation of the kms
 */
-lock->l_flags |= LDLM_FL_KMS_IGNORE;
+ldlm_set_kms_ignore(lock);
 list_for_each(tmp, &res->lr_granted) {
 lck = list_entry(tmp, struct ldlm_lock, l_res_link);
-if (lck->l_flags & LDLM_FL_KMS_IGNORE)
+if (ldlm_is_kms_ignore(lck))
 continue;
 if (lck->l_policy_data.l_extent.end >= old_kms)
......
@@ -101,8 +101,7 @@ ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
 LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
 list_del_init(&lock->l_res_link);
-if (flags == LDLM_FL_WAIT_NOREPROC &&
-!(lock->l_flags & LDLM_FL_FAILED)) {
+if (flags == LDLM_FL_WAIT_NOREPROC && !ldlm_is_failed(lock)) {
 /* client side - set a flag to prevent sending a CANCEL */
 lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
@@ -436,7 +435,7 @@ ldlm_flock_interrupted_wait(void *data)
 lock_res_and_lock(lock);
 /* client side - set flag to prevent lock from being put on LRU list */
-lock->l_flags |= LDLM_FL_CBPENDING;
+ldlm_set_cbpending(lock);
 unlock_res_and_lock(lock);
 }
@@ -520,7 +519,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 granted:
 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
-if (lock->l_flags & LDLM_FL_FAILED) {
+if (ldlm_is_failed(lock)) {
 LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
 return -EIO;
 }
@@ -533,7 +532,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 * Protect against race where lock could have been just destroyed
 * due to overlap in ldlm_process_flock_lock().
 */
-if (lock->l_flags & LDLM_FL_DESTROYED) {
+if (ldlm_is_destroyed(lock)) {
 unlock_res_and_lock(lock);
 LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
 return 0;
@@ -542,7 +541,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 /* ldlm_lock_enqueue() has already placed lock on the granted list. */
 list_del_init(&lock->l_res_link);
-if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) {
+if (ldlm_is_flock_deadlock(lock)) {
 LDLM_DEBUG(lock, "client-side enqueue deadlock received");
 rc = -EDEADLK;
 } else if (flags & LDLM_FL_TEST_LOCK) {
......
@@ -305,9 +305,10 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
 int ret = 0;
 lock_res_and_lock(lock);
-if (((lock->l_req_mode == lock->l_granted_mode) &&
-!(lock->l_flags & LDLM_FL_CP_REQD)) ||
-(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCEL)))
+if ((lock->l_req_mode == lock->l_granted_mode) &&
+!ldlm_is_cp_reqd(lock))
 ret = 1;
+else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
+ret = 1;
 unlock_res_and_lock(lock);
......
@@ -124,10 +124,10 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
 LDLM_DEBUG(lock, "client blocking AST callback handler");
 lock_res_and_lock(lock);
-lock->l_flags |= LDLM_FL_CBPENDING;
+ldlm_set_cbpending(lock);
-if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
-lock->l_flags |= LDLM_FL_CANCEL;
+if (ldlm_is_cancel_on_block(lock))
+ldlm_set_cancel(lock);
 do_ast = !lock->l_readers && !lock->l_writers;
 unlock_res_and_lock(lock);
@@ -172,7 +172,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
 set_current_state(TASK_INTERRUPTIBLE);
 schedule_timeout(to);
 if (lock->l_granted_mode == lock->l_req_mode ||
-lock->l_flags & LDLM_FL_DESTROYED)
+ldlm_is_destroyed(lock))
 break;
 }
 }
@@ -215,7 +215,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
 }
 lock_res_and_lock(lock);
-if ((lock->l_flags & LDLM_FL_DESTROYED) ||
+if (ldlm_is_destroyed(lock) ||
 lock->l_granted_mode == lock->l_req_mode) {
 /* bug 11300: the lock has already been granted */
 unlock_res_and_lock(lock);
@@ -291,7 +291,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
 out:
 if (rc < 0) {
 lock_res_and_lock(lock);
-lock->l_flags |= LDLM_FL_FAILED;
+ldlm_set_failed(lock);
 unlock_res_and_lock(lock);
 wake_up(&lock->l_waitq);
 }
@@ -360,8 +360,7 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
 spin_lock(&blp->blp_lock);
-if (blwi->blwi_lock &&
-blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
+if (blwi->blwi_lock && ldlm_is_discard_data(blwi->blwi_lock)) {
 /* add LDLM_FL_DISCARD_DATA requests to the priority list */
 list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
 } else {
@@ -626,7 +625,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
 return 0;
 }
-if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
+if (ldlm_is_fail_loc(lock) &&
 lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
@@ -640,9 +639,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
 * we can tell the server we have no lock. Otherwise, we
 * should send cancel after dropping the cache.
 */
-if (((lock->l_flags & LDLM_FL_CANCELING) &&
-(lock->l_flags & LDLM_FL_BL_DONE)) ||
-(lock->l_flags & LDLM_FL_FAILED)) {
+if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
+ldlm_is_failed(lock)) {
 LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n",
 dlm_req->lock_handle[0].cookie);
 unlock_res_and_lock(lock);
@@ -656,7 +654,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
 * Let ldlm_cancel_lru() be fast.
 */
 ldlm_lock_remove_from_lru(lock);
-lock->l_flags |= LDLM_FL_BL_AST;
+ldlm_set_bl_ast(lock);
 }
 unlock_res_and_lock(lock);
@@ -674,7 +672,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
 case LDLM_BL_CALLBACK:
 CDEBUG(D_INODE, "blocking ast\n");
 req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
-if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
+if (!ldlm_is_cancel_on_block(lock)) {
 rc = ldlm_callback_reply(req, 0);
 if (req->rq_no_reply || rc)
 ldlm_callback_errmsg(req, "Normal process", rc,
......
@@ -153,7 +153,7 @@ static int ldlm_completion_tail(struct ldlm_lock *lock)
 long delay;
 int result;
-if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) {
+if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
 LDLM_DEBUG(lock, "client-side enqueue: destroyed");
 result = -EIO;
 } else {
@@ -252,7 +252,7 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 lwd.lwd_lock = lock;
-if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
+if (ldlm_is_no_timeout(lock)) {
 LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
 lwi = LWI_INTR(interrupted_completion_wait, &lwd);
 } else {
@@ -269,7 +269,7 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
 OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
-lock->l_flags |= LDLM_FL_FAIL_LOC;
+ldlm_set_fail_loc(lock);
 rc = -EINTR;
 } else {
 /* Go to sleep until the lock is granted or cancelled. */
@@ -296,7 +296,7 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
 lock_res_and_lock(lock);
 /* Check that lock is not granted or failed, we might race. */
 if ((lock->l_req_mode != lock->l_granted_mode) &&
-!(lock->l_flags & LDLM_FL_FAILED)) {
+!ldlm_is_failed(lock)) {
 /* Make sure that this lock will not be found by raced
 * bl_ast and -EINVAL reply is sent to server anyways.
 * bug 17645
@@ -821,12 +821,11 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
 LDLM_DEBUG(lock, "client-side cancel");
 /* Set this flag to prevent others from getting new references*/
 lock_res_and_lock(lock);
-lock->l_flags |= LDLM_FL_CBPENDING;
+ldlm_set_cbpending(lock);
 local_only = !!(lock->l_flags &
 (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
 ldlm_cancel_callback(lock);
-rc = (lock->l_flags & LDLM_FL_BL_AST) ?
-LDLM_FL_BL_AST : LDLM_FL_CANCELING;
+rc = ldlm_is_bl_ast(lock) ? LDLM_FL_BL_AST : LDLM_FL_CANCELING;
 unlock_res_and_lock(lock);
 if (local_only) {
@@ -1150,7 +1149,7 @@ ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
 default:
 result = LDLM_POLICY_SKIP_LOCK;
 lock_res_and_lock(lock);
-lock->l_flags |= LDLM_FL_SKIPPED;
+ldlm_set_skipped(lock);
 unlock_res_and_lock(lock);
 break;
 }
@@ -1381,9 +1380,9 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
 list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
 l_lru) {
 /* No locks which got blocking requests. */
-LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+LASSERT(!ldlm_is_bl_ast(lock));
-if (no_wait && lock->l_flags & LDLM_FL_SKIPPED)
+if (no_wait && ldlm_is_skipped(lock))
 /* already processed */
 continue;
@@ -1394,7 +1393,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
 /* Somebody is already doing CANCEL. No need for this
 * lock in LRU, do not traverse it again.
 */
-if (!(lock->l_flags & LDLM_FL_CANCELING))
+if (!ldlm_is_canceling(lock))
 break;
 ldlm_lock_remove_from_lru_nolock(lock);
@@ -1437,7 +1436,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
 lock_res_and_lock(lock);
 /* Check flags again under the lock. */
-if ((lock->l_flags & LDLM_FL_CANCELING) ||
+if (ldlm_is_canceling(lock) ||
 (ldlm_lock_remove_from_lru_check(lock, last_use) == 0)) {
 /* Another thread is removing lock from LRU, or
 * somebody is already doing CANCEL, or there
@@ -1461,7 +1460,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
 * where while we are doing cancel here, server is also
 * silently cancelling this lock.
 */
-lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
+ldlm_clear_cancel_on_block(lock);
 /* Setting the CBPENDING flag is a little misleading,
 * but prevents an important race; namely, once
@@ -1558,8 +1557,7 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
 /* If somebody is already doing CANCEL, or blocking AST came,
 * skip this lock.
 */
-if (lock->l_flags & LDLM_FL_BL_AST ||
-lock->l_flags & LDLM_FL_CANCELING)
+if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock))
 continue;
 if (lockmode_compat(lock->l_granted_mode, mode))
@@ -1918,7 +1916,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
 int flags;
 /* Bug 11974: Do not replay a lock which is actively being canceled */
-if (lock->l_flags & LDLM_FL_CANCELING) {
+if (ldlm_is_canceling(lock)) {
 LDLM_DEBUG(lock, "Not replaying canceled lock:");
 return 0;
 }
@@ -1927,7 +1925,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
 * server might have long dropped it, but notification of that event was
 * lost by network. (and server granted conflicting lock already)
 */
-if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
+if (ldlm_is_cancel_on_block(lock)) {
 LDLM_DEBUG(lock, "Not replaying reply-less lock:");
 ldlm_lock_cancel(lock);
 return 0;
......
@@ -758,12 +758,12 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
 list_for_each(tmp, q) {
 lock = list_entry(tmp, struct ldlm_lock,
 l_res_link);
-if (lock->l_flags & LDLM_FL_CLEANED) {
+if (ldlm_is_cleaned(lock)) {
 lock = NULL;
 continue;
 }
 LDLM_LOCK_GET(lock);
-lock->l_flags |= LDLM_FL_CLEANED;
+ldlm_set_cleaned(lock);
 break;
 }
@@ -775,13 +775,13 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
 /* Set CBPENDING so nothing in the cancellation path
 * can match this lock.
 */
-lock->l_flags |= LDLM_FL_CBPENDING;
-lock->l_flags |= LDLM_FL_FAILED;
+ldlm_set_cbpending(lock);
+ldlm_set_failed(lock);
 lock->l_flags |= flags;
 /* ... without sending a CANCEL message for local_only. */
 if (local_only)
-lock->l_flags |= LDLM_FL_LOCAL_ONLY;
+ldlm_set_local_only(lock);
 if (local_only && (lock->l_readers || lock->l_writers)) {
 /* This is a little bit gross, but much better than the
@@ -1275,7 +1275,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
 LDLM_DEBUG(lock, "About to add this lock:\n");
-if (lock->l_flags & LDLM_FL_DESTROYED) {
+if (ldlm_is_destroyed(lock)) {
 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
 return;
 }
......
@@ -108,11 +108,8 @@ static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
 static inline int return_if_equal(struct ldlm_lock *lock, void *data)
 {
-if ((lock->l_flags &
-(LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA)) ==
-(LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA))
-return LDLM_ITER_CONTINUE;
-return LDLM_ITER_STOP;
+return (ldlm_is_canceling(lock) && ldlm_is_discard_data(lock)) ?
+LDLM_ITER_CONTINUE : LDLM_ITER_STOP;
 }
 /* find any ldlm lock of the inode in mdc and lov
......
@@ -3355,10 +3355,10 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
 int rc;
 CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
-PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY),
+PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
 lock->l_lvb_data, lock->l_lvb_len);
-if (lock->l_lvb_data && (lock->l_flags & LDLM_FL_LVB_READY))
+if (lock->l_lvb_data && ldlm_is_lvb_ready(lock))
 return 0;
 /* if layout lock was granted right away, the layout is returned
@@ -3442,7 +3442,7 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
 md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL);
 lock_res_and_lock(lock);
-lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY);
+lvb_ready = ldlm_is_lvb_ready(lock);
 unlock_res_and_lock(lock);
 /* checking lvb_ready is racy but this is okay. The worst case is
 * that multi processes may configure the file on the same time.
......
@@ -190,7 +190,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 break;
 /* Invalidate all dentries associated with this inode */
-LASSERT(lock->l_flags & LDLM_FL_CANCELING);
+LASSERT(ldlm_is_canceling(lock));
 if (!fid_res_name_eq(ll_inode2fid(inode),
 &lock->l_resource->lr_name)) {
......
@@ -120,7 +120,7 @@ static int osc_lock_invariant(struct osc_lock *ols)
 * ast.
 */
 if (!ergo(olock && ols->ols_state < OLS_CANCELLED,
-((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
+!ldlm_is_destroyed(olock)))
 return 0;
 if (!ergo(ols->ols_state == OLS_GRANTED,
......