Commit d1e78238 authored by Xue jiufei, committed by Linus Torvalds

ocfs2: do not set OCFS2_LOCK_UPCONVERT_FINISHING if nonblocking lock can not be granted at once

ocfs2_readpages() uses a nonblocking flag to avoid page lock inversion.  This
can trigger a cluster hang, because the flag OCFS2_LOCK_UPCONVERT_FINISHING
is not cleared if the nonblocking lock cannot be granted at once.  The stale
flag prevents the dc (downconvert) thread from downconverting, so other
nodes can never acquire this lockres.

Therefore, do not set OCFS2_LOCK_UPCONVERT_FINISHING when receiving the AST
if the nonblocking lock request has already returned.
Signed-off-by: joyce.xue <xuejiufei@huawei.com>
Reviewed-by: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent dc171580
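To make the fix concrete before the diff, here is a minimal userspace C sketch of the flag handshake the patch introduces. This is a simplified model, not the kernel code: the struct and the OCFS2_LOCK_BUSY / OCFS2_LOCK_UPCONVERT_FINISHING values are illustrative stand-ins; only OCFS2_LOCK_NONBLOCK_FINISHED's value comes from the patch below.

/*
 * Sketch of the fixed AST-side logic: set UPCONVERT_FINISHING only when
 * the nonblocking caller has not already given up; otherwise consume the
 * NONBLOCK_FINISHED marker so the dc thread may downconvert.
 */
#include <stdio.h>

#define OCFS2_LOCK_BUSY                 (0x00000010) /* illustrative */
#define OCFS2_LOCK_UPCONVERT_FINISHING  (0x00000800) /* illustrative */
#define OCFS2_LOCK_NONBLOCK_FINISHED    (0x00001000) /* value from patch */

struct lockres { unsigned long l_flags; };

/* Models ocfs2_generic_handle_convert_action() after the fix. */
static void handle_convert_ast(struct lockres *res)
{
	if (!(res->l_flags & OCFS2_LOCK_NONBLOCK_FINISHED))
		res->l_flags |= OCFS2_LOCK_UPCONVERT_FINISHING;
	else
		res->l_flags &= ~OCFS2_LOCK_NONBLOCK_FINISHED;
	res->l_flags &= ~OCFS2_LOCK_BUSY;
}

int main(void)
{
	/* Nonblocking caller already returned -EAGAIN and marked the lock. */
	struct lockres res = {
		.l_flags = OCFS2_LOCK_BUSY | OCFS2_LOCK_NONBLOCK_FINISHED,
	};

	handle_convert_ast(&res);
	/* UPCONVERT_FINISHING stays clear: the dc thread may downconvert. */
	printf("l_flags after AST: 0x%08lx\n", res.l_flags);
	return 0;
}

Run, the sketch shows that when the nonblocking caller has already bailed out, the AST path leaves UPCONVERT_FINISHING clear, so nothing blocks the dc thread from downconverting.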
@@ -861,8 +861,13 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo
 	 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
 	 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
 	 * downconverting the lock before the upconvert has fully completed.
+	 * Do not prevent the dc thread from downconverting if NONBLOCK lock
+	 * had already returned.
 	 */
-	lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+	if (!(lockres->l_flags & OCFS2_LOCK_NONBLOCK_FINISHED))
+		lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+	else
+		lockres_clear_flags(lockres, OCFS2_LOCK_NONBLOCK_FINISHED);
 
 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
 }
@@ -1324,13 +1329,12 @@ static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
 
 /* returns 0 if the mw that was removed was already satisfied, -EBUSY
  * if the mask still hadn't reached its goal */
-static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
-				      struct ocfs2_mask_waiter *mw)
+static int __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
+					struct ocfs2_mask_waiter *mw)
 {
-	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&lockres->l_lock, flags);
+	assert_spin_locked(&lockres->l_lock);
 	if (!list_empty(&mw->mw_item)) {
 		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
 			ret = -EBUSY;
@@ -1338,6 +1342,18 @@ static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
 		list_del_init(&mw->mw_item);
 		init_completion(&mw->mw_complete);
 	}
-	spin_unlock_irqrestore(&lockres->l_lock, flags);
+
 	return ret;
 }
+
+static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
+				      struct ocfs2_mask_waiter *mw)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&lockres->l_lock, flags);
+	ret = __lockres_remove_mask_waiter(lockres, mw);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
+	return ret;
+}
@@ -1373,6 +1389,7 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
 	unsigned long flags;
 	unsigned int gen;
 	int noqueue_attempted = 0;
+	int dlm_locked = 0;
 
 	ocfs2_init_mask_waiter(&mw);
 
@@ -1481,6 +1498,7 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
 			ocfs2_recover_from_dlm_error(lockres, 1);
 			goto out;
 		}
+		dlm_locked = 1;
 
 		mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
 		     lockres->l_name);
@@ -1514,11 +1532,18 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
 	if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
 	    mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
 		wait = 0;
-		if (lockres_remove_mask_waiter(lockres, &mw))
+		spin_lock_irqsave(&lockres->l_lock, flags);
+		if (__lockres_remove_mask_waiter(lockres, &mw)) {
+			if (dlm_locked)
+				lockres_or_flags(lockres,
+						OCFS2_LOCK_NONBLOCK_FINISHED);
+			spin_unlock_irqrestore(&lockres->l_lock, flags);
 			ret = -EAGAIN;
-		else
+		} else {
+			spin_unlock_irqrestore(&lockres->l_lock, flags);
 			goto again;
+		}
 	}
 	if (wait) {
 		ret = ocfs2_wait_for_mask(&mw);
 		if (ret == 0)
@@ -144,6 +144,12 @@ enum ocfs2_unlock_action {
 						 * before the upconvert
 						 * has completed */
 
+#define OCFS2_LOCK_NONBLOCK_FINISHED (0x00001000) /* NONBLOCK cluster
+						    * lock has already
+						    * returned, do not block
+						    * dc thread from
+						    * downconverting */
+
 struct ocfs2_lock_res_ops;
 
 typedef void (*ocfs2_lock_callback)(int status, unsigned long data);