Commit 67ae1f06 authored by Sunil Mushran, committed by Mark Fasheh

ocfs2/dlm: Indent dlm_cleanup_master_list()

The previous patch explicitly did not indent dlm_cleanup_master_list()
so as to make the patch readable. This patch properly indents the
function.
Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
Signed-off-by: Mark Fasheh <mfasheh@suse.com>
parent 2ed6c750
...@@ -3324,43 +3324,42 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) ...@@ -3324,43 +3324,42 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
mle->type != DLM_MLE_MASTER && mle->type != DLM_MLE_MASTER &&
mle->type != DLM_MLE_MIGRATION); mle->type != DLM_MLE_MIGRATION);
/* MASTER mles are initiated locally. the waiting /* MASTER mles are initiated locally. The waiting
* process will notice the node map change * process will notice the node map change shortly.
* shortly. let that happen as normal. */ * Let that happen as normal. */
if (mle->type == DLM_MLE_MASTER) if (mle->type == DLM_MLE_MASTER)
continue; continue;
/* BLOCK mles are initiated by other nodes. Need to
/* BLOCK mles are initiated by other nodes. * clean up if the dead node would have been the
* need to clean up if the dead node would have * master. */
* been the master. */
if (mle->type == DLM_MLE_BLOCK) { if (mle->type == DLM_MLE_BLOCK) {
dlm_clean_block_mle(dlm, mle, dead_node); dlm_clean_block_mle(dlm, mle, dead_node);
continue; continue;
} }
/* everything else is a MIGRATION mle */ /* Everything else is a MIGRATION mle */
/* The rule for MIGRATION mles is that the master
* becomes UNKNOWN if *either* the original or the new
* master dies. All UNKNOWN lockres' are sent to
* whichever node becomes the recovery master. The new
* master is responsible for determining if there is
* still a master for this lockres, or if he needs to
* take over mastery. Either way, this node should
* expect another message to resolve this. */
/* the rule for MIGRATION mles is that the master
* becomes UNKNOWN if *either* the original or
* the new master dies. all UNKNOWN lockreses
* are sent to whichever node becomes the recovery
* master. the new master is responsible for
* determining if there is still a master for
* this lockres, or if he needs to take over
* mastery. either way, this node should expect
* another message to resolve this. */
if (mle->master != dead_node && if (mle->master != dead_node &&
mle->new_master != dead_node) mle->new_master != dead_node)
continue; continue;
/* if we have reached this point, this mle needs to /* If we have reached this point, this mle needs to be
* be removed from the list and freed. */ * removed from the list and freed. */
dlm_clean_migration_mle(dlm, mle); dlm_clean_migration_mle(dlm, mle);
mlog(0, "%s: node %u died during migration from " mlog(0, "%s: node %u died during migration from "
"%u to %u!\n", dlm->name, dead_node, "%u to %u!\n", dlm->name, dead_node, mle->master,
mle->master, mle->new_master); mle->new_master);
/* If we find a lockres associated with the mle, we've /* If we find a lockres associated with the mle, we've
* hit this rare case that messes up our lock ordering. * hit this rare case that messes up our lock ordering.
...@@ -3372,14 +3371,13 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) ...@@ -3372,14 +3371,13 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
/* restart */ /* restart */
goto top; goto top;
/* this may be the last reference */ /* This may be the last reference */
__dlm_put_mle(mle); __dlm_put_mle(mle);
} }
} }
spin_unlock(&dlm->master_lock); spin_unlock(&dlm->master_lock);
} }
int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
u8 old_master) u8 old_master)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment