Commit 58da1ffb authored by James Smart's avatar James Smart Committed by James Bottomley

[SCSI] lpfc 8.2.6 : Multiple discovery fixes

Multiple Discovery Fixes:
- Fix race on discovery due to link events coinciding with vport_delete.
- Use NLP_FABRIC state to filter out switch-based pseudo initiators that
   reuse the same WWNs.
- Correct erroneous setting of DID=0 in lpfc_matchdid()
- Correct extra reference count that was in the lookup path for the
  remoteid from an unsolicited ELS.
- Correct double-free bug in els abort path.
- Correct FDMI server discovery logic for switches that return a WWN of 0.
- Fix bugs in ndlp mgmt when a node changes address.
- Correct bug that did not delete RSCNs for vports upon link transitions
- Fix "0216 Link event during NS query" error which pops up when vports
  are swapped to different switch ports.
- Add sanity checks on ndlp structures
- Fix devloss log message to dump WWN correctly
- Hold off mgmt commands that were interfering with discovery mailbox cmds.
- Remove unnecessary FC_ESTABLISH_LINK logic.
- Correct some race conditions in the worker thread, resulting in devloss:
  - Clear the work_port_events field before handling the work port events
  - Clear the deferred ring event before handling a deferred ring event
  - Hold the hba lock when waking up the work thread
  - Send an ACC for the RSCN even when we aren't going to handle it
- Fix locking behavior that was not properly protecting the ACTIVE flag,
  thus allowing mailbox command order to shift.
Signed-off-by: default avatarJames Smart <james.smart@emulex.com>
Signed-off-by: default avatarJames Bottomley <James.Bottomley@HansenPartnership.com>
parent b35c07d0
...@@ -268,7 +268,6 @@ struct lpfc_vport { ...@@ -268,7 +268,6 @@ struct lpfc_vport {
#define FC_NLP_MORE 0x40 /* More node to process in node tbl */ #define FC_NLP_MORE 0x40 /* More node to process in node tbl */
#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */ #define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
#define FC_FABRIC 0x100 /* We are fabric attached */ #define FC_FABRIC 0x100 /* We are fabric attached */
#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ #define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
...@@ -433,8 +432,6 @@ struct lpfc_hba { ...@@ -433,8 +432,6 @@ struct lpfc_hba {
uint32_t fc_eventTag; /* event tag for link attention */ uint32_t fc_eventTag; /* event tag for link attention */
struct timer_list fc_estabtmo; /* link establishment timer */
/* These fields used to be binfo */ /* These fields used to be binfo */
uint32_t fc_pref_DID; /* preferred D_ID */ uint32_t fc_pref_DID; /* preferred D_ID */
uint8_t fc_pref_ALPA; /* preferred AL_PA */ uint8_t fc_pref_ALPA; /* preferred AL_PA */
......
...@@ -1962,7 +1962,11 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, ...@@ -1962,7 +1962,11 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
phba->sysfs_mbox.mbox->vport = vport; phba->sysfs_mbox.mbox->vport = vport;
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { /* Don't allow mailbox commands to be sent when blocked
* or when in the middle of discovery
*/
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO ||
vport->fc_flag & FC_NDISC_ACTIVE) {
sysfs_mbox_idle(phba); sysfs_mbox_idle(phba);
spin_unlock_irq(&phba->hbalock); spin_unlock_irq(&phba->hbalock);
return -EAGAIN; return -EAGAIN;
......
...@@ -438,7 +438,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) ...@@ -438,7 +438,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
(!(vport->ct_flags & FC_CT_RFF_ID)) || (!(vport->ct_flags & FC_CT_RFF_ID)) ||
(!vport->cfg_restrict_login)) { (!vport->cfg_restrict_login)) {
ndlp = lpfc_setup_disc_node(vport, Did); ndlp = lpfc_setup_disc_node(vport, Did);
if (ndlp) { if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
lpfc_debugfs_disc_trc(vport, lpfc_debugfs_disc_trc(vport,
LPFC_DISC_TRC_CT, LPFC_DISC_TRC_CT,
"Parse GID_FTrsp: " "Parse GID_FTrsp: "
...@@ -543,7 +543,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -543,7 +543,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *outp; struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp; struct lpfc_sli_ct_request *CTrsp;
struct lpfc_nodelist *ndlp; struct lpfc_nodelist *ndlp;
int rc, retry; int rc;
/* First save ndlp, before we overwrite it */ /* First save ndlp, before we overwrite it */
ndlp = cmdiocb->context_un.ndlp; ndlp = cmdiocb->context_un.ndlp;
...@@ -563,45 +563,29 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -563,45 +563,29 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (vport->load_flag & FC_UNLOADING) if (vport->load_flag & FC_UNLOADING)
goto out; goto out;
if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) { if (lpfc_els_chk_latt(vport)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0216 Link event during NS query\n"); "0216 Link event during NS query\n");
lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out; goto out;
} }
if (lpfc_error_lost_link(irsp)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0226 NS query failed due to link event\n");
goto out;
}
if (irsp->ulpStatus) { if (irsp->ulpStatus) {
/* Check for retry */ /* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
retry = 1; if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { irsp->un.ulpWord[4] != IOERR_NO_RESOURCES)
switch (irsp->un.ulpWord[4]) {
case IOERR_NO_RESOURCES:
/* We don't increment the retry
* count for this case.
*/
break;
case IOERR_LINK_DOWN:
case IOERR_SLI_ABORTED:
case IOERR_SLI_DOWN:
retry = 0;
break;
default:
vport->fc_ns_retry++;
}
}
else
vport->fc_ns_retry++; vport->fc_ns_retry++;
if (retry) { /* CT command is being retried */
/* CT command is being retried */ rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
vport->fc_ns_retry, 0); vport->fc_ns_retry, 0);
if (rc == 0) { if (rc == 0)
/* success */ goto out;
goto out;
}
}
} }
lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
...@@ -780,7 +764,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -780,7 +764,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* This is a target port, unregistered port, or the GFF_ID failed */ /* This is a target port, unregistered port, or the GFF_ID failed */
ndlp = lpfc_setup_disc_node(vport, did); ndlp = lpfc_setup_disc_node(vport, did);
if (ndlp) { if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0242 Process x%x GFF " "0242 Process x%x GFF "
"NameServer Rsp Data: x%x x%x x%x\n", "NameServer Rsp Data: x%x x%x x%x\n",
......
...@@ -503,6 +503,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) ...@@ -503,6 +503,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
ndlp->nlp_sid); ndlp->nlp_sid);
if (ndlp->nlp_type & NLP_FCP_INITIATOR) if (ndlp->nlp_type & NLP_FCP_INITIATOR)
len += snprintf(buf+len, size-len, "FCP_INITIATOR "); len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
len += snprintf(buf+len, size-len, "usgmap:%x ",
ndlp->nlp_usg_map);
len += snprintf(buf+len, size-len, "refcnt:%x", len += snprintf(buf+len, size-len, "refcnt:%x",
atomic_read(&ndlp->kref.refcount)); atomic_read(&ndlp->kref.refcount));
len += snprintf(buf+len, size-len, "\n"); len += snprintf(buf+len, size-len, "\n");
......
...@@ -719,9 +719,9 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba) ...@@ -719,9 +719,9 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR && if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
icmd->un.elsreq64.bdl.ulpIoTag32) { icmd->un.elsreq64.bdl.ulpIoTag32) {
ndlp = (struct lpfc_nodelist *)(iocb->context1); ndlp = (struct lpfc_nodelist *)(iocb->context1);
if (ndlp && (ndlp->nlp_DID == Fabric_DID)) { if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
(ndlp->nlp_DID == Fabric_DID))
lpfc_sli_issue_abort_iotag(phba, pring, iocb); lpfc_sli_issue_abort_iotag(phba, pring, iocb);
}
} }
} }
spin_unlock_irq(&phba->hbalock); spin_unlock_irq(&phba->hbalock);
...@@ -829,7 +829,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, ...@@ -829,7 +829,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct fc_rport *rport; struct fc_rport *rport;
struct serv_parm *sp; struct serv_parm *sp;
uint8_t name[sizeof(struct lpfc_name)]; uint8_t name[sizeof(struct lpfc_name)];
uint32_t rc; uint32_t rc, keepDID = 0;
/* Fabric nodes can have the same WWPN so we don't bother searching /* Fabric nodes can have the same WWPN so we don't bother searching
* by WWPN. Just return the ndlp that was given to us. * by WWPN. Just return the ndlp that was given to us.
...@@ -858,11 +858,17 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, ...@@ -858,11 +858,17 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
return ndlp; return ndlp;
lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
} else if (!NLP_CHK_NODE_ACT(new_ndlp)) { } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
rc = memcmp(&ndlp->nlp_portname, name,
sizeof(struct lpfc_name));
if (!rc)
return ndlp;
new_ndlp = lpfc_enable_node(vport, new_ndlp, new_ndlp = lpfc_enable_node(vport, new_ndlp,
NLP_STE_UNUSED_NODE); NLP_STE_UNUSED_NODE);
if (!new_ndlp) if (!new_ndlp)
return ndlp; return ndlp;
} keepDID = new_ndlp->nlp_DID;
} else
keepDID = new_ndlp->nlp_DID;
lpfc_unreg_rpi(vport, new_ndlp); lpfc_unreg_rpi(vport, new_ndlp);
new_ndlp->nlp_DID = ndlp->nlp_DID; new_ndlp->nlp_DID = ndlp->nlp_DID;
...@@ -893,12 +899,24 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, ...@@ -893,12 +899,24 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
} }
new_ndlp->nlp_type = ndlp->nlp_type; new_ndlp->nlp_type = ndlp->nlp_type;
} }
/* We shall actually free the ndlp with both nlp_DID and
* nlp_portname fields equals 0 to avoid any ndlp on the
* nodelist never to be used.
*/
if (ndlp->nlp_DID == 0) {
spin_lock_irq(&phba->ndlp_lock);
NLP_SET_FREE_REQ(ndlp);
spin_unlock_irq(&phba->ndlp_lock);
}
/* Two ndlps cannot have the same did on the nodelist */
ndlp->nlp_DID = keepDID;
lpfc_drop_node(vport, ndlp); lpfc_drop_node(vport, ndlp);
} }
else { else {
lpfc_unreg_rpi(vport, ndlp); lpfc_unreg_rpi(vport, ndlp);
ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ /* Two ndlps cannot have the same did */
ndlp->nlp_DID = keepDID;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
} }
return new_ndlp; return new_ndlp;
...@@ -2091,7 +2109,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -2091,7 +2109,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
} }
phba->fc_stat.elsXmitRetry++; phba->fc_stat.elsXmitRetry++;
if (ndlp && delay) { if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
phba->fc_stat.elsDelayRetry++; phba->fc_stat.elsDelayRetry++;
ndlp->nlp_retry = cmdiocb->retry; ndlp->nlp_retry = cmdiocb->retry;
...@@ -2121,7 +2139,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -2121,7 +2139,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
return 1; return 1;
case ELS_CMD_PLOGI: case ELS_CMD_PLOGI:
if (ndlp) { if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
ndlp->nlp_prev_state = ndlp->nlp_state; ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PLOGI_ISSUE); NLP_STE_PLOGI_ISSUE);
...@@ -2302,7 +2320,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ...@@ -2302,7 +2320,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_mbuf_free(phba, mp->virt, mp->phys); lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp); kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool); mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp) { if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
lpfc_nlp_put(ndlp); lpfc_nlp_put(ndlp);
/* This is the end of the default RPI cleanup logic for this /* This is the end of the default RPI cleanup logic for this
* ndlp. If no other discovery threads are using this ndlp. * ndlp. If no other discovery threads are using this ndlp.
...@@ -2335,7 +2353,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -2335,7 +2353,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* function can have cmdiocb->contest1 (ndlp) field set to NULL. * function can have cmdiocb->contest1 (ndlp) field set to NULL.
*/ */
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) { if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
(*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
/* A LS_RJT associated with Default RPI cleanup has its own /* A LS_RJT associated with Default RPI cleanup has its own
* seperate code path. * seperate code path.
*/ */
...@@ -2344,7 +2363,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -2344,7 +2363,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
} }
/* Check to see if link went down during discovery */ /* Check to see if link went down during discovery */
if (!ndlp || lpfc_els_chk_latt(vport)) { if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
if (mbox) { if (mbox) {
mp = (struct lpfc_dmabuf *) mbox->context1; mp = (struct lpfc_dmabuf *) mbox->context1;
if (mp) { if (mp) {
...@@ -2353,7 +2372,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -2353,7 +2372,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
} }
mempool_free(mbox, phba->mbox_mem_pool); mempool_free(mbox, phba->mbox_mem_pool);
} }
if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI)) if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
if (lpfc_nlp_not_used(ndlp)) { if (lpfc_nlp_not_used(ndlp)) {
ndlp = NULL; ndlp = NULL;
/* Indicate the node has already released, /* Indicate the node has already released,
...@@ -2443,7 +2463,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -2443,7 +2463,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool); mempool_free(mbox, phba->mbox_mem_pool);
} }
out: out:
if (ndlp) { if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
spin_lock_irq(shost->host_lock); spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
spin_unlock_irq(shost->host_lock); spin_unlock_irq(shost->host_lock);
...@@ -3139,6 +3159,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, ...@@ -3139,6 +3159,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Another thread is walking fc_rscn_id_list on this vport */ /* Another thread is walking fc_rscn_id_list on this vport */
spin_unlock_irq(shost->host_lock); spin_unlock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_DISCOVERY; vport->fc_flag |= FC_RSCN_DISCOVERY;
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return 0; return 0;
} }
/* Indicate we are walking fc_rscn_id_list on this vport */ /* Indicate we are walking fc_rscn_id_list on this vport */
...@@ -3928,7 +3950,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) ...@@ -3928,7 +3950,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
else { else {
struct lpfc_nodelist *ndlp; struct lpfc_nodelist *ndlp;
ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
if (ndlp) if (ndlp && NLP_CHK_NODE_ACT(ndlp))
remote_ID = ndlp->nlp_DID; remote_ID = ndlp->nlp_DID;
} }
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
...@@ -4097,21 +4119,22 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, ...@@ -4097,21 +4119,22 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
newnode = 1; newnode = 1;
if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
ndlp->nlp_type |= NLP_FABRIC; ndlp->nlp_type |= NLP_FABRIC;
} else { } else if (!NLP_CHK_NODE_ACT(ndlp)) {
if (!NLP_CHK_NODE_ACT(ndlp)) { ndlp = lpfc_enable_node(vport, ndlp,
ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
NLP_STE_UNUSED_NODE); if (!ndlp)
if (!ndlp) goto dropit;
goto dropit; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
} newnode = 1;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
/* This is simular to the new node path */ ndlp->nlp_type |= NLP_FABRIC;
ndlp = lpfc_nlp_get(ndlp); } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
if (!ndlp) /* This is similar to the new node path */
goto dropit; ndlp = lpfc_nlp_get(ndlp);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); if (!ndlp)
newnode = 1; goto dropit;
} lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
newnode = 1;
} }
phba->fc_stat.elsRcvFrame++; phba->fc_stat.elsRcvFrame++;
...@@ -4451,7 +4474,6 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) ...@@ -4451,7 +4474,6 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
return; return;
} }
lpfc_nlp_init(vport, ndlp, NameServer_DID); lpfc_nlp_init(vport, ndlp, NameServer_DID);
ndlp->nlp_type |= NLP_FABRIC;
} else if (!NLP_CHK_NODE_ACT(ndlp)) { } else if (!NLP_CHK_NODE_ACT(ndlp)) {
ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
if (!ndlp) { if (!ndlp) {
...@@ -4465,6 +4487,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) ...@@ -4465,6 +4487,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
return; return;
} }
} }
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
...@@ -4481,8 +4504,8 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) ...@@ -4481,8 +4504,8 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
if (ndlp_fdmi) { if (ndlp_fdmi) {
lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
ndlp_fdmi->nlp_type |= NLP_FABRIC; ndlp_fdmi->nlp_type |= NLP_FABRIC;
ndlp_fdmi->nlp_state = lpfc_nlp_set_state(vport, ndlp_fdmi,
NLP_STE_PLOGI_ISSUE; NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
0); 0);
} }
......
...@@ -69,7 +69,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport) ...@@ -69,7 +69,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
rdata = rport->dd_data; rdata = rport->dd_data;
ndlp = rdata->pnode; ndlp = rdata->pnode;
if (!ndlp) { if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
printk(KERN_ERR "Cannot find remote node" printk(KERN_ERR "Cannot find remote node"
" to terminate I/O Data x%x\n", " to terminate I/O Data x%x\n",
...@@ -114,7 +114,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) ...@@ -114,7 +114,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
rdata = rport->dd_data; rdata = rport->dd_data;
ndlp = rdata->pnode; ndlp = rdata->pnode;
if (!ndlp) if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
return; return;
vport = ndlp->vport; vport = ndlp->vport;
...@@ -243,8 +243,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ...@@ -243,8 +243,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
if (warn_on) { if (warn_on) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0203 Devloss timeout on " "0203 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
"NPort x%x Data: x%x x%x x%x\n", "NPort x%06x Data: x%x x%x x%x\n",
*name, *(name+1), *(name+2), *(name+3), *name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7), *(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_DID, ndlp->nlp_flag,
...@@ -252,8 +252,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ...@@ -252,8 +252,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
} else { } else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0204 Devloss timeout on " "0204 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
"NPort x%x Data: x%x x%x x%x\n", "NPort x%06x Data: x%x x%x x%x\n",
*name, *(name+1), *(name+2), *(name+3), *name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7), *(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_DID, ndlp->nlp_flag,
...@@ -399,7 +399,10 @@ lpfc_work_done(struct lpfc_hba *phba) ...@@ -399,7 +399,10 @@ lpfc_work_done(struct lpfc_hba *phba)
vport = vports[i]; vport = vports[i];
if (vport == NULL) if (vport == NULL)
break; break;
spin_lock_irq(&vport->work_port_lock);
work_port_events = vport->work_port_events; work_port_events = vport->work_port_events;
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock);
if (work_port_events & WORKER_DISC_TMO) if (work_port_events & WORKER_DISC_TMO)
lpfc_disc_timeout_handler(vport); lpfc_disc_timeout_handler(vport);
if (work_port_events & WORKER_ELS_TMO) if (work_port_events & WORKER_ELS_TMO)
...@@ -416,9 +419,6 @@ lpfc_work_done(struct lpfc_hba *phba) ...@@ -416,9 +419,6 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_ramp_down_queue_handler(phba); lpfc_ramp_down_queue_handler(phba);
if (work_port_events & WORKER_RAMP_UP_QUEUE) if (work_port_events & WORKER_RAMP_UP_QUEUE)
lpfc_ramp_up_queue_handler(phba); lpfc_ramp_up_queue_handler(phba);
spin_lock_irq(&vport->work_port_lock);
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock);
} }
lpfc_destroy_vport_work_array(phba, vports); lpfc_destroy_vport_work_array(phba, vports);
...@@ -430,10 +430,10 @@ lpfc_work_done(struct lpfc_hba *phba) ...@@ -430,10 +430,10 @@ lpfc_work_done(struct lpfc_hba *phba)
if (pring->flag & LPFC_STOP_IOCB_EVENT) { if (pring->flag & LPFC_STOP_IOCB_EVENT) {
pring->flag |= LPFC_DEFERRED_RING_EVENT; pring->flag |= LPFC_DEFERRED_RING_EVENT;
} else { } else {
pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
lpfc_sli_handle_slow_ring_event(phba, pring, lpfc_sli_handle_slow_ring_event(phba, pring,
(status & (status &
HA_RXMASK)); HA_RXMASK));
pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
} }
/* /*
* Turn on Ring interrupts * Turn on Ring interrupts
...@@ -519,7 +519,9 @@ lpfc_do_work(void *p) ...@@ -519,7 +519,9 @@ lpfc_do_work(void *p)
schedule(); schedule();
} }
} }
spin_lock_irq(&phba->hbalock);
phba->work_wait = NULL; phba->work_wait = NULL;
spin_unlock_irq(&phba->hbalock);
return 0; return 0;
} }
...@@ -809,11 +811,9 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ...@@ -809,11 +811,9 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(pmb, phba->mbox_mem_pool); mempool_free(pmb, phba->mbox_mem_pool);
spin_lock_irq(shost->host_lock); spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK); vport->fc_flag &= ~FC_ABORT_DISCOVERY;
spin_unlock_irq(shost->host_lock); spin_unlock_irq(shost->host_lock);
del_timer_sync(&phba->fc_estabtmo);
lpfc_can_disctmo(vport); lpfc_can_disctmo(vport);
/* turn on Link Attention interrupts */ /* turn on Link Attention interrupts */
...@@ -1340,10 +1340,14 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ...@@ -1340,10 +1340,14 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
i++) { i++) {
if (vports[i]->port_type == LPFC_PHYSICAL_PORT) if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
continue; continue;
if (phba->fc_topology == TOPOLOGY_LOOP) {
lpfc_vport_set_state(vports[i],
FC_VPORT_LINKDOWN);
continue;
}
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
lpfc_initial_fdisc(vports[i]); lpfc_initial_fdisc(vports[i]);
else if (phba->sli3_options & else {
LPFC_SLI3_NPIV_ENABLED) {
lpfc_vport_set_state(vports[i], lpfc_vport_set_state(vports[i],
FC_VPORT_NO_FABRIC_SUPP); FC_VPORT_NO_FABRIC_SUPP);
lpfc_printf_vlog(vport, KERN_ERR, lpfc_printf_vlog(vport, KERN_ERR,
...@@ -2190,10 +2194,6 @@ lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ...@@ -2190,10 +2194,6 @@ lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (did == Bcast_DID) if (did == Bcast_DID)
return 0; return 0;
if (ndlp->nlp_DID == 0) {
return 0;
}
/* First check for Direct match */ /* First check for Direct match */
if (ndlp->nlp_DID == did) if (ndlp->nlp_DID == did)
return 1; return 1;
...@@ -2301,7 +2301,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) ...@@ -2301,7 +2301,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
return ndlp; return ndlp;
} }
if (vport->fc_flag & FC_RSCN_MODE) { if ((vport->fc_flag & FC_RSCN_MODE) &&
!(vport->fc_flag & FC_NDISC_ACTIVE)) {
if (lpfc_rscn_payload_check(vport, did)) { if (lpfc_rscn_payload_check(vport, did)) {
/* If we've already recieved a PLOGI from this NPort /* If we've already recieved a PLOGI from this NPort
* we don't need to try to discover it again. * we don't need to try to discover it again.
......
...@@ -559,8 +559,10 @@ lpfc_hb_timeout(unsigned long ptr) ...@@ -559,8 +559,10 @@ lpfc_hb_timeout(unsigned long ptr)
phba->pport->work_port_events |= WORKER_HB_TMO; phba->pport->work_port_events |= WORKER_HB_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->work_wait) if (phba->work_wait)
wake_up(phba->work_wait); wake_up(phba->work_wait);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return; return;
} }
...@@ -714,12 +716,10 @@ lpfc_handle_eratt(struct lpfc_hba *phba) ...@@ -714,12 +716,10 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
struct lpfc_vport *vport = phba->pport; struct lpfc_vport *vport = phba->pport;
struct lpfc_sli *psli = &phba->sli; struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring; struct lpfc_sli_ring *pring;
struct lpfc_vport **vports;
uint32_t event_data; uint32_t event_data;
unsigned long temperature; unsigned long temperature;
struct temp_event temp_event_data; struct temp_event temp_event_data;
struct Scsi_Host *shost; struct Scsi_Host *shost;
int i;
/* If the pci channel is offline, ignore possible errors, /* If the pci channel is offline, ignore possible errors,
* since we cannot communicate with the pci card anyway. */ * since we cannot communicate with the pci card anyway. */
...@@ -737,17 +737,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba) ...@@ -737,17 +737,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
"Data: x%x x%x x%x\n", "Data: x%x x%x x%x\n",
phba->work_hs, phba->work_hs,
phba->work_status[0], phba->work_status[1]); phba->work_status[0], phba->work_status[1]);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0;
i <= phba->max_vpi && vports[i] != NULL;
i++){
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->fc_flag |= FC_ESTABLISH_LINK;
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
spin_lock_irq(&phba->hbalock); spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE; psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock); spin_unlock_irq(&phba->hbalock);
...@@ -761,7 +751,6 @@ lpfc_handle_eratt(struct lpfc_hba *phba) ...@@ -761,7 +751,6 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
pring = &psli->ring[psli->fcp_ring]; pring = &psli->ring[psli->fcp_ring];
lpfc_sli_abort_iocb_ring(phba, pring); lpfc_sli_abort_iocb_ring(phba, pring);
/* /*
* There was a firmware error. Take the hba offline and then * There was a firmware error. Take the hba offline and then
* attempt to restart it. * attempt to restart it.
...@@ -770,7 +759,6 @@ lpfc_handle_eratt(struct lpfc_hba *phba) ...@@ -770,7 +759,6 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
lpfc_offline(phba); lpfc_offline(phba);
lpfc_sli_brdrestart(phba); lpfc_sli_brdrestart(phba);
if (lpfc_online(phba) == 0) { /* Initialize the HBA */ if (lpfc_online(phba) == 0) { /* Initialize the HBA */
mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
lpfc_unblock_mgmt_io(phba); lpfc_unblock_mgmt_io(phba);
return; return;
} }
...@@ -1454,6 +1442,13 @@ lpfc_cleanup(struct lpfc_vport *vport) ...@@ -1454,6 +1442,13 @@ lpfc_cleanup(struct lpfc_vport *vport)
NLP_SET_FREE_REQ(ndlp); NLP_SET_FREE_REQ(ndlp);
spin_unlock_irq(&phba->ndlp_lock); spin_unlock_irq(&phba->ndlp_lock);
if (vport->port_type != LPFC_PHYSICAL_PORT &&
ndlp->nlp_DID == Fabric_DID) {
/* Just free up ndlp with Fabric_DID for vports */
lpfc_nlp_put(ndlp);
continue;
}
if (ndlp->nlp_type & NLP_FABRIC) if (ndlp->nlp_type & NLP_FABRIC)
lpfc_disc_state_machine(vport, ndlp, NULL, lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY); NLP_EVT_DEVICE_RECOVERY);
...@@ -1491,31 +1486,6 @@ lpfc_cleanup(struct lpfc_vport *vport) ...@@ -1491,31 +1486,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
return; return;
} }
static void
lpfc_establish_link_tmo(unsigned long ptr)
{
struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
struct lpfc_vport **vports;
unsigned long iflag;
int i;
/* Re-establishing Link, timer expired */
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1300 Re-establishing Link, timer expired "
"Data: x%x x%x\n",
phba->pport->fc_flag, phba->pport->port_state);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
struct Scsi_Host *shost;
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irqsave(shost->host_lock, iflag);
vports[i]->fc_flag &= ~FC_ESTABLISH_LINK;
spin_unlock_irqrestore(shost->host_lock, iflag);
}
lpfc_destroy_vport_work_array(phba, vports);
}
void void
lpfc_stop_vport_timers(struct lpfc_vport *vport) lpfc_stop_vport_timers(struct lpfc_vport *vport)
{ {
...@@ -1529,7 +1499,6 @@ static void ...@@ -1529,7 +1499,6 @@ static void
lpfc_stop_phba_timers(struct lpfc_hba *phba) lpfc_stop_phba_timers(struct lpfc_hba *phba)
{ {
del_timer_sync(&phba->fcp_poll_timer); del_timer_sync(&phba->fcp_poll_timer);
del_timer_sync(&phba->fc_estabtmo);
lpfc_stop_vport_timers(phba->pport); lpfc_stop_vport_timers(phba->pport);
del_timer_sync(&phba->sli.mbox_tmo); del_timer_sync(&phba->sli.mbox_tmo);
del_timer_sync(&phba->fabric_block_timer); del_timer_sync(&phba->fabric_block_timer);
...@@ -2005,10 +1974,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) ...@@ -2005,10 +1974,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->max_vpi = LPFC_MAX_VPI; phba->max_vpi = LPFC_MAX_VPI;
/* Initialize timers used by driver */ /* Initialize timers used by driver */
init_timer(&phba->fc_estabtmo);
phba->fc_estabtmo.function = lpfc_establish_link_tmo;
phba->fc_estabtmo.data = (unsigned long)phba;
init_timer(&phba->hb_tmofunc); init_timer(&phba->hb_tmofunc);
phba->hb_tmofunc.function = lpfc_hb_timeout; phba->hb_tmofunc.function = lpfc_hb_timeout;
phba->hb_tmofunc.data = (unsigned long)phba; phba->hb_tmofunc.data = (unsigned long)phba;
...@@ -2416,11 +2381,6 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) ...@@ -2416,11 +2381,6 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev); pci_set_master(pdev);
/* Re-establishing Link */
spin_lock_irq(shost->host_lock);
phba->pport->fc_flag |= FC_ESTABLISH_LINK;
spin_unlock_irq(shost->host_lock);
spin_lock_irq(&phba->hbalock); spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE; psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock); spin_unlock_irq(&phba->hbalock);
...@@ -2445,9 +2405,7 @@ static void lpfc_io_resume(struct pci_dev *pdev) ...@@ -2445,9 +2405,7 @@ static void lpfc_io_resume(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev); struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
if (lpfc_online(phba) == 0) { lpfc_online(phba);
mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
}
} }
static struct pci_device_id lpfc_id_table[] = { static struct pci_device_id lpfc_id_table[] = {
......
...@@ -451,7 +451,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ...@@ -451,7 +451,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock); spin_unlock_irq(shost->host_lock);
if ((ndlp->nlp_flag & NLP_ADISC_SND) && if ((ndlp->nlp_flag & NLP_ADISC_SND) &&
(vport->num_disc_nodes)) { (vport->num_disc_nodes)) {
/* Check to see if there are more /* Check to see if there are more
* ADISCs to be sent * ADISCs to be sent
*/ */
...@@ -461,20 +461,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ...@@ -461,20 +461,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(vport->fc_npr_cnt)) (vport->fc_npr_cnt))
lpfc_els_disc_plogi(vport); lpfc_els_disc_plogi(vport);
if (vport->num_disc_nodes == 0) {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_NDISC_ACTIVE;
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
lpfc_end_rscn(vport);
}
}
else if (vport->num_disc_nodes) {
/* Check to see if there are more
* PLOGIs to be sent
*/
lpfc_more_plogi(vport);
if (vport->num_disc_nodes == 0) { if (vport->num_disc_nodes == 0) {
spin_lock_irq(shost->host_lock); spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_NDISC_ACTIVE; vport->fc_flag &= ~FC_NDISC_ACTIVE;
...@@ -484,6 +470,23 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ...@@ -484,6 +470,23 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
} }
} }
} }
} else if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
(vport->num_disc_nodes)) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
/* Check to see if there are more
* PLOGIs to be sent
*/
lpfc_more_plogi(vport);
if (vport->num_disc_nodes == 0) {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_NDISC_ACTIVE;
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
lpfc_end_rscn(vport);
}
} }
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
...@@ -869,8 +872,11 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ...@@ -869,8 +872,11 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
lp = (uint32_t *) prsp->virt; lp = (uint32_t *) prsp->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
if (wwn_to_u64(sp->portName.u.wwn) == 0 ||
wwn_to_u64(sp->nodeName.u.wwn) == 0) { /* Some switches have FDMI servers returning 0 for WWN */
if ((ndlp->nlp_DID != FDMI_DID) &&
(wwn_to_u64(sp->portName.u.wwn) == 0 ||
wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0142 PLOGI RSP: Invalid WWN.\n"); "0142 PLOGI RSP: Invalid WWN.\n");
goto out; goto out;
......
...@@ -578,14 +578,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, ...@@ -578,14 +578,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result == IOERR_NO_RESOURCES || lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == RJT_LOGIN_REQUIRED) { lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
cmd->result = ScsiResult(DID_REQUEUE, 0); cmd->result = ScsiResult(DID_REQUEUE, 0);
break; break;
} /* else: fall through */ } /* else: fall through */
default: default:
cmd->result = ScsiResult(DID_ERROR, 0); cmd->result = ScsiResult(DID_ERROR, 0);
break; break;
} }
if ((pnode == NULL ) if (!pnode || !NLP_CHK_NODE_ACT(pnode)
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE)) || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY); cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
} else { } else {
...@@ -626,7 +626,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, ...@@ -626,7 +626,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
if (!result) if (!result)
lpfc_rampup_queue_depth(vport, sdev); lpfc_rampup_queue_depth(vport, sdev);
if (!result && pnode != NULL && if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
((jiffies - pnode->last_ramp_up_time) > ((jiffies - pnode->last_ramp_up_time) >
LPFC_Q_RAMP_UP_INTERVAL * HZ) && LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
((jiffies - pnode->last_q_full_time) > ((jiffies - pnode->last_q_full_time) >
...@@ -654,7 +654,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, ...@@ -654,7 +654,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
* Check for queue full. If the lun is reporting queue full, then * Check for queue full. If the lun is reporting queue full, then
* back off the lun queue depth to prevent target overloads. * back off the lun queue depth to prevent target overloads.
*/ */
if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) { if (result == SAM_STAT_TASK_SET_FULL && pnode &&
NLP_CHK_NODE_ACT(pnode)) {
pnode->last_q_full_time = jiffies; pnode->last_q_full_time = jiffies;
shost_for_each_device(tmp_sdev, sdev->host) { shost_for_each_device(tmp_sdev, sdev->host) {
...@@ -704,6 +705,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, ...@@ -704,6 +705,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
int datadir = scsi_cmnd->sc_data_direction; int datadir = scsi_cmnd->sc_data_direction;
char tag[2]; char tag[2];
if (!pnode || !NLP_CHK_NODE_ACT(pnode))
return;
lpfc_cmd->fcp_rsp->rspSnsLen = 0; lpfc_cmd->fcp_rsp->rspSnsLen = 0;
/* clear task management bits */ /* clear task management bits */
lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
...@@ -785,9 +789,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, ...@@ -785,9 +789,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
struct lpfc_rport_data *rdata = lpfc_cmd->rdata; struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *ndlp = rdata->pnode; struct lpfc_nodelist *ndlp = rdata->pnode;
if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
ndlp->nlp_state != NLP_STE_MAPPED_NODE)
return 0; return 0;
}
piocbq = &(lpfc_cmd->cur_iocbq); piocbq = &(lpfc_cmd->cur_iocbq);
piocbq->vport = vport; piocbq->vport = vport;
...@@ -842,7 +846,7 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, ...@@ -842,7 +846,7 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
struct lpfc_iocbq *iocbqrsp; struct lpfc_iocbq *iocbqrsp;
int ret; int ret;
if (!rdata->pnode) if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
return FAILED; return FAILED;
lpfc_cmd->rdata = rdata; lpfc_cmd->rdata = rdata;
...@@ -959,7 +963,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) ...@@ -959,7 +963,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
* Catch race where our node has transitioned, but the * Catch race where our node has transitioned, but the
* transport is still transitioning. * transport is still transitioning.
*/ */
if (!ndlp) { if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
cmnd->result = ScsiResult(DID_BUS_BUSY, 0); cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
goto out_fail_command; goto out_fail_command;
} }
...@@ -1146,7 +1150,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) ...@@ -1146,7 +1150,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
* target is rediscovered or devloss timeout expires. * target is rediscovered or devloss timeout expires.
*/ */
while (1) { while (1) {
if (!pnode) if (!pnode || !NLP_CHK_NODE_ACT(pnode))
goto out; goto out;
if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
...@@ -1162,7 +1166,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) ...@@ -1162,7 +1166,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
goto out; goto out;
} }
pnode = rdata->pnode; pnode = rdata->pnode;
if (!pnode) if (!pnode || !NLP_CHK_NODE_ACT(pnode))
goto out; goto out;
} }
if (pnode->nlp_state == NLP_STE_MAPPED_NODE) if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
......
...@@ -2648,7 +2648,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) ...@@ -2648,7 +2648,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
spin_unlock_irq(&phba->pport->work_port_lock); spin_unlock_irq(&phba->pport->work_port_lock);
spin_lock_irq(&phba->hbalock); spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_LINK_UNKNOWN; phba->link_state = LPFC_LINK_UNKNOWN;
phba->pport->fc_flag |= FC_ESTABLISH_LINK;
psli->sli_flag &= ~LPFC_SLI2_ACTIVE; psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock); spin_unlock_irq(&phba->hbalock);
...@@ -2669,8 +2668,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) ...@@ -2669,8 +2668,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
lpfc_offline_prep(phba); lpfc_offline_prep(phba);
lpfc_offline(phba); lpfc_offline(phba);
lpfc_sli_brdrestart(phba); lpfc_sli_brdrestart(phba);
if (lpfc_online(phba) == 0) /* Initialize the HBA */ lpfc_online(phba);
mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
lpfc_unblock_mgmt_io(phba); lpfc_unblock_mgmt_io(phba);
return; return;
} }
...@@ -2687,28 +2685,41 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) ...@@ -2687,28 +2685,41 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
unsigned long drvr_flag = 0; unsigned long drvr_flag = 0;
volatile uint32_t word0, ldata; volatile uint32_t word0, ldata;
void __iomem *to_slim; void __iomem *to_slim;
int processing_queue = 0;
spin_lock_irqsave(&phba->hbalock, drvr_flag);
if (!pmbox) {
/* processing mbox queue from intr_handler */
processing_queue = 1;
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
pmbox = lpfc_mbox_get(phba);
if (!pmbox) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
return MBX_SUCCESS;
}
}
if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
if(!pmbox->vport) { if(!pmbox->vport) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
lpfc_printf_log(phba, KERN_ERR, lpfc_printf_log(phba, KERN_ERR,
LOG_MBOX | LOG_VPORT, LOG_MBOX | LOG_VPORT,
"1806 Mbox x%x failed. No vport\n", "1806 Mbox x%x failed. No vport\n",
pmbox->mb.mbxCommand); pmbox->mb.mbxCommand);
dump_stack(); dump_stack();
return MBX_NOT_FINISHED; goto out_not_finished;
} }
} }
/* If the PCI channel is in offline state, do not post mbox. */ /* If the PCI channel is in offline state, do not post mbox. */
if (unlikely(pci_channel_offline(phba->pcidev))) if (unlikely(pci_channel_offline(phba->pcidev))) {
return MBX_NOT_FINISHED; spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
goto out_not_finished;
}
spin_lock_irqsave(&phba->hbalock, drvr_flag);
psli = &phba->sli; psli = &phba->sli;
mb = &pmbox->mb; mb = &pmbox->mb;
status = MBX_SUCCESS; status = MBX_SUCCESS;
...@@ -2717,14 +2728,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) ...@@ -2717,14 +2728,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
/* Mbox command <mbxCommand> cannot issue */ /* Mbox command <mbxCommand> cannot issue */
LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
return MBX_NOT_FINISHED; goto out_not_finished;
} }
if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
!(readl(phba->HCregaddr) & HC_MBINT_ENA)) { !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag); spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
return MBX_NOT_FINISHED; goto out_not_finished;
} }
if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
...@@ -2738,14 +2749,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) ...@@ -2738,14 +2749,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
/* Mbox command <mbxCommand> cannot issue */ /* Mbox command <mbxCommand> cannot issue */
LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
return MBX_NOT_FINISHED; goto out_not_finished;
} }
if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag); spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */ /* Mbox command <mbxCommand> cannot issue */
LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
return MBX_NOT_FINISHED; goto out_not_finished;
} }
/* Another mailbox command is still being processed, queue this /* Another mailbox command is still being processed, queue this
...@@ -2792,7 +2803,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) ...@@ -2792,7 +2803,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
spin_unlock_irqrestore(&phba->hbalock, drvr_flag); spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */ /* Mbox command <mbxCommand> cannot issue */
LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
return MBX_NOT_FINISHED; goto out_not_finished;
} }
/* timeout active mbox command */ /* timeout active mbox command */
mod_timer(&psli->mbox_tmo, (jiffies + mod_timer(&psli->mbox_tmo, (jiffies +
...@@ -2900,7 +2911,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) ...@@ -2900,7 +2911,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock, spin_unlock_irqrestore(&phba->hbalock,
drvr_flag); drvr_flag);
return MBX_NOT_FINISHED; goto out_not_finished;
} }
/* Check if we took a mbox interrupt while we were /* Check if we took a mbox interrupt while we were
...@@ -2967,6 +2978,13 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) ...@@ -2967,6 +2978,13 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
spin_unlock_irqrestore(&phba->hbalock, drvr_flag); spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
return status; return status;
out_not_finished:
if (processing_queue) {
pmbox->mb.mbxStatus = MBX_NOT_FINISHED;
lpfc_mbox_cmpl_put(phba, pmbox);
}
return MBX_NOT_FINISHED;
} }
/* /*
...@@ -3612,6 +3630,16 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ...@@ -3612,6 +3630,16 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abort_iocb, abort_iotag, abort_context, abort_iocb, abort_iotag, abort_context,
irsp->ulpStatus, irsp->un.ulpWord[4]); irsp->ulpStatus, irsp->un.ulpWord[4]);
/*
* If the iocb is not found in Firmware queue the iocb
* might have completed already. Do not free it again.
*/
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
(irsp->un.ulpWord[4] == IOERR_NO_XRI)) {
spin_unlock_irq(&phba->hbalock);
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
/* /*
* make sure we have the right iocbq before taking it * make sure we have the right iocbq before taking it
* off the txcmplq and try to call completion routine. * off the txcmplq and try to call completion routine.
...@@ -4237,10 +4265,15 @@ lpfc_intr_handler(int irq, void *dev_id) ...@@ -4237,10 +4265,15 @@ lpfc_intr_handler(int irq, void *dev_id)
pmb->context1 = mp; pmb->context1 = mp;
pmb->context2 = ndlp; pmb->context2 = ndlp;
pmb->vport = vport; pmb->vport = vport;
spin_lock(&phba->hbalock); rc = lpfc_sli_issue_mbox(phba,
phba->sli.sli_flag &= pmb,
~LPFC_SLI_MBOX_ACTIVE; MBX_NOWAIT);
spin_unlock(&phba->hbalock); if (rc != MBX_BUSY)
lpfc_printf_log(phba,
KERN_ERR,
LOG_MBOX | LOG_SLI,
"0306 rc should have"
"been MBX_BUSY");
goto send_current_mbox; goto send_current_mbox;
} }
} }
...@@ -4253,22 +4286,16 @@ lpfc_intr_handler(int irq, void *dev_id) ...@@ -4253,22 +4286,16 @@ lpfc_intr_handler(int irq, void *dev_id)
} }
if ((work_ha_copy & HA_MBATT) && if ((work_ha_copy & HA_MBATT) &&
(phba->sli.mbox_active == NULL)) { (phba->sli.mbox_active == NULL)) {
send_next_mbox:
spin_lock(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
pmb = lpfc_mbox_get(phba);
spin_unlock(&phba->hbalock);
send_current_mbox: send_current_mbox:
/* Process next mailbox command if there is one */ /* Process next mailbox command if there is one */
if (pmb != NULL) { do {
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); rc = lpfc_sli_issue_mbox(phba, NULL,
if (rc == MBX_NOT_FINISHED) { MBX_NOWAIT);
pmb->mb.mbxStatus = MBX_NOT_FINISHED; } while (rc == MBX_NOT_FINISHED);
lpfc_mbox_cmpl_put(phba, pmb); if (rc != MBX_SUCCESS)
goto send_next_mbox; lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
} LOG_SLI, "0349 rc should be "
} "MBX_SUCCESS");
} }
spin_lock(&phba->hbalock); spin_lock(&phba->hbalock);
......
...@@ -538,7 +538,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport) ...@@ -538,7 +538,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
/* Otherwise, we will perform fabric logo as needed */ /* Otherwise, we will perform fabric logo as needed */
if (ndlp && NLP_CHK_NODE_ACT(ndlp) && if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
phba->link_state >= LPFC_LINK_UP) { phba->link_state >= LPFC_LINK_UP &&
phba->fc_topology != TOPOLOGY_LOOP) {
if (vport->cfg_enable_da_id) { if (vport->cfg_enable_da_id) {
timeout = msecs_to_jiffies(phba->fc_ratov * 2000); timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0)) if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment