Commit 98c9ea5c authored by James Smart, committed by James Bottomley

[SCSI] lpfc 8.2.3 : Miscellaneous Small Fixes - part 1

Miscellaneous Small Fixes - part 1
- Fix typo kmzlloc -> kzalloc
- Fix discovery ndlp use after free panic
- Fix link event causing flood of 0108 messages
- Relieve some mbox congestion on link up with 100 vports
- Fix broken vport parameters
- Prevent lock recursion in logo_reglogin_issue
- Split uses of error variable in lpfc_pci_probe_one into retval and error
- Remove completion code related to dev_loss_tmo
- Remove unused LPFC_MAX_HBQ #define
- Don't compare pointers to 0 for sparse
- Make 2 functions static for sparse
- Fix default rpi cleanup code causing rogue ndlps to remain on the NPR list
- Remove annoying ELS messages when driver is unloaded
- Fix Cannot issue Register Fabric login problems on link up
- Remove LPFC_EVT_DEV_LOSS_DELAY
- Fix FC port swap test leads to device going offline
- Fix vport CT flags to only be set when accepted
- Add code to handle signals during vport_create
- Fix too many retries in FC-AL mode
- Pull lpfc_port_link_failure out of lpfc_linkdown_port
Signed-off-by: James Smart <James.Smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
parent 0b727fea
@@ -2363,13 +2363,67 @@ struct fc_function_template lpfc_transport_functions = {
 	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
 	.terminate_rport_io = lpfc_terminate_rport_io,
 
-	/* Vport fields are filled in at runtime based on enable_npiv */
-	.vport_create = NULL,
-	.vport_delete = NULL,
-	.vport_disable = NULL,
-
 	.dd_fcvport_size = sizeof(struct lpfc_vport *),
 };
 
+struct fc_function_template lpfc_vport_transport_functions = {
+	/* fixed attributes the driver supports */
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_supported_speeds = 1,
+	.show_host_maxframe_size = 1,
+
+	/* dynamic attributes the driver supports */
+	.get_host_port_id = lpfc_get_host_port_id,
+	.show_host_port_id = 1,
+
+	.get_host_port_type = lpfc_get_host_port_type,
+	.show_host_port_type = 1,
+
+	.get_host_port_state = lpfc_get_host_port_state,
+	.show_host_port_state = 1,
+
+	/* active_fc4s is shown but doesn't change (thus no get function) */
+	.show_host_active_fc4s = 1,
+
+	.get_host_speed = lpfc_get_host_speed,
+	.show_host_speed = 1,
+
+	.get_host_fabric_name = lpfc_get_host_fabric_name,
+	.show_host_fabric_name = 1,
+
+	/*
+	 * The LPFC driver treats linkdown handling as target loss events
+	 * so there are no sysfs handlers for link_down_tmo.
+	 */
+
+	.get_fc_host_stats = lpfc_get_stats,
+	.reset_fc_host_stats = lpfc_reset_stats,
+
+	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+	.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+
+	.get_starget_port_id = lpfc_get_starget_port_id,
+	.show_starget_port_id = 1,
+
+	.get_starget_node_name = lpfc_get_starget_node_name,
+	.show_starget_node_name = 1,
+
+	.get_starget_port_name = lpfc_get_starget_port_name,
+	.show_starget_port_name = 1,
+
+	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
+	.terminate_rport_io = lpfc_terminate_rport_io,
+
+	.vport_disable = lpfc_vport_disable,
+};
+
 void
 lpfc_get_cfgparam(struct lpfc_hba *phba)
 {
......
@@ -68,6 +68,7 @@ int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
 void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t);
 struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
 int lpfc_nlp_put(struct lpfc_nodelist *);
+int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
 struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
 void lpfc_disc_list_loopmap(struct lpfc_vport *);
 void lpfc_disc_start(struct lpfc_vport *);
@@ -260,6 +261,7 @@ extern struct class_device_attribute *lpfc_vport_attrs[];
 extern struct scsi_host_template lpfc_template;
 extern struct scsi_host_template lpfc_vport_template;
 extern struct fc_function_template lpfc_transport_functions;
+extern struct fc_function_template lpfc_vport_transport_functions;
 extern int lpfc_sli_mode;
 extern int lpfc_enable_npiv;
......
@@ -458,7 +458,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
 		    ((lpfc_find_vport_by_did(phba, Did) == NULL) ||
 		      vport->cfg_peer_port_login)) {
 			if ((vport->port_type != LPFC_NPIV_PORT) ||
-			    (!vport->ct_flags & FC_CT_RFF_ID) ||
+			    (!(vport->ct_flags & FC_CT_RFF_ID)) ||
 			    (!vport->cfg_restrict_login)) {
 				ndlp = lpfc_setup_disc_node(vport, Did);
 				if (ndlp) {
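The one-character change above is a C operator-precedence fix: logical NOT binds tighter than bitwise AND, so the old test computed (!vport->ct_flags) & FC_CT_RFF_ID instead of testing whether the RFF_ID bit is clear. A small standalone illustration (not part of the diff; the flag value is made up for the demo):

#include <stdio.h>

#define FC_CT_RFF_ID 0x10	/* hypothetical bit value, for illustration only */

int main(void)
{
	unsigned int ct_flags = FC_CT_RFF_ID;	/* the bit is already set */

	/* Old form: parsed as (!ct_flags) & FC_CT_RFF_ID, which evaluates to 0
	 * whether the bit is set or clear (as long as the flag is not bit 0). */
	printf("broken test: %d\n", !ct_flags & FC_CT_RFF_ID);   /* prints 0 */

	/* Fixed form: true exactly when the RFF_ID bit is clear. */
	printf("fixed test:  %d\n", !(ct_flags & FC_CT_RFF_ID)); /* prints 0, bit is set */
	return 0;
}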
@@ -854,8 +854,16 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	IOCB_t *irsp = &rspiocb->iocb;
 	struct lpfc_vport *vport = cmdiocb->vport;
 
-	if (irsp->ulpStatus == IOSTAT_SUCCESS)
-		vport->ct_flags |= FC_CT_RFT_ID;
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RFT_ID;
+	}
 	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
 	return;
 }
@@ -867,8 +875,16 @@ lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	IOCB_t *irsp = &rspiocb->iocb;
 	struct lpfc_vport *vport = cmdiocb->vport;
 
-	if (irsp->ulpStatus == IOSTAT_SUCCESS)
-		vport->ct_flags |= FC_CT_RNN_ID;
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RNN_ID;
+	}
 	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
 	return;
 }
@@ -880,8 +896,16 @@ lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	IOCB_t *irsp = &rspiocb->iocb;
 	struct lpfc_vport *vport = cmdiocb->vport;
 
-	if (irsp->ulpStatus == IOSTAT_SUCCESS)
-		vport->ct_flags |= FC_CT_RSPN_ID;
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RSPN_ID;
+	}
 	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
 	return;
 }
@@ -893,8 +917,16 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	IOCB_t *irsp = &rspiocb->iocb;
 	struct lpfc_vport *vport = cmdiocb->vport;
 
-	if (irsp->ulpStatus == IOSTAT_SUCCESS)
-		vport->ct_flags |= FC_CT_RSNN_NN;
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RSNN_NN;
+	}
 	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
 	return;
 }
@@ -918,8 +950,16 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	IOCB_t *irsp = &rspiocb->iocb;
 	struct lpfc_vport *vport = cmdiocb->vport;
 
-	if (irsp->ulpStatus == IOSTAT_SUCCESS)
-		vport->ct_flags |= FC_CT_RFF_ID;
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RFF_ID;
+	}
 	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
 	return;
 }
......
@@ -36,7 +36,6 @@ enum lpfc_work_type {
 	LPFC_EVT_WARM_START,
 	LPFC_EVT_KILL,
 	LPFC_EVT_ELS_RETRY,
-	LPFC_EVT_DEV_LOSS_DELAY,
 	LPFC_EVT_DEV_LOSS,
 };
@@ -104,6 +103,7 @@ struct lpfc_nodelist {
 #define NLP_RM_DFLT_RPI    0x4000000	/* need to remove leftover dflt RPI */
 #define NLP_NODEV_REMOVE   0x8000000	/* Defer removal till discovery ends */
 #define NLP_TARGET_REMOVE  0x10000000	/* Target remove in process */
+#define NLP_DELAYED_RM     0x20000000	/* Defer UNUSED List removal */
 
 /* There are 4 different double linked lists nodelist entries can reside on.
  * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
......
This diff is collapsed.
@@ -107,7 +107,6 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
-	struct completion devloss_compl;
 	struct lpfc_work_evt *evtp;
 
 	rdata = rport->dd_data;
@@ -129,7 +128,6 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 		"rport devlosscb: sid:x%x did:x%x flg:x%x",
 		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
 
-	init_completion(&devloss_compl);
 	evtp = &ndlp->dev_loss_evt;
 
 	if (!list_empty(&evtp->evt_listp))
@@ -137,7 +135,6 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 
 	spin_lock_irq(&phba->hbalock);
 	evtp->evt_arg1 = ndlp;
-	evtp->evt_arg2 = &devloss_compl;
 	evtp->evt = LPFC_EVT_DEV_LOSS;
 	list_add_tail(&evtp->evt_listp, &phba->work_list);
 	if (phba->work_wait)
@@ -145,8 +142,6 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 
 	spin_unlock_irq(&phba->hbalock);
 
-	wait_for_completion(&devloss_compl);
-
 	return;
 }
@@ -260,7 +255,6 @@ lpfc_work_list_done(struct lpfc_hba *phba)
 {
 	struct lpfc_work_evt *evtp = NULL;
 	struct lpfc_nodelist *ndlp;
-	struct lpfc_vport *vport;
 	int free_evt;
 
 	spin_lock_irq(&phba->hbalock);
@@ -270,24 +264,6 @@ lpfc_work_list_done(struct lpfc_hba *phba)
 		spin_unlock_irq(&phba->hbalock);
 		free_evt = 1;
 		switch (evtp->evt) {
-		case LPFC_EVT_DEV_LOSS_DELAY:
-			free_evt = 0; /* evt is part of ndlp */
-			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
-			vport = ndlp->vport;
-			if (!vport)
-				break;
-
-			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
-				"rport devlossdly:did:x%x flg:x%x",
-				ndlp->nlp_DID, ndlp->nlp_flag, 0);
-
-			if (!(vport->load_flag & FC_UNLOADING) &&
-			    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
-			    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
-				lpfc_disc_state_machine(vport, ndlp, NULL,
-					NLP_EVT_DEVICE_RM);
-			}
-			break;
 		case LPFC_EVT_ELS_RETRY:
 			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
 			lpfc_els_retry_delay_handler(ndlp);
@@ -298,7 +274,6 @@ lpfc_work_list_done(struct lpfc_hba *phba)
 			lpfc_nlp_get(ndlp);
 			lpfc_dev_loss_tmo_handler(ndlp);
 			free_evt = 0;
-			complete((struct completion *)(evtp->evt_arg2));
 			lpfc_nlp_put(ndlp);
 			break;
 		case LPFC_EVT_ONLINE:
@@ -552,7 +527,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
 			continue;
 
-		if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
+		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
+		    ((vport->port_type == LPFC_NPIV_PORT) &&
+		     (ndlp->nlp_DID == NameServer_DID)))
 			lpfc_unreg_rpi(vport, ndlp);
 
 		/* Leave Fabric nodes alone on link down */
@@ -570,16 +547,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 }
 
 static void
-lpfc_linkdown_port(struct lpfc_vport *vport)
+lpfc_port_link_failure(struct lpfc_vport *vport)
 {
 	struct lpfc_nodelist *ndlp, *next_ndlp;
-	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
-	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
-
-	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
-		"Link Down: state:x%x rtry:x%x flg:x%x",
-		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
 
 	/* Cleanup any outstanding RSCN activity */
 	lpfc_els_flush_rscn(vport);
@@ -598,6 +568,21 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
 	lpfc_can_disctmo(vport);
 }
 
+static void
+lpfc_linkdown_port(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Link Down: state:x%x rtry:x%x flg:x%x",
+		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
+	lpfc_port_link_failure(vport);
+}
+
 int
 lpfc_linkdown(struct lpfc_hba *phba)
 {
@@ -851,8 +836,6 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	 * LPFC_FLOGI while waiting for FLOGI cmpl
 	 */
 	if (vport->port_state != LPFC_FLOGI) {
-		vport->port_state = LPFC_FLOGI;
-		lpfc_set_disctmo(vport);
 		lpfc_initial_flogi(vport);
 	}
 	return;
@@ -1622,6 +1605,16 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			ndlp->nlp_type &= ~NLP_FC_NODE;
 	}
 
+	if ((old_state == NLP_STE_UNUSED_NODE) &&
+	    (state != NLP_STE_UNUSED_NODE) &&
+	    (ndlp->nlp_flag & NLP_DELAYED_RM)) {
+		/* We are using the ndlp after all, so reverse
+		 * the delayed removal of it.
+		 */
+		ndlp->nlp_flag &= ~NLP_DELAYED_RM;
+		lpfc_nlp_get(ndlp);
+	}
+
 	if (list_empty(&ndlp->nlp_listp)) {
 		spin_lock_irq(shost->host_lock);
 		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
@@ -1654,7 +1647,9 @@ void
 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
-	lpfc_nlp_put(ndlp);
+	if (!(ndlp->nlp_flag & NLP_DELAYED_RM))
+		lpfc_nlp_put(ndlp);
+	return;
 }
 
 /*
@@ -1975,11 +1970,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
 		list_del_init(&ndlp->dev_loss_evt.evt_listp);
 
-	if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) {
-		list_del_init(&ndlp->dev_loss_evt.evt_listp);
-		complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2));
-	}
-
 	lpfc_unreg_rpi(vport, ndlp);
 
 	return 0;
@@ -2418,7 +2408,6 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
 			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
 			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
 				lpfc_free_tx(phba, ndlp);
-				lpfc_nlp_put(ndlp);
 			}
 		}
 	}
@@ -2516,8 +2505,6 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
 			}
 		}
 		if (vport->port_state != LPFC_FLOGI) {
-			vport->port_state = LPFC_FLOGI;
-			lpfc_set_disctmo(vport);
 			lpfc_initial_flogi(vport);
 		}
 		break;
@@ -2828,6 +2815,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	return;
 }
 
+/* This routine releases all resources associated with a specifc NPort's ndlp
+ * and mempool_free's the nodelist.
+ */
 static void
 lpfc_nlp_release(struct kref *kref)
 {
@@ -2842,16 +2832,57 @@ lpfc_nlp_release(struct kref *kref)
 	mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
 }
 
+/* This routine bumps the reference count for a ndlp structure to ensure
+ * that one discovery thread won't free a ndlp while another discovery thread
+ * is using it.
+ */
 struct lpfc_nodelist *
 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
 {
-	if (ndlp)
+	if (ndlp) {
+		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+			"node get: did:x%x flg:x%x refcnt:x%x",
+			ndlp->nlp_DID, ndlp->nlp_flag,
+			atomic_read(&ndlp->kref.refcount));
 		kref_get(&ndlp->kref);
+	}
 	return ndlp;
 }
 
+/* This routine decrements the reference count for a ndlp structure. If the
+ * count goes to 0, this indicates the the associated nodelist should be freed.
+ */
 int
 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
 {
+	if (ndlp) {
+		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+			"node put: did:x%x flg:x%x refcnt:x%x",
+			ndlp->nlp_DID, ndlp->nlp_flag,
+			atomic_read(&ndlp->kref.refcount));
+	}
 	return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
 }
 
+/* This routine free's the specified nodelist if it is not in use
+ * by any other discovery thread. This routine returns 1 if the ndlp
+ * is not being used by anyone and has been freed. A return value of
+ * 0 indicates it is being used by another discovery thread and the
+ * refcount is left unchanged.
+ */
+int
+lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
+{
+	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+		"node not used: did:x%x flg:x%x refcnt:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag,
+		atomic_read(&ndlp->kref.refcount));
+
+	if (atomic_read(&ndlp->kref.refcount) == 1) {
+		lpfc_nlp_put(ndlp);
+		return 1;
+	}
+	return 0;
+}
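The new lpfc_nlp_not_used() helper gives discovery code a way to release a node only when nothing else still references it. A minimal sketch of a hypothetical caller (illustration only, not part of the commit):

/* Hypothetical caller, for illustration only. */
static void example_disc_cmpl(struct lpfc_nodelist *ndlp)
{
	/* ... work that temporarily referenced this ndlp is finished ... */

	if (lpfc_nlp_not_used(ndlp)) {
		/* The refcount was 1, so lpfc_nlp_put() ran and the node has
		 * been freed; ndlp must not be touched again on this path. */
		return;
	}
	/* Another discovery thread still holds a reference; the refcount is
	 * left unchanged and the remaining holder will release the node. */
}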
@@ -1143,9 +1143,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
 		/* Allocate buffer to post */
 		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
 		if (mp1)
-			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
-						    &mp1->phys);
-		if (mp1 == 0 || mp1->virt == 0) {
+			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
+		if (!mp1 || !mp1->virt) {
 			kfree(mp1);
 			lpfc_sli_release_iocbq(phba, iocb);
 			pring->missbufcnt = cnt;
@@ -1159,7 +1158,7 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
 			if (mp2)
 				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
 							    &mp2->phys);
-			if (mp2 == 0 || mp2->virt == 0) {
+			if (!mp2 || !mp2->virt) {
 				kfree(mp2);
 				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
 				kfree(mp1);
@@ -1762,7 +1761,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	struct Scsi_Host  *shost = NULL;
 	void *ptr;
 	unsigned long bar0map_len, bar2map_len;
-	int error = -ENODEV;
+	int error = -ENODEV, retval;
 	int i, hbq_count;
 	uint16_t iotag;
 
@@ -1878,9 +1877,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	lpfc_sli_setup(phba);
 	lpfc_sli_queue_setup(phba);
 
-	error = lpfc_mem_alloc(phba);
-	if (error)
+	retval = lpfc_mem_alloc(phba);
+	if (retval) {
+		error = retval;
 		goto out_free_hbqslimp;
+	}
 
 	/* Initialize and populate the iocb list per host. */
 	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
@@ -1946,8 +1947,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	pci_set_drvdata(pdev, shost);
 
 	if (phba->cfg_use_msi) {
-		error = pci_enable_msi(phba->pcidev);
-		if (!error)
+		retval = pci_enable_msi(phba->pcidev);
+		if (!retval)
 			phba->using_msi = 1;
 		else
 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -1955,11 +1956,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 				"with IRQ\n");
 	}
 
-	error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
+	retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
 			    LPFC_DRIVER_NAME, phba);
-	if (error) {
+	if (retval) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"0451 Enable interrupt handler failed\n");
+		error = retval;
 		goto out_disable_msi;
 	}
 
@@ -1969,11 +1971,15 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
 	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
 
-	if (lpfc_alloc_sysfs_attr(vport))
+	if (lpfc_alloc_sysfs_attr(vport)) {
+		error = -ENOMEM;
 		goto out_free_irq;
+	}
 
-	if (lpfc_sli_hba_setup(phba))
+	if (lpfc_sli_hba_setup(phba)) {
+		error = -ENODEV;
 		goto out_remove_device;
+	}
 
 	/*
 	 * hba setup may have changed the hba_queue_depth so we need to adjust
@@ -2303,15 +2309,13 @@ lpfc_init(void)
 	if (lpfc_transport_template == NULL)
 		return -ENOMEM;
 	if (lpfc_enable_npiv) {
-		lpfc_transport_functions.vport_create = NULL;
-		lpfc_transport_functions.vport_delete = NULL;
-		lpfc_transport_functions.issue_fc_host_lip = NULL;
-		lpfc_transport_functions.vport_disable = lpfc_vport_disable;
 		lpfc_vport_transport_template =
-			fc_attach_transport(&lpfc_transport_functions);
-		if (lpfc_vport_transport_template == NULL)
+			fc_attach_transport(&lpfc_vport_transport_functions);
+		if (lpfc_vport_transport_template == NULL) {
+			fc_release_transport(lpfc_transport_template);
 			return -ENOMEM;
 		}
+	}
 	error = pci_register_driver(&lpfc_driver);
 	if (error) {
 		fc_release_transport(lpfc_transport_template);
......
@@ -288,8 +288,10 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
 
 	/* Get a buffer to hold the HBAs Service Parameters */
-	if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
-	    ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) {
+	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp || !mp->virt) {
 		kfree(mp);
 		mb->mbxCommand = MBX_READ_SPARM64;
 		/* READ_SPARAM: no buffers */
@@ -387,8 +389,10 @@ lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
 	mb->mbxOwner = OWN_HOST;
 
 	/* Get a buffer to hold NPorts Service Parameters */
-	if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == NULL) ||
-	    ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) {
+	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp || !mp->virt) {
 		kfree(mp);
 		mb->mbxCommand = MBX_REG_LOGIN64;
 		/* REG_LOGIN: no buffers */
......
@@ -1135,7 +1135,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
 			mp = (struct lpfc_dmabuf *) (mb->context1);
 			if (mp) {
-				lpfc_mbuf_free(phba, mp->virt, mp->phys);
+				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
 				kfree(mp);
 			}
 			lpfc_nlp_put(ndlp);
......
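The hunk above is the lock-recursion fix named in the change list ("Prevent lock recursion in logo_reglogin_issue"): this path runs with phba->hbalock already held, and the plain lpfc_mbuf_free() acquires that lock itself, so the code now calls the lock-free __lpfc_mbuf_free() variant. A sketch of the assumed convention (not taken from this commit; the double-underscore form is presumed to expect the caller to hold the lock):

/* Sketch of the assumed locking convention, for illustration only. */
void lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbuf_free(phba, virt, dma);	/* caller-holds-lock variant */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}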
@@ -1914,8 +1914,8 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
 			"0329 Kill HBA Data: x%x x%x\n",
 			phba->pport->port_state, psli->sli_flag);
 
-	if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
-						  GFP_KERNEL)) == 0)
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb)
 		return 1;
 
 	/* Disable the error attention */
@@ -2809,7 +2809,7 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 /*
  * Lockless version of lpfc_sli_issue_iocb.
  */
-int
+static int
 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		    struct lpfc_iocbq *piocb, uint32_t flag)
 {
@@ -2954,7 +2954,7 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
 	return 0;
 }
 
-void
+static void
 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
 	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
 {
@@ -3717,7 +3717,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
 	unsigned long flag;
 
 	/* The caller must leave context1 empty. */
-	if (pmboxq->context1 != 0)
+	if (pmboxq->context1)
 		return MBX_NOT_FINISHED;
 
 	/* setup wake call as IOCB callback */
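The pointer tests above cover the "Don't compare pointers to 0 for sparse" item: sparse emits "Using plain integer as NULL pointer" when a pointer is compared against the integer literal 0, while the logical-not form or an explicit NULL comparison is clean. Illustration only:

/* Illustration only: the sparse-clean ways to test a pointer. */
static int ptr_is_unset(void *p)
{
	/* "if (p == 0)" draws: warning: Using plain integer as NULL pointer */
	return !p;	/* or: p == NULL -- both forms are warning-free */
}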
......
@@ -197,9 +197,6 @@ struct lpfc_hbq_init {
 	uint32_t add_count;	/* number to allocate when starved */
 } ;
 
-#define LPFC_MAX_HBQ 16
-
 /* Structure used to hold SLI statistical counters and info */
 struct lpfc_sli_stat {
 	uint64_t mbox_stat_err;	/* Mbox cmds completed status error */
......
@@ -125,6 +125,16 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
 	pmb->vport = vport;
 	rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
 	if (rc != MBX_SUCCESS) {
+		if (signal_pending(current)) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
+					 "1830 Signal aborted mbxCmd x%x\n",
+					 mb->mbxCommand);
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+			if (rc != MBX_TIMEOUT)
+				mempool_free(pmb, phba->mbox_mem_pool);
+			return -EINTR;
+		} else {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
 				 "1818 VPort failed init, mbxCmd x%x "
 				 "READ_SPARM mbxStatus x%x, rc = x%x\n",
@@ -135,6 +145,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
 		mempool_free(pmb, phba->mbox_mem_pool);
 		return -EIO;
 	}
+	}
 
 	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
 	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
@@ -204,6 +215,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 	int instance;
 	int vpi;
 	int rc = VPORT_ERROR;
+	int status;
 
 	if ((phba->sli_rev < 3) ||
 		!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
@@ -248,13 +260,19 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 	vport->vpi = vpi;
 	lpfc_debugfs_initialize(vport);
 
-	if (lpfc_vport_sparm(phba, vport)) {
+	if ((status = lpfc_vport_sparm(phba, vport))) {
+		if (status == -EINTR) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+					 "1831 Create VPORT Interrupted.\n");
+			rc = VPORT_ERROR;
+		} else {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
 					 "1813 Create VPORT failed. "
 					 "Cannot get sparam\n");
+			rc = VPORT_NORESOURCES;
+		}
 		lpfc_free_vpi(phba, vpi);
 		destroy_port(vport);
-		rc = VPORT_NORESOURCES;
 		goto error_out;
 	}
......