Commit 4d9ab994 authored by James Smart's avatar James Smart Committed by James Bottomley

[SCSI] lpfc 8.3.5: fix reset path, ELS ordering and discovery issues

This patch includes the following fixes:
- Fixed panic during HBA reset.
- Fixed FCoE event tag passed in resume_rpi.
- Fixed out of order ELS commands
- Fixed discovery issues found during VLAN testing.
- Fixed UNREG_VPI failure on extended link pull
- Fixed crash while processing unsolicited FC frames.
- Clear retry count in the delayed ELS handler
- Fixed discovery failure during quick link bounce.
Signed-off-by: default avatarJames Smart <james.smart@emulex.com>
Signed-off-by: default avatarJames Bottomley <James.Bottomley@suse.de>
parent 1796e722
...@@ -109,7 +109,7 @@ struct hbq_dmabuf { ...@@ -109,7 +109,7 @@ struct hbq_dmabuf {
struct lpfc_dmabuf dbuf; struct lpfc_dmabuf dbuf;
uint32_t size; uint32_t size;
uint32_t tag; uint32_t tag;
struct lpfc_rcqe rcqe; struct lpfc_cq_event cq_event;
}; };
/* Priority bit. Set value to exceed low water mark in lpfc_mem. */ /* Priority bit. Set value to exceed low water mark in lpfc_mem. */
...@@ -551,6 +551,7 @@ struct lpfc_hba { ...@@ -551,6 +551,7 @@ struct lpfc_hba {
uint8_t fc_linkspeed; /* Link speed after last READ_LA */ uint8_t fc_linkspeed; /* Link speed after last READ_LA */
uint32_t fc_eventTag; /* event tag for link attention */ uint32_t fc_eventTag; /* event tag for link attention */
uint32_t link_events;
/* These fields used to be binfo */ /* These fields used to be binfo */
uint32_t fc_pref_DID; /* preferred D_ID */ uint32_t fc_pref_DID; /* preferred D_ID */
......
...@@ -3815,7 +3815,11 @@ lpfc_get_stats(struct Scsi_Host *shost) ...@@ -3815,7 +3815,11 @@ lpfc_get_stats(struct Scsi_Host *shost)
hs->invalid_crc_count -= lso->invalid_crc_count; hs->invalid_crc_count -= lso->invalid_crc_count;
hs->error_frames -= lso->error_frames; hs->error_frames -= lso->error_frames;
if (phba->fc_topology == TOPOLOGY_LOOP) { if (phba->hba_flag & HBA_FCOE_SUPPORT) {
hs->lip_count = -1;
hs->nos_count = (phba->link_events >> 1);
hs->nos_count -= lso->link_events;
} else if (phba->fc_topology == TOPOLOGY_LOOP) {
hs->lip_count = (phba->fc_eventTag >> 1); hs->lip_count = (phba->fc_eventTag >> 1);
hs->lip_count -= lso->link_events; hs->lip_count -= lso->link_events;
hs->nos_count = -1; hs->nos_count = -1;
...@@ -3906,6 +3910,9 @@ lpfc_reset_stats(struct Scsi_Host *shost) ...@@ -3906,6 +3910,9 @@ lpfc_reset_stats(struct Scsi_Host *shost)
lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
lso->error_frames = pmb->un.varRdLnk.crcCnt; lso->error_frames = pmb->un.varRdLnk.crcCnt;
if (phba->hba_flag & HBA_FCOE_SUPPORT)
lso->link_events = (phba->link_events >> 1);
else
lso->link_events = (phba->fc_eventTag >> 1); lso->link_events = (phba->fc_eventTag >> 1);
psli->stats_start = get_seconds(); psli->stats_start = get_seconds();
......
...@@ -235,7 +235,7 @@ void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *); ...@@ -235,7 +235,7 @@ void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
int lpfc_sli_check_eratt(struct lpfc_hba *); int lpfc_sli_check_eratt(struct lpfc_hba *);
void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t); struct lpfc_sli_ring *, uint32_t);
int lpfc_sli4_handle_received_buffer(struct lpfc_hba *); void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t); struct lpfc_iocbq *, uint32_t);
......
...@@ -2452,6 +2452,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) ...@@ -2452,6 +2452,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
*/ */
del_timer_sync(&ndlp->nlp_delayfunc); del_timer_sync(&ndlp->nlp_delayfunc);
retry = ndlp->nlp_retry; retry = ndlp->nlp_retry;
ndlp->nlp_retry = 0;
switch (cmd) { switch (cmd) {
case ELS_CMD_FLOGI: case ELS_CMD_FLOGI:
......
...@@ -525,8 +525,6 @@ lpfc_work_done(struct lpfc_hba *phba) ...@@ -525,8 +525,6 @@ lpfc_work_done(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock); spin_unlock_irq(&phba->hbalock);
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
} }
if (phba->hba_flag & HBA_RECEIVE_BUFFER)
lpfc_sli4_handle_received_buffer(phba);
} }
vports = lpfc_create_vport_work_array(phba); vports = lpfc_create_vport_work_array(phba);
...@@ -568,8 +566,9 @@ lpfc_work_done(struct lpfc_hba *phba) ...@@ -568,8 +566,9 @@ lpfc_work_done(struct lpfc_hba *phba)
pring = &phba->sli.ring[LPFC_ELS_RING]; pring = &phba->sli.ring[LPFC_ELS_RING];
status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
status >>= (4*LPFC_ELS_RING); status >>= (4*LPFC_ELS_RING);
if ((status & HA_RXMASK) if ((status & HA_RXMASK) ||
|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) { (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
(phba->hba_flag & HBA_RECEIVE_BUFFER)) {
if (pring->flag & LPFC_STOP_IOCB_EVENT) { if (pring->flag & LPFC_STOP_IOCB_EVENT) {
pring->flag |= LPFC_DEFERRED_RING_EVENT; pring->flag |= LPFC_DEFERRED_RING_EVENT;
/* Set the lpfc data pending flag */ /* Set the lpfc data pending flag */
...@@ -688,7 +687,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) ...@@ -688,7 +687,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
lpfc_unreg_rpi(vport, ndlp); lpfc_unreg_rpi(vport, ndlp);
/* Leave Fabric nodes alone on link down */ /* Leave Fabric nodes alone on link down */
if (!remove && ndlp->nlp_type & NLP_FABRIC) if ((phba->sli_rev < LPFC_SLI_REV4) &&
(!remove && ndlp->nlp_type & NLP_FABRIC))
continue; continue;
rc = lpfc_disc_state_machine(vport, ndlp, NULL, rc = lpfc_disc_state_machine(vport, ndlp, NULL,
remove remove
...@@ -1015,10 +1015,10 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) ...@@ -1015,10 +1015,10 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
mempool_free(mboxq, phba->mbox_mem_pool); mempool_free(mboxq, phba->mbox_mem_pool);
return; return;
} }
if (vport->port_state != LPFC_FLOGI) {
spin_lock_irqsave(&phba->hbalock, flags);
phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
phba->hba_flag &= ~FCF_DISC_INPROGRESS; phba->hba_flag &= ~FCF_DISC_INPROGRESS;
if (vport->port_state != LPFC_FLOGI) {
spin_lock_irqsave(&phba->hbalock, flags);
spin_unlock_irqrestore(&phba->hbalock, flags); spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_initial_flogi(vport); lpfc_initial_flogi(vport);
} }
...@@ -1199,6 +1199,7 @@ lpfc_register_fcf(struct lpfc_hba *phba) ...@@ -1199,6 +1199,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
/* If the FCF is not available do nothing. */ /* If the FCF is not available do nothing. */
if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
spin_unlock_irqrestore(&phba->hbalock, flags); spin_unlock_irqrestore(&phba->hbalock, flags);
return; return;
} }
...@@ -1216,15 +1217,23 @@ lpfc_register_fcf(struct lpfc_hba *phba) ...@@ -1216,15 +1217,23 @@ lpfc_register_fcf(struct lpfc_hba *phba)
fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL); GFP_KERNEL);
if (!fcf_mbxq) if (!fcf_mbxq) {
spin_lock_irqsave(&phba->hbalock, flags);
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
spin_unlock_irqrestore(&phba->hbalock, flags);
return; return;
}
lpfc_reg_fcfi(phba, fcf_mbxq); lpfc_reg_fcfi(phba, fcf_mbxq);
fcf_mbxq->vport = phba->pport; fcf_mbxq->vport = phba->pport;
fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) if (rc == MBX_NOT_FINISHED) {
spin_lock_irqsave(&phba->hbalock, flags);
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
spin_unlock_irqrestore(&phba->hbalock, flags);
mempool_free(fcf_mbxq, phba->mbox_mem_pool); mempool_free(fcf_mbxq, phba->mbox_mem_pool);
}
return; return;
} }
...@@ -1253,6 +1262,20 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, ...@@ -1253,6 +1262,20 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
uint16_t *vlan_id) uint16_t *vlan_id)
{ {
struct lpfc_fcf_conn_entry *conn_entry; struct lpfc_fcf_conn_entry *conn_entry;
int i, j, fcf_vlan_id = 0;
/* Find the lowest VLAN id in the FCF record */
for (i = 0; i < 512; i++) {
if (new_fcf_record->vlan_bitmap[i]) {
fcf_vlan_id = i * 8;
j = 0;
while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
j++;
fcf_vlan_id++;
}
break;
}
}
/* If FCF not available return 0 */ /* If FCF not available return 0 */
if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
...@@ -1286,6 +1309,10 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, ...@@ -1286,6 +1309,10 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
if (*addr_mode & LPFC_FCF_FPMA) if (*addr_mode & LPFC_FCF_FPMA)
*addr_mode = LPFC_FCF_FPMA; *addr_mode = LPFC_FCF_FPMA;
/* If FCF record report a vlan id use that vlan id */
if (fcf_vlan_id)
*vlan_id = fcf_vlan_id;
else
*vlan_id = 0xFFFF; *vlan_id = 0xFFFF;
return 1; return 1;
} }
...@@ -1384,8 +1411,15 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, ...@@ -1384,8 +1411,15 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
(*addr_mode & LPFC_FCF_FPMA)) (*addr_mode & LPFC_FCF_FPMA))
*addr_mode = LPFC_FCF_FPMA; *addr_mode = LPFC_FCF_FPMA;
/* If matching connect list has a vlan id, use it */
if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
*vlan_id = conn_entry->conn_rec.vlan_tag; *vlan_id = conn_entry->conn_rec.vlan_tag;
/*
* If no vlan id is specified in connect list, use the vlan id
* in the FCF record
*/
else if (fcf_vlan_id)
*vlan_id = fcf_vlan_id;
else else
*vlan_id = 0xFFFF; *vlan_id = 0xFFFF;
...@@ -1423,6 +1457,12 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) ...@@ -1423,6 +1457,12 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
if (phba->link_state >= LPFC_LINK_UP) if (phba->link_state >= LPFC_LINK_UP)
lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
else
/*
* Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
* flag
*/
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
if (unreg_fcf) { if (unreg_fcf) {
spin_lock_irq(&phba->hbalock); spin_lock_irq(&phba->hbalock);
...@@ -2085,6 +2125,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ...@@ -2085,6 +2125,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else else
phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
phba->link_events++;
if (la->attType == AT_LINK_UP && (!la->mm)) { if (la->attType == AT_LINK_UP && (!la->mm)) {
phba->fc_stat.LinkUp++; phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) { if (phba->link_flag & LS_LOOPBACK_MODE) {
...@@ -4409,6 +4450,8 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) ...@@ -4409,6 +4450,8 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
if (lpfc_fcf_inuse(phba)) if (lpfc_fcf_inuse(phba))
return; return;
/* At this point, all discovery is aborted */
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
/* Unregister VPIs */ /* Unregister VPIs */
vports = lpfc_create_vport_work_array(phba); vports = lpfc_create_vport_work_array(phba);
...@@ -4512,8 +4555,10 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, ...@@ -4512,8 +4555,10 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
/* Free the current connect table */ /* Free the current connect table */
list_for_each_entry_safe(conn_entry, next_conn_entry, list_for_each_entry_safe(conn_entry, next_conn_entry,
&phba->fcf_conn_rec_list, list) &phba->fcf_conn_rec_list, list) {
list_del_init(&conn_entry->list);
kfree(conn_entry); kfree(conn_entry);
}
conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
record_count = conn_hdr->length * sizeof(uint32_t)/ record_count = conn_hdr->length * sizeof(uint32_t)/
......
...@@ -2919,6 +2919,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, ...@@ -2919,6 +2919,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
int rc; int rc;
phba->fc_eventTag = acqe_fcoe->event_tag;
phba->fcoe_eventtag = acqe_fcoe->event_tag; phba->fcoe_eventtag = acqe_fcoe->event_tag;
switch (event_type) { switch (event_type) {
case LPFC_FCOE_EVENT_TYPE_NEW_FCF: case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
...@@ -2990,6 +2991,7 @@ static void ...@@ -2990,6 +2991,7 @@ static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
struct lpfc_acqe_dcbx *acqe_dcbx) struct lpfc_acqe_dcbx *acqe_dcbx)
{ {
phba->fc_eventTag = acqe_dcbx->event_tag;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI, lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0290 The SLI4 DCBX asynchronous event is not " "0290 The SLI4 DCBX asynchronous event is not "
"handled yet\n"); "handled yet\n");
...@@ -3594,8 +3596,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) ...@@ -3594,8 +3596,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
/* Free the current connect table */ /* Free the current connect table */
list_for_each_entry_safe(conn_entry, next_conn_entry, list_for_each_entry_safe(conn_entry, next_conn_entry,
&phba->fcf_conn_rec_list, list) &phba->fcf_conn_rec_list, list) {
list_del_init(&conn_entry->list);
kfree(conn_entry); kfree(conn_entry);
}
return; return;
} }
...@@ -5058,15 +5062,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) ...@@ -5058,15 +5062,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
} }
phba->sli4_hba.els_cq = qdesc; phba->sli4_hba.els_cq = qdesc;
/* Create slow-path Unsolicited Receive Complete Queue */
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0502 Failed allocate slow-path USOL RX CQ\n");
goto out_free_els_cq;
}
phba->sli4_hba.rxq_cq = qdesc;
/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
...@@ -5075,7 +5070,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) ...@@ -5075,7 +5070,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2577 Failed allocate memory for fast-path " "2577 Failed allocate memory for fast-path "
"CQ record array\n"); "CQ record array\n");
goto out_free_rxq_cq; goto out_free_els_cq;
} }
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
...@@ -5188,9 +5183,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) ...@@ -5188,9 +5183,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
} }
kfree(phba->sli4_hba.fcp_cq); kfree(phba->sli4_hba.fcp_cq);
out_free_rxq_cq:
lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
phba->sli4_hba.rxq_cq = NULL;
out_free_els_cq: out_free_els_cq:
lpfc_sli4_queue_free(phba->sli4_hba.els_cq); lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
phba->sli4_hba.els_cq = NULL; phba->sli4_hba.els_cq = NULL;
...@@ -5247,10 +5239,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) ...@@ -5247,10 +5239,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
phba->sli4_hba.dat_rq = NULL; phba->sli4_hba.dat_rq = NULL;
/* Release unsolicited receive complete queue */
lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
phba->sli4_hba.rxq_cq = NULL;
/* Release ELS complete queue */ /* Release ELS complete queue */
lpfc_sli4_queue_free(phba->sli4_hba.els_cq); lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
phba->sli4_hba.els_cq = NULL; phba->sli4_hba.els_cq = NULL;
...@@ -5383,25 +5371,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) ...@@ -5383,25 +5371,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.els_cq->queue_id, phba->sli4_hba.els_cq->queue_id,
phba->sli4_hba.sp_eq->queue_id); phba->sli4_hba.sp_eq->queue_id);
/* Set up slow-path Unsolicited Receive Complete Queue */
if (!phba->sli4_hba.rxq_cq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0532 USOL RX CQ not allocated\n");
goto out_destroy_els_cq;
}
rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
LPFC_RCQ, LPFC_USOL);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0533 Failed setup of slow-path USOL RX CQ: "
"rc = 0x%x\n", rc);
goto out_destroy_els_cq;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
phba->sli4_hba.rxq_cq->queue_id,
phba->sli4_hba.sp_eq->queue_id);
/* Set up fast-path FCP Response Complete Queue */ /* Set up fast-path FCP Response Complete Queue */
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
...@@ -5507,7 +5476,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) ...@@ -5507,7 +5476,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
goto out_destroy_fcp_wq; goto out_destroy_fcp_wq;
} }
rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
phba->sli4_hba.rxq_cq, LPFC_USOL); phba->sli4_hba.els_cq, LPFC_USOL);
if (rc) { if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0541 Failed setup of Receive Queue: " "0541 Failed setup of Receive Queue: "
...@@ -5519,7 +5488,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) ...@@ -5519,7 +5488,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
"parent cq-id=%d\n", "parent cq-id=%d\n",
phba->sli4_hba.hdr_rq->queue_id, phba->sli4_hba.hdr_rq->queue_id,
phba->sli4_hba.dat_rq->queue_id, phba->sli4_hba.dat_rq->queue_id,
phba->sli4_hba.rxq_cq->queue_id); phba->sli4_hba.els_cq->queue_id);
return 0; return 0;
out_destroy_fcp_wq: out_destroy_fcp_wq:
...@@ -5531,8 +5500,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) ...@@ -5531,8 +5500,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
out_destroy_fcp_cq: out_destroy_fcp_cq:
for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
out_destroy_els_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq: out_destroy_mbx_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
...@@ -5574,8 +5541,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) ...@@ -5574,8 +5541,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
/* Unset ELS complete queue */ /* Unset ELS complete queue */
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
/* Unset unsolicited receive complete queue */
lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
/* Unset FCP response complete queue */ /* Unset FCP response complete queue */
for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
......
This diff is collapsed.
...@@ -29,14 +29,17 @@ typedef enum _lpfc_ctx_cmd { ...@@ -29,14 +29,17 @@ typedef enum _lpfc_ctx_cmd {
LPFC_CTX_HOST LPFC_CTX_HOST
} lpfc_ctx_cmd; } lpfc_ctx_cmd;
/* This structure is used to carry the needed response IOCB states */ struct lpfc_cq_event {
struct lpfc_sli4_rspiocb_info { struct list_head list;
uint8_t hw_status; union {
uint8_t bfield; struct lpfc_mcqe mcqe_cmpl;
#define LPFC_XB 0x1 struct lpfc_acqe_link acqe_link;
#define LPFC_PV 0x2 struct lpfc_acqe_fcoe acqe_fcoe;
uint8_t priority; struct lpfc_acqe_dcbx acqe_dcbx;
uint8_t reserved; struct lpfc_rcqe rcqe_cmpl;
struct sli4_wcqe_xri_aborted wcqe_axri;
struct lpfc_wcqe_complete wcqe_cmpl;
} cqe;
}; };
/* This structure is used to handle IOCB requests / responses */ /* This structure is used to handle IOCB requests / responses */
...@@ -76,7 +79,7 @@ struct lpfc_iocbq { ...@@ -76,7 +79,7 @@ struct lpfc_iocbq {
struct lpfc_iocbq *); struct lpfc_iocbq *);
void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *); struct lpfc_iocbq *);
struct lpfc_sli4_rspiocb_info sli4_info; struct lpfc_cq_event cq_event;
}; };
#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ #define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
......
...@@ -110,18 +110,6 @@ struct lpfc_queue { ...@@ -110,18 +110,6 @@ struct lpfc_queue {
union sli4_qe qe[1]; /* array to index entries (must be last) */ union sli4_qe qe[1]; /* array to index entries (must be last) */
}; };
struct lpfc_cq_event {
struct list_head list;
union {
struct lpfc_mcqe mcqe_cmpl;
struct lpfc_acqe_link acqe_link;
struct lpfc_acqe_fcoe acqe_fcoe;
struct lpfc_acqe_dcbx acqe_dcbx;
struct lpfc_rcqe rcqe_cmpl;
struct sli4_wcqe_xri_aborted wcqe_axri;
} cqe;
};
struct lpfc_sli4_link { struct lpfc_sli4_link {
uint8_t speed; uint8_t speed;
uint8_t duplex; uint8_t duplex;
...@@ -325,7 +313,6 @@ struct lpfc_sli4_hba { ...@@ -325,7 +313,6 @@ struct lpfc_sli4_hba {
struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
/* Setup information for various queue parameters */ /* Setup information for various queue parameters */
int eq_esize; int eq_esize;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment