Commit b84daac9 authored by James Smart, committed by James Bottomley

[SCSI] lpfc 8.3.33: Add debugfs interface to display SLI queue information

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
parent 34f5ad8b
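
The hunks below cover only the counter plumbing in the SLI layer; the lpfc_debugfs.c code that actually registers and formats the new debugfs entry is not part of this excerpt. As a rough, generic sketch of the debugfs pattern involved (every name below is a hypothetical stand-in, not lpfc's), a read-only file that snapshots a set of queue counters can be wired up with debugfs_create_file() and simple_read_from_buffer():

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/debugfs.h>
	#include <linux/fs.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	/* Hypothetical stand-in for the per-queue counters this patch adds. */
	struct demo_queue_stats {
		u32 q_cnt_1;
		u32 q_cnt_2;
		u32 q_cnt_3;
		u64 q_cnt_4;
	};

	static struct demo_queue_stats demo_stats;
	static struct dentry *demo_dir;

	/* Format a snapshot of the counters and copy it to userspace. */
	static ssize_t demo_qinfo_read(struct file *file, char __user *buf,
				       size_t nbytes, loff_t *ppos)
	{
		char out[128];
		int len;

		len = scnprintf(out, sizeof(out),
				"cnt1 %u\ncnt2 %u\ncnt3 %u\ncnt4 %llu\n",
				demo_stats.q_cnt_1, demo_stats.q_cnt_2,
				demo_stats.q_cnt_3,
				(unsigned long long)demo_stats.q_cnt_4);
		return simple_read_from_buffer(buf, nbytes, ppos, out, len);
	}

	static const struct file_operations demo_qinfo_fops = {
		.owner	= THIS_MODULE,
		.read	= demo_qinfo_read,
		.llseek	= default_llseek,
	};

	static int __init demo_qinfo_init(void)
	{
		demo_dir = debugfs_create_dir("demo_qinfo", NULL);
		debugfs_create_file("qinfo", 0444, demo_dir, NULL,
				    &demo_qinfo_fops);
		return 0;
	}

	static void __exit demo_qinfo_exit(void)
	{
		debugfs_remove_recursive(demo_dir);
	}

	module_init(demo_qinfo_init);
	module_exit(demo_qinfo_exit);
	MODULE_LICENSE("GPL");

With the module loaded and debugfs mounted, cat /sys/kernel/debug/demo_qinfo/qinfo prints the four counters.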
@@ -36,6 +36,9 @@
 /* dumpHostSlim output buffer size */
 #define LPFC_DUMPHOSTSLIM_SIZE 4096
 
+/* dumpSLIqinfo output buffer size */
+#define LPFC_DUMPSLIQINFO_SIZE 4096
+
 /* hbqinfo output buffer size */
 #define LPFC_HBQINFO_SIZE 8192
...
@@ -101,8 +101,11 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 	temp_wqe = q->qe[q->host_index].wqe;
 
 	/* If the host has not yet processed the next entry then we are done */
-	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+	if (((q->host_index + 1) % q->entry_count) == q->hba_index) {
+		q->WQ_overflow++;
 		return -ENOMEM;
+	}
+	q->WQ_posted++;
 	/* set consumption flag every once in a while */
 	if (!((q->host_index + 1) % q->entry_repost))
 		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
@@ -11311,14 +11314,17 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"2537 Receive Frame Truncated!!\n");
+		hrq->RQ_buf_trunc++;
 	case FC_STATUS_RQ_SUCCESS:
 		lpfc_sli4_rq_release(hrq, drq);
 		spin_lock_irqsave(&phba->hbalock, iflags);
 		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
 		if (!dma_buf) {
+			hrq->RQ_no_buf_found++;
 			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			goto out;
 		}
+		hrq->RQ_rcv_buf++;
 		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
 		/* save off the frame for the worker thread to process */
 		list_add_tail(&dma_buf->cq_event.list,
@@ -11330,6 +11336,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 		break;
 	case FC_STATUS_INSUFF_BUF_NEED_BUF:
 	case FC_STATUS_INSUFF_BUF_FRM_DISC:
+		hrq->RQ_no_posted_buf++;
 		/* Post more buffers if possible */
 		spin_lock_irqsave(&phba->hbalock, iflags);
 		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
@@ -11457,6 +11464,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
 			if (!(++ecount % cq->entry_repost))
 				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+			cq->CQ_mbox++;
 		}
 		break;
 	case LPFC_WCQ:
@@ -11470,6 +11478,10 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 			if (!(++ecount % cq->entry_repost))
 				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 		}
+
+		/* Track the max number of CQEs processed in 1 EQ */
+		if (ecount > cq->CQ_max_cqe)
+			cq->CQ_max_cqe = ecount;
 		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -11621,17 +11633,20 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	/* Check and process for different type of WCQE and dispatch */
 	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
 	case CQE_CODE_COMPL_WQE:
+		cq->CQ_wq++;
 		/* Process the WQ complete event */
 		phba->last_completion_time = jiffies;
 		lpfc_sli4_fp_handle_fcp_wcqe(phba,
 				(struct lpfc_wcqe_complete *)&wcqe);
 		break;
 	case CQE_CODE_RELEASE_WQE:
+		cq->CQ_release_wqe++;
 		/* Process the WQ release event */
 		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
 				(struct lpfc_wcqe_release *)&wcqe);
 		break;
 	case CQE_CODE_XRI_ABORTED:
+		cq->CQ_xri_aborted++;
 		/* Process the WQ XRI abort event */
 		phba->last_completion_time = jiffies;
 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
@@ -11709,6 +11724,10 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 	}
 
+	/* Track the max number of CQEs processed in 1 EQ */
+	if (ecount > cq->CQ_max_cqe)
+		cq->CQ_max_cqe = ecount;
+
 	/* Catch the no cq entry condition */
 	if (unlikely(ecount == 0))
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -11780,6 +11799,7 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
 	/* Check device state for handling interrupt */
 	if (unlikely(lpfc_intr_state_check(phba))) {
+		speq->EQ_badstate++;
 		/* Check again for link_state with lock held */
 		spin_lock_irqsave(&phba->hbalock, iflag);
 		if (phba->link_state < LPFC_LINK_DOWN)
@@ -11796,13 +11816,19 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
 		lpfc_sli4_sp_handle_eqe(phba, eqe);
 		if (!(++ecount % speq->entry_repost))
 			lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
+		speq->EQ_processed++;
 	}
 
+	/* Track the max number of EQEs processed in 1 intr */
+	if (ecount > speq->EQ_max_eqe)
+		speq->EQ_max_eqe = ecount;
+
 	/* Always clear and re-arm the slow-path EQ */
 	lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
 
 	/* Catch the no cq entry condition */
 	if (unlikely(ecount == 0)) {
+		speq->EQ_no_entry++;
 		if (phba->intr_type == MSIX)
 			/* MSI-X treated interrupt served as no EQ share INT */
 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -11864,6 +11890,7 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
 	/* Check device state for handling interrupt */
 	if (unlikely(lpfc_intr_state_check(phba))) {
+		fpeq->EQ_badstate++;
 		/* Check again for link_state with lock held */
 		spin_lock_irqsave(&phba->hbalock, iflag);
 		if (phba->link_state < LPFC_LINK_DOWN)
@@ -11880,12 +11907,18 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
 		lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
 		if (!(++ecount % fpeq->entry_repost))
 			lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+		fpeq->EQ_processed++;
 	}
 
+	/* Track the max number of EQEs processed in 1 intr */
+	if (ecount > fpeq->EQ_max_eqe)
+		fpeq->EQ_max_eqe = ecount;
+
 	/* Always clear and re-arm the fast-path EQ */
 	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
 
 	if (unlikely(ecount == 0)) {
+		fpeq->EQ_no_entry++;
 		if (phba->intr_type == MSIX)
 			/* MSI-X treated interrupt served as no EQ share INT */
 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
...
@@ -141,6 +141,35 @@ struct lpfc_queue {
 	uint32_t page_count;	/* Number of pages allocated for this queue */
 	uint32_t host_index;	/* The host's index for putting or getting */
 	uint32_t hba_index;	/* The last known hba index for get or put */
+
+	/* For q stats */
+	uint32_t q_cnt_1;
+	uint32_t q_cnt_2;
+	uint32_t q_cnt_3;
+	uint64_t q_cnt_4;
+/* defines for EQ stats */
+#define	EQ_max_eqe		q_cnt_1
+#define	EQ_no_entry		q_cnt_2
+#define	EQ_badstate		q_cnt_3
+#define	EQ_processed		q_cnt_4
+
+/* defines for CQ stats */
+#define	CQ_mbox			q_cnt_1
+#define	CQ_max_cqe		q_cnt_1
+#define	CQ_release_wqe		q_cnt_2
+#define	CQ_xri_aborted		q_cnt_3
+#define	CQ_wq			q_cnt_4
+
+/* defines for WQ stats */
+#define	WQ_overflow		q_cnt_1
+#define	WQ_posted		q_cnt_4
+
+/* defines for RQ stats */
+#define	RQ_no_posted_buf	q_cnt_1
+#define	RQ_no_buf_found		q_cnt_2
+#define	RQ_buf_trunc		q_cnt_3
+#define	RQ_rcv_buf		q_cnt_4
+
 	union sli4_qe qe[1];	/* array to index entries (must be last) */
 };
...
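
Because the per-role names above are plain preprocessor aliases for the four generic q_cnt_* fields, each lpfc_queue carries a single stats block whose interpretation depends on the queue's role (note that CQ_mbox and CQ_max_cqe both alias q_cnt_1). A minimal, self-contained userspace sketch of the same aliasing technique, using a stripped-down stand-in for struct lpfc_queue:

	#include <stdint.h>
	#include <stdio.h>

	/* Stripped-down stand-in for struct lpfc_queue (illustration only). */
	struct demo_queue {
		uint32_t q_cnt_1;
		uint32_t q_cnt_2;
		uint32_t q_cnt_3;
		uint64_t q_cnt_4;
	};

	/* Same trick as the header: role-specific names alias generic fields. */
	#define WQ_overflow	q_cnt_1
	#define WQ_posted	q_cnt_4

	int main(void)
	{
		struct demo_queue wq = {0};

		/* Hot paths bump the role-specific alias... */
		wq.WQ_posted++;
		wq.WQ_posted++;
		wq.WQ_overflow++;

		/* ...and a generic dumper can walk the raw q_cnt_* fields. */
		printf("q_cnt_1=%u q_cnt_4=%llu\n",
		       wq.q_cnt_1, (unsigned long long)wq.q_cnt_4);
		return 0;
	}

The payoff of this layout is that a single debugfs dump routine can print q_cnt_1 through q_cnt_4 for every queue without knowing whether the queue is an EQ, CQ, WQ, or RQ.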