Commit 4d4c4a4a authored by James Smart, committed by Christoph Hellwig

Fix max_sgl_segments settings for NVME / NVMET

NVME segment counts cannot currently be set to a large number.

The existing module parameter lpfc_sg_seg_cnt is used for both
SCSI and NVME.

For NVME and NVMET, limit the value taken from the module parameter
lpfc_sg_seg_cnt to 128, with a default of 64, assuming NVME is enabled
in the driver for that port. The driver sets max_sgl_segments in the
NVME/NVMET template to lpfc_sg_seg_cnt + 1.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
parent 9d3d340d
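
The gist of the change, in both lpfc_nvme_create_localport() and lpfc_nvmet_create_targetport(), is to clamp the shared lpfc_sg_seg_cnt value before handing it to the NVME transport template. A minimal standalone sketch of that clamp follows; the helper name lpfc_clamp_nvme_seg_cnt() is illustrative only and is not a driver symbol or part of this patch:

	/* Illustrative sketch only - mirrors the clamp this patch adds.
	 * lpfc_clamp_nvme_seg_cnt() is a hypothetical helper, not part of lpfc.
	 */
	static inline uint32_t
	lpfc_clamp_nvme_seg_cnt(uint32_t cfg_sg_seg_cnt)
	{
		/* NVME/NVMET SGL counts are capped at LPFC_MAX_NVME_SEG_CNT (128) */
		if (cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT)
			return LPFC_MAX_NVME_SEG_CNT;
		return cfg_sg_seg_cnt;
	}

	/* The transport templates then receive the clamped value + 1, e.g.:
	 *   lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	 */
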
@@ -56,7 +56,7 @@ struct lpfc_sli2_slim;
 #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
 #define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
 #define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
-#define LPFC_MIN_NVME_SEG_CNT 254
+#define LPFC_MAX_NVME_SEG_CNT 128 /* max SGL element cnt per NVME cmnd */
 #define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
 #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@@ -781,6 +781,7 @@ struct lpfc_hba {
 	uint32_t cfg_nvmet_fb_size;
 	uint32_t cfg_total_seg_cnt;
 	uint32_t cfg_sg_seg_cnt;
+	uint32_t cfg_nvme_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
 	uint64_t cfg_soft_wwnn;
 	uint64_t cfg_soft_wwpn;
...
@@ -1114,12 +1114,12 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
 		first_data_sgl = sgl;
 		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
-		if (lpfc_ncmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+		if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 					"6058 Too many sg segments from "
 					"NVME Transport. Max %d, "
 					"nvmeIO sg_cnt %d\n",
-					phba->cfg_sg_seg_cnt,
+					phba->cfg_nvme_seg_cnt,
 					lpfc_ncmd->seg_cnt);
 			lpfc_ncmd->seg_cnt = 0;
 			return 1;
@@ -2158,8 +2158,18 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
 	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
 
-	/* For now need + 1 to get around NVME transport logic */
-	lpfc_nvme_template.max_sgl_segments = phba->cfg_sg_seg_cnt + 1;
+	/* Limit to LPFC_MAX_NVME_SEG_CNT.
+	 * For now need + 1 to get around NVME transport logic.
+	 */
+	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
+				 "6300 Reducing sg segment cnt to %d\n",
+				 LPFC_MAX_NVME_SEG_CNT);
+		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+	} else {
+		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+	}
+	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
 	lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
 
 	/* localport is allocated from the stack, but the registration
...
@@ -704,8 +704,19 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
 	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
 	pinfo.port_id = vport->fc_myDID;
 
+	/* Limit to LPFC_MAX_NVME_SEG_CNT.
+	 * For now need + 1 to get around NVME transport logic.
+	 */
+	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+				"6400 Reducing sg segment cnt to %d\n",
+				LPFC_MAX_NVME_SEG_CNT);
+		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+	} else {
+		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+	}
+	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
 	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
-	lpfc_tgttemplate.max_sgl_segments = phba->cfg_sg_seg_cnt + 1;
 	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
 					   NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
 					   NVMET_FCTGTFEAT_CMD_IN_ISR |
@@ -1278,11 +1289,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 		return NULL;
 	}
 
-	if (rsp->sg_cnt > phba->cfg_sg_seg_cnt) {
+	if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 				"6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
-				"NPORT x%x oxid:x%x\n",
-				ctxp->sid, ctxp->oxid);
+				"NPORT x%x oxid:x%x cnt %d\n",
+				ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt);
 		return NULL;
 	}
...