Commit 6c621a22 authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Separate NVMET RQ buffer posting from IO resources SGL/iocbq/context

Currently, IO resources are mapped 1:1 with the RQ buffers posted.

Add logic to separate RQE buffers from IO op resources
(sgl/iocbq/context). During initialization, the driver determines
how many SGLs it will allocate for NVMET (based on what the firmware
reports) and associates an NVMET iocbq and an NVMET context structure
with each one.

Now that hdr/data buffers are immediately reposted back to the RQ, 512
RQEs per MRQ are sufficient. Also, since NVMET data buffers are now
128 bytes, lpfc_nvmet_mrq_post is no longer necessary, as we always
post the maximum (512) buffers per NVMET MRQ.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 3c603be9
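Before the diff, a minimal sketch of the new scheme may help. This is a
user-space model with invented names (ctxbuf, ctx_free_list, handle_rqe),
not driver code; in the driver the same roles are played by struct
lpfc_nvmet_ctxbuf, the lpfc_nvmet_ctx_list free list, and
lpfc_post_rq_buffer() repopulating the RQs. The point: IO contexts are
sized by the firmware XRI grant, while each RQ is just a 512-deep buffer
recycler.

/*
 * Sketch only (invented names): models the decoupling this patch
 * introduces.  In the driver, struct lpfc_nvmet_ctxbuf plays the role
 * of ctxbuf, lpfc_nvmet_ctx_list the free list, and
 * lpfc_post_rq_buffer() the RQ recycler.
 */
#include <stdio.h>
#include <stdlib.h>

#define RQ_ENTRIES 512   /* fixed RQE count per MRQ (LPFC_NVMET_RQE_DEF_COUNT) */
#define NVMET_XRIS 2048  /* stand-in for the firmware-reported SGL/XRI grant   */

struct ctxbuf {          /* stand-in for struct lpfc_nvmet_ctxbuf */
	struct ctxbuf *next;
	int xri;         /* SGL/XRI permanently bound to this context */
};

static struct ctxbuf *ctx_free_list;

static void ctx_pool_init(int count)
{
	for (int i = 0; i < count; i++) {
		struct ctxbuf *cb = malloc(sizeof(*cb));

		if (!cb)
			break;
		cb->xri = i;
		cb->next = ctx_free_list;  /* push onto the free list */
		ctx_free_list = cb;
	}
}

/* On command arrival: take a context, repost the RQ buffer immediately. */
static struct ctxbuf *handle_rqe(int *rq_credits)
{
	struct ctxbuf *cb = ctx_free_list;

	if (!cb)
		return NULL;     /* out of IO contexts, not out of RQEs */
	ctx_free_list = cb->next;
	(*rq_credits)++;         /* hdr/data buffer recycled right away */
	return cb;
}

int main(void)
{
	int rq_credits = RQ_ENTRIES - 1;  /* one RQE consumed by an arriving cmd */
	struct ctxbuf *cb;

	ctx_pool_init(NVMET_XRIS);
	cb = handle_rqe(&rq_credits);
	printf("xri=%d rq_credits=%d\n", cb ? cb->xri : -1, rq_credits);
	return 0;
}

Because handle_rqe() returns the RQE credit before the IO completes, the
number of commands in flight is bounded by the context pool, not by the RQ
depth, which is what lets the patch fix the RQE count at
LPFC_NVMET_RQE_DEF_COUNT (512).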
@@ -141,6 +141,13 @@ struct lpfc_dmabuf {
 	uint32_t buffer_tag;	/* used for tagged queue ring */
 };
 
+struct lpfc_nvmet_ctxbuf {
+	struct list_head list;
+	struct lpfc_nvmet_rcv_ctx *context;
+	struct lpfc_iocbq *iocbq;
+	struct lpfc_sglq *sglq;
+};
+
 struct lpfc_dma_pool {
 	struct lpfc_dmabuf *elements;
 	uint32_t max_count;
@@ -163,9 +170,6 @@ struct rqb_dmabuf {
 	struct lpfc_dmabuf dbuf;
 	uint16_t total_size;
 	uint16_t bytes_recv;
-	void *context;
-	struct lpfc_iocbq *iocbq;
-	struct lpfc_sglq *sglq;
 	struct lpfc_queue *hrq;	  /* ptr to associated Header RQ */
 	struct lpfc_queue *drq;	  /* ptr to associated Data RQ */
 };
@@ -777,7 +781,6 @@ struct lpfc_hba {
 	uint32_t cfg_nvme_oas;
 	uint32_t cfg_nvme_io_channel;
 	uint32_t cfg_nvmet_mrq;
-	uint32_t cfg_nvmet_mrq_post;
 	uint32_t cfg_enable_nvmet;
 	uint32_t cfg_nvme_enable_fb;
 	uint32_t cfg_nvmet_fb_size;
...
...@@ -3315,14 +3315,6 @@ LPFC_ATTR_R(nvmet_mrq, ...@@ -3315,14 +3315,6 @@ LPFC_ATTR_R(nvmet_mrq,
1, 1, 16, 1, 1, 16,
"Specify number of RQ pairs for processing NVMET cmds"); "Specify number of RQ pairs for processing NVMET cmds");
/*
* lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ
*
*/
LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST,
"Specify number of buffers to post on every MRQ");
/* /*
* lpfc_enable_fc4_type: Defines what FC4 types are supported. * lpfc_enable_fc4_type: Defines what FC4 types are supported.
* Supported Values: 1 - register just FCP * Supported Values: 1 - register just FCP
...@@ -5158,7 +5150,6 @@ struct device_attribute *lpfc_hba_attrs[] = { ...@@ -5158,7 +5150,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_suppress_rsp, &dev_attr_lpfc_suppress_rsp,
&dev_attr_lpfc_nvme_io_channel, &dev_attr_lpfc_nvme_io_channel,
&dev_attr_lpfc_nvmet_mrq, &dev_attr_lpfc_nvmet_mrq,
&dev_attr_lpfc_nvmet_mrq_post,
&dev_attr_lpfc_nvme_enable_fb, &dev_attr_lpfc_nvme_enable_fb,
&dev_attr_lpfc_nvmet_fb_size, &dev_attr_lpfc_nvmet_fb_size,
&dev_attr_lpfc_enable_bg, &dev_attr_lpfc_enable_bg,
...@@ -6198,7 +6189,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) ...@@ -6198,7 +6189,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type); lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq); lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
/* Initialize first burst. Target vs Initiator are different. */ /* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
...@@ -6295,7 +6285,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) ...@@ -6295,7 +6285,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
/* Not NVME Target mode. Turn off Target parameters. */ /* Not NVME Target mode. Turn off Target parameters. */
phba->nvmet_support = 0; phba->nvmet_support = 0;
phba->cfg_nvmet_mrq = 0; phba->cfg_nvmet_mrq = 0;
phba->cfg_nvmet_mrq_post = 0;
phba->cfg_nvmet_fb_size = 0; phba->cfg_nvmet_fb_size = 0;
} }
......
@@ -75,6 +75,8 @@ void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
 void lpfc_retry_pport_discovery(struct lpfc_hba *);
 void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
+int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
+void lpfc_free_iocb_list(struct lpfc_hba *phba);
 void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -246,16 +248,14 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
 void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
 struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
 void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
-void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
-			struct lpfc_dmabuf *mp);
+void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
+			    struct lpfc_nvmet_ctxbuf *ctxp);
 int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 			       struct fc_frame_header *fc_hdr);
 void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
 				     uint16_t);
 int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
 		     struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe);
-int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq,
-			struct lpfc_queue *dq, int count);
 int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq);
 void lpfc_unregister_fcf(struct lpfc_hba *);
 void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
...
@@ -1099,7 +1099,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
 			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
-			lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 		}
 	}
@@ -3381,7 +3381,7 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 {
 	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
 	uint16_t i, lxri, xri_cnt, els_xri_cnt;
-	uint16_t nvmet_xri_cnt, tot_cnt;
+	uint16_t nvmet_xri_cnt;
 	LIST_HEAD(nvmet_sgl_list);
 	int rc;
@@ -3389,20 +3389,9 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 	 * update on pci function's nvmet xri-sgl list
 	 */
 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-	nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
 
-	/* Ensure we at least meet the minimum for the system */
-	if (nvmet_xri_cnt < LPFC_NVMET_RQE_DEF_COUNT)
-		nvmet_xri_cnt = LPFC_NVMET_RQE_DEF_COUNT;
-
-	tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-	if (nvmet_xri_cnt > tot_cnt) {
-		phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
-		nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
-		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-				"6301 NVMET post-sgl count changed to %d\n",
-				phba->cfg_nvmet_mrq_post);
-	}
+	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
+	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+
 	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
 		/* els xri-sgl expanded */
@@ -5835,6 +5824,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+
 		/* Fast-path XRI aborted CQ Event work queue list */
 		INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
 	}
@@ -6279,7 +6270,7 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
  *
  * This routine is invoked to free the driver's IOCB list and memory.
  **/
-static void
+void
 lpfc_free_iocb_list(struct lpfc_hba *phba)
 {
 	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
@@ -6307,7 +6298,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba)
  *	0 - successful
  *	other values - error
  **/
-static int
+int
 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
 {
 	struct lpfc_iocbq *iocbq_entry = NULL;
@@ -8321,46 +8312,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
 }
 
-int
-lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
-		    struct lpfc_queue *drq, int count)
-{
-	int rc, i;
-	struct lpfc_rqe hrqe;
-	struct lpfc_rqe drqe;
-	struct lpfc_rqb *rqbp;
-	struct rqb_dmabuf *rqb_buffer;
-	LIST_HEAD(rqb_buf_list);
-
-	rqbp = hrq->rqbp;
-	for (i = 0; i < count; i++) {
-		rqb_buffer = (rqbp->rqb_alloc_buffer)(phba);
-		if (!rqb_buffer)
-			break;
-		rqb_buffer->hrq = hrq;
-		rqb_buffer->drq = drq;
-		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
-	}
-	while (!list_empty(&rqb_buf_list)) {
-		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
-				 hbuf.list);
-		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
-		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
-		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
-		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
-		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
-		if (rc < 0) {
-			(rqbp->rqb_free_buffer)(phba, rqb_buffer);
-		} else {
-			list_add_tail(&rqb_buffer->hbuf.list,
-				      &rqbp->rqb_buffer_list);
-			rqbp->buffer_count++;
-		}
-	}
-	return 1;
-}
-
 int
 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
 {
@@ -11103,7 +11054,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	struct lpfc_hba   *phba;
 	struct lpfc_vport *vport = NULL;
 	struct Scsi_Host  *shost = NULL;
-	int error, cnt, num;
+	int error;
 	uint32_t cfg_mode, intr_mode;
 
 	/* Allocate memory for HBA structure */
@@ -11137,27 +11088,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_unset_pci_mem_s4;
 	}
 
-	cnt = phba->cfg_iocb_cnt * 1024;
-	if (phba->nvmet_support) {
-		/* Ensure we at least meet the minimum for the system */
-		num = (phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq);
-		if (num < LPFC_NVMET_RQE_DEF_COUNT)
-			num = LPFC_NVMET_RQE_DEF_COUNT;
-		cnt += num;
-	}
-
-	/* Initialize and populate the iocb list per host */
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"2821 initialize iocb list %d total %d\n",
-			phba->cfg_iocb_cnt, cnt);
-	error = lpfc_init_iocb_list(phba, cnt);
-	if (error) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1413 Failed to initialize iocb list.\n");
-		goto out_unset_driver_resource_s4;
-	}
-
 	INIT_LIST_HEAD(&phba->active_rrq_list);
 	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
@@ -11166,7 +11096,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1414 Failed to set up driver resource.\n");
-		goto out_free_iocb_list;
+		goto out_unset_driver_resource_s4;
 	}
 
 	/* Get the default values for Model Name and Description */
@@ -11266,8 +11196,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	lpfc_destroy_shost(phba);
 out_unset_driver_resource:
 	lpfc_unset_driver_resource_phase2(phba);
-out_free_iocb_list:
-	lpfc_free_iocb_list(phba);
 out_unset_driver_resource_s4:
 	lpfc_sli4_driver_resource_unset(phba);
 out_unset_pci_mem_s4:
...
@@ -629,8 +629,6 @@ struct rqb_dmabuf *
 lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 {
 	struct rqb_dmabuf *dma_buf;
-	struct lpfc_iocbq *nvmewqe;
-	union lpfc_wqe128 *wqe;
 
 	dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
 	if (!dma_buf)
@@ -651,60 +649,6 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 		return NULL;
 	}
 	dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
-
-	dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx),
-				   GFP_KERNEL);
-	if (!dma_buf->context) {
-		pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
-			      dma_buf->dbuf.phys);
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-			      dma_buf->hbuf.phys);
-		kfree(dma_buf);
-		return NULL;
-	}
-
-	dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
-	if (!dma_buf->iocbq) {
-		kfree(dma_buf->context);
-		pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
-			      dma_buf->dbuf.phys);
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-			      dma_buf->hbuf.phys);
-		kfree(dma_buf);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
-				"2621 Ran out of nvmet iocb/WQEs\n");
-		return NULL;
-	}
-	dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
-	nvmewqe = dma_buf->iocbq;
-	wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
-	/* Initialize WQE */
-	memset(wqe, 0, sizeof(union lpfc_wqe));
-	/* Word 7 */
-	bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
-	bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
-	bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
-	/* Word 10 */
-	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
-	bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
-	bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
-
-	dma_buf->iocbq->context1 = NULL;
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
-	dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq);
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
-	if (!dma_buf->sglq) {
-		lpfc_sli_release_iocbq(phba, dma_buf->iocbq);
-		kfree(dma_buf->context);
-		pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
-			      dma_buf->dbuf.phys);
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-			      dma_buf->hbuf.phys);
-		kfree(dma_buf);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
-				"6132 Ran out of nvmet XRIs\n");
-		return NULL;
-	}
 	return dma_buf;
 }
@@ -723,18 +667,6 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 void
 lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
 {
-	unsigned long flags;
-
-	__lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag);
-	dmab->sglq->state = SGL_FREED;
-	dmab->sglq->ndlp = NULL;
-
-	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
-	list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list);
-	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags);
-
-	lpfc_sli_release_iocbq(phba, dmab->iocbq);
-	kfree(dmab->context);
 	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
 	pci_pool_free(phba->lpfc_nvmet_drb_pool,
 		      dmab->dbuf.virt, dmab->dbuf.phys);
@@ -822,6 +754,11 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
 	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
 	if (rc < 0) {
 		(rqbp->rqb_free_buffer)(phba, rqb_entry);
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6409 Cannot post to RQ %d: %x %x\n",
+				rqb_entry->hrq->queue_id,
+				rqb_entry->hrq->host_index,
+				rqb_entry->hrq->hba_index);
 	} else {
 		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
 		rqbp->buffer_count++;
...
This diff is collapsed.
@@ -106,6 +106,7 @@ struct lpfc_nvmet_rcv_ctx {
 #define LPFC_NVMET_CTX_RLS		0x8  /* ctx free requested */
 #define LPFC_NVMET_ABTS_RCV		0x10 /* ABTS received on exchange */
 	struct rqb_dmabuf *rqb_buffer;
+	struct lpfc_nvmet_ctxbuf *ctxbuf;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint64_t ts_isr_cmd;
...
@@ -6513,6 +6513,49 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 		 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
 }
 
+static int
+lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+		    struct lpfc_queue *drq, int count)
+{
+	int rc, i;
+	struct lpfc_rqe hrqe;
+	struct lpfc_rqe drqe;
+	struct lpfc_rqb *rqbp;
+	struct rqb_dmabuf *rqb_buffer;
+	LIST_HEAD(rqb_buf_list);
+
+	rqbp = hrq->rqbp;
+	for (i = 0; i < count; i++) {
+		/* IF RQ is already full, don't bother */
+		if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
+			break;
+		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
+		if (!rqb_buffer)
+			break;
+		rqb_buffer->hrq = hrq;
+		rqb_buffer->drq = drq;
+		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
+	}
+	while (!list_empty(&rqb_buf_list)) {
+		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
+				 hbuf.list);
+
+		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
+		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
+		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
+		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
+		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
+		if (rc < 0) {
+			rqbp->rqb_free_buffer(phba, rqb_buffer);
+		} else {
+			list_add_tail(&rqb_buffer->hbuf.list,
+				      &rqbp->rqb_buffer_list);
+			rqbp->buffer_count++;
+		}
+	}
+	return 1;
+}
+
 /**
  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
  * @phba: Pointer to HBA context object.
@@ -6525,7 +6568,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 int
 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 {
-	int rc, i;
+	int rc, i, cnt;
 	LPFC_MBOXQ_t *mboxq;
 	struct lpfc_mqe *mqe;
 	uint8_t *vpd;
@@ -6876,6 +6919,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 			goto out_destroy_queue;
 		}
 		phba->sli4_hba.nvmet_xri_cnt = rc;
+
+		cnt = phba->cfg_iocb_cnt * 1024;
+		/* We need 1 iocbq for every SGL, for IO processing */
+		cnt += phba->sli4_hba.nvmet_xri_cnt;
+		/* Initialize and populate the iocb list per host */
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2821 initialize iocb list %d total %d\n",
+				phba->cfg_iocb_cnt, cnt);
+		rc = lpfc_init_iocb_list(phba, cnt);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1413 Failed to init iocb list.\n");
+			goto out_destroy_queue;
+		}
+
 		lpfc_nvmet_create_targetport(phba);
 	} else {
 		/* update host scsi xri-sgl sizes and mappings */
@@ -6895,10 +6953,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 					"and mapping: %d\n", rc);
 			goto out_destroy_queue;
 		}
+
+		cnt = phba->cfg_iocb_cnt * 1024;
+		/* Initialize and populate the iocb list per host */
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2820 initialize iocb list %d total %d\n",
+				phba->cfg_iocb_cnt, cnt);
+		rc = lpfc_init_iocb_list(phba, cnt);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6301 Failed to init iocb list.\n");
+			goto out_destroy_queue;
+		}
 	}
 
 	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
 		/* Post initial buffers to all RQs created */
 		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
 			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
@@ -6911,7 +6980,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 			lpfc_post_rq_buffer(
 				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
 				phba->sli4_hba.nvmet_mrq_data[i],
-				phba->cfg_nvmet_mrq_post);
+				LPFC_NVMET_RQE_DEF_COUNT);
 		}
 	}
@@ -7078,6 +7147,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	/* Unset all the queues set up in this routine when error out */
 	lpfc_sli4_queue_unset(phba);
 out_destroy_queue:
+	lpfc_free_iocb_list(phba);
 	lpfc_sli4_queue_destroy(phba);
 out_stop_timers:
 	lpfc_stop_hba_timers(phba);
@@ -18731,7 +18801,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
 		spin_lock_irqsave(&pring->ring_lock, iflags);
 		ctxp = pwqe->context2;
-		sglq = ctxp->rqb_buffer->sglq;
+		sglq = ctxp->ctxbuf->sglq;
 		if (pwqe->sli4_xritag == NO_XRI) {
 			pwqe->sli4_lxritag = sglq->sli4_lxritag;
 			pwqe->sli4_xritag = sglq->sli4_xritag;
...
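Note how the iocb list sizing above now keys off the firmware-negotiated
SGL count rather than the removed module parameter: in NVMET mode the list
is cfg_iocb_cnt * 1024 entries plus one iocbq per NVMET XRI. Assuming the
default cfg_iocb_cnt of 2, a firmware grant of 2048 NVMET XRIs would yield
2048 + 2048 = 4096 iocbqs. This is also why lpfc_init_iocb_list() moves
from probe time (lpfc_init.c) into lpfc_sli4_hba_setup(): nvmet_xri_cnt is
only known after the XRI resources have been read back from the firmware.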
@@ -618,10 +618,12 @@ struct lpfc_sli4_hba {
 	uint16_t scsi_xri_start;
 	uint16_t els_xri_cnt;
 	uint16_t nvmet_xri_cnt;
+	uint16_t nvmet_ctx_cnt;
 	struct list_head lpfc_els_sgl_list;
 	struct list_head lpfc_abts_els_sgl_list;
 	struct list_head lpfc_nvmet_sgl_list;
 	struct list_head lpfc_abts_nvmet_ctx_list;
+	struct list_head lpfc_nvmet_ctx_list;
 	struct list_head lpfc_abts_scsi_buf_list;
 	struct list_head lpfc_abts_nvme_buf_list;
 	struct lpfc_sglq **lpfc_sglq_active_list;
@@ -662,8 +664,6 @@ struct lpfc_sli4_hba {
 	uint16_t num_online_cpu;
 	uint16_t num_present_cpu;
 	uint16_t curr_disp_cpu;
-
-	uint16_t nvmet_mrq_post_idx;
 };
 
 enum lpfc_sge_type {
...