Commit 3c603be9 authored by James Smart's avatar James Smart Committed by Martin K. Petersen

scsi: lpfc: Separate NVMET data buffer pool for ELS/CT.

Currently a 2048 byte buffer is used although only 128 bytes are needed.

Create a new LPFC_NVMET_DATA_BUF_SIZE define to use for NVMET RQ/MRQs.
Signed-off-by: default avatarDick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: default avatarJames Smart <james.smart@broadcom.com>
Reviewed-by: default avatarHannes Reinecke <hare@suse.com>
Signed-off-by: default avatarMartin K. Petersen <martin.petersen@oracle.com>
parent 7869da18
...@@ -943,6 +943,7 @@ struct lpfc_hba { ...@@ -943,6 +943,7 @@ struct lpfc_hba {
struct pci_pool *lpfc_mbuf_pool; struct pci_pool *lpfc_mbuf_pool;
struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
struct pci_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
struct pci_pool *txrdy_payload_pool; struct pci_pool *txrdy_payload_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool; struct lpfc_dma_pool lpfc_mbuf_safety_pool;
......
...@@ -271,6 +271,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); ...@@ -271,6 +271,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *); void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
int lpfc_mem_alloc(struct lpfc_hba *, int align); int lpfc_mem_alloc(struct lpfc_hba *, int align);
int lpfc_nvmet_mem_alloc(struct lpfc_hba *phba);
int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *); int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
void lpfc_mem_free(struct lpfc_hba *); void lpfc_mem_free(struct lpfc_hba *);
void lpfc_mem_free_all(struct lpfc_hba *); void lpfc_mem_free_all(struct lpfc_hba *);
......
...@@ -1356,6 +1356,7 @@ struct lpfc_mbx_wq_destroy { ...@@ -1356,6 +1356,7 @@ struct lpfc_mbx_wq_destroy {
#define LPFC_HDR_BUF_SIZE 128 #define LPFC_HDR_BUF_SIZE 128
#define LPFC_DATA_BUF_SIZE 2048 #define LPFC_DATA_BUF_SIZE 2048
#define LPFC_NVMET_DATA_BUF_SIZE 128
struct rq_context { struct rq_context {
uint32_t word0; uint32_t word0;
#define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */ #define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */
......
...@@ -5956,16 +5956,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) ...@@ -5956,16 +5956,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
if (wwn == lpfc_enable_nvmet[i]) { if (wwn == lpfc_enable_nvmet[i]) {
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
if (lpfc_nvmet_mem_alloc(phba))
break;
phba->nvmet_support = 1; /* a match */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6017 NVME Target %016llx\n", "6017 NVME Target %016llx\n",
wwn); wwn);
phba->nvmet_support = 1; /* a match */
#else #else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6021 Can't enable NVME Target." "6021 Can't enable NVME Target."
" NVME_TARGET_FC infrastructure" " NVME_TARGET_FC infrastructure"
" is not in kernel\n"); " is not in kernel\n");
#endif #endif
break;
} }
} }
} }
......
...@@ -214,6 +214,21 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) ...@@ -214,6 +214,21 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
return -ENOMEM; return -ENOMEM;
} }
/**
 * lpfc_nvmet_mem_alloc - Create the NVMET data receive buffer pci pool
 * @phba: HBA whose lpfc_nvmet_drb_pool is to be created
 *
 * Creates a dedicated pci pool of LPFC_NVMET_DATA_BUF_SIZE (128-byte)
 * buffers, aligned to SGL_ALIGN_SZ, for NVMET RQ/MRQ data receives —
 * separate from the 2048-byte lpfc_drb_pool used for ELS/CT traffic.
 *
 * Return: 0 on success; -ENOMEM (after logging message 6024) if the
 * pool cannot be created.
 */
int
lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
{
phba->lpfc_nvmet_drb_pool =
pci_pool_create("lpfc_nvmet_drb_pool",
phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE,
SGL_ALIGN_SZ, 0);
if (!phba->lpfc_nvmet_drb_pool) {
/* No pool: NVMET cannot be enabled; caller must bail out */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6024 Can't enable NVME Target - no memory\n");
return -ENOMEM;
}
return 0;
}
/** /**
* lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
* @phba: HBA to free memory for * @phba: HBA to free memory for
...@@ -232,6 +247,9 @@ lpfc_mem_free(struct lpfc_hba *phba) ...@@ -232,6 +247,9 @@ lpfc_mem_free(struct lpfc_hba *phba)
/* Free HBQ pools */ /* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba); lpfc_sli_hbqbuf_free_all(phba);
if (phba->lpfc_nvmet_drb_pool)
pci_pool_destroy(phba->lpfc_nvmet_drb_pool);
phba->lpfc_nvmet_drb_pool = NULL;
if (phba->lpfc_drb_pool) if (phba->lpfc_drb_pool)
pci_pool_destroy(phba->lpfc_drb_pool); pci_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL; phba->lpfc_drb_pool = NULL;
...@@ -624,20 +642,20 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) ...@@ -624,20 +642,20 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
kfree(dma_buf); kfree(dma_buf);
return NULL; return NULL;
} }
dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool,
&dma_buf->dbuf.phys); GFP_KERNEL, &dma_buf->dbuf.phys);
if (!dma_buf->dbuf.virt) { if (!dma_buf->dbuf.virt) {
pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
dma_buf->hbuf.phys); dma_buf->hbuf.phys);
kfree(dma_buf); kfree(dma_buf);
return NULL; return NULL;
} }
dma_buf->total_size = LPFC_DATA_BUF_SIZE; dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx),
GFP_KERNEL); GFP_KERNEL);
if (!dma_buf->context) { if (!dma_buf->context) {
pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
dma_buf->dbuf.phys); dma_buf->dbuf.phys);
pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
dma_buf->hbuf.phys); dma_buf->hbuf.phys);
...@@ -648,7 +666,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) ...@@ -648,7 +666,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
dma_buf->iocbq = lpfc_sli_get_iocbq(phba); dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
if (!dma_buf->iocbq) { if (!dma_buf->iocbq) {
kfree(dma_buf->context); kfree(dma_buf->context);
pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
dma_buf->dbuf.phys); dma_buf->dbuf.phys);
pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
dma_buf->hbuf.phys); dma_buf->hbuf.phys);
...@@ -678,7 +696,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) ...@@ -678,7 +696,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
if (!dma_buf->sglq) { if (!dma_buf->sglq) {
lpfc_sli_release_iocbq(phba, dma_buf->iocbq); lpfc_sli_release_iocbq(phba, dma_buf->iocbq);
kfree(dma_buf->context); kfree(dma_buf->context);
pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
dma_buf->dbuf.phys); dma_buf->dbuf.phys);
pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
dma_buf->hbuf.phys); dma_buf->hbuf.phys);
...@@ -718,7 +736,8 @@ lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab) ...@@ -718,7 +736,8 @@ lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
lpfc_sli_release_iocbq(phba, dmab->iocbq); lpfc_sli_release_iocbq(phba, dmab->iocbq);
kfree(dmab->context); kfree(dmab->context);
pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); pci_pool_free(phba->lpfc_nvmet_drb_pool,
dmab->dbuf.virt, dmab->dbuf.phys);
kfree(dmab); kfree(dmab);
} }
......
...@@ -15079,7 +15079,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, ...@@ -15079,7 +15079,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
bf_set(lpfc_rq_context_rqe_count_1, bf_set(lpfc_rq_context_rqe_count_1,
&rq_create->u.request.context, hrq->entry_count); &rq_create->u.request.context, hrq->entry_count);
rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; if (subtype == LPFC_NVMET)
rq_create->u.request.context.buffer_size =
LPFC_NVMET_DATA_BUF_SIZE;
else
rq_create->u.request.context.buffer_size =
LPFC_DATA_BUF_SIZE;
bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
LPFC_RQE_SIZE_8); LPFC_RQE_SIZE_8);
bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
...@@ -15116,8 +15121,14 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, ...@@ -15116,8 +15121,14 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
LPFC_RQ_RING_SIZE_4096); LPFC_RQ_RING_SIZE_4096);
break; break;
} }
bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, if (subtype == LPFC_NVMET)
LPFC_DATA_BUF_SIZE); bf_set(lpfc_rq_context_buf_size,
&rq_create->u.request.context,
LPFC_NVMET_DATA_BUF_SIZE);
else
bf_set(lpfc_rq_context_buf_size,
&rq_create->u.request.context,
LPFC_DATA_BUF_SIZE);
} }
bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
cq->queue_id); cq->queue_id);
...@@ -15263,7 +15274,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, ...@@ -15263,7 +15274,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
cq->queue_id); cq->queue_id);
bf_set(lpfc_rq_context_data_size, bf_set(lpfc_rq_context_data_size,
&rq_create->u.request.context, &rq_create->u.request.context,
LPFC_DATA_BUF_SIZE); LPFC_NVMET_DATA_BUF_SIZE);
bf_set(lpfc_rq_context_hdr_size, bf_set(lpfc_rq_context_hdr_size,
&rq_create->u.request.context, &rq_create->u.request.context,
LPFC_HDR_BUF_SIZE); LPFC_HDR_BUF_SIZE);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment