Commit 2d7dbc4c authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: NVME Target: Receive buffer updates

NVME Target: Receive buffer updates

Allocates buffer pools and configures adapter interfaces to handle
receive buffers (asynchronous FCP CMD IUs, first burst data)
from the adapter, split out by protocol.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent f358dd0c
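For orientation, this is the receive-buffer flow the patch sets up: per-MRQ header/data RQ pairs are created, and cfg_nvmet_mrq_post buffers are posted to each pair so the adapter can deliver unsolicited NVME FCP CMD IUs and first-burst data. A minimal sketch of the posting loop, using the fields this patch adds; the helper name lpfc_post_rq_buffer() and its signature are assumptions for illustration, not quoted from the patch:

/* Post cfg_nvmet_mrq_post receive buffers to each NVMET MRQ pair
 * (hypothetical helper; the arrays and cfg_ fields are the ones
 * added below). */
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++)
	lpfc_post_rq_buffer(phba,
			    phba->sli4_hba.nvmet_mrq_hdr[idx],
			    phba->sli4_hba.nvmet_mrq_data[idx],
			    phba->cfg_nvmet_mrq_post);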
@@ -770,8 +770,11 @@ struct lpfc_hba {
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_io_channel;
uint32_t cfg_nvmet_mrq;
uint32_t cfg_nvmet_mrq_post;
uint32_t cfg_enable_nvmet;
uint32_t cfg_nvme_enable_fb;
uint32_t cfg_nvmet_fb_size;
uint32_t cfg_total_seg_cnt;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
......
@@ -58,6 +58,10 @@
#define LPFC_MIN_DEVLOSS_TMO 1
#define LPFC_MAX_DEVLOSS_TMO 255
#define LPFC_DEF_MRQ_POST 256
#define LPFC_MIN_MRQ_POST 32
#define LPFC_MAX_MRQ_POST 512
/*
* Write key size should be multiple of 4. If write key is changed
* make sure that library write key is also changed.
@@ -3281,6 +3285,24 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
"Enable suppress rsp feature is firmware supports it");
/*
* lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
* lpfc_nvmet_mrq = 1 use a single RQ pair
* lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
*
*/
LPFC_ATTR_R(nvmet_mrq,
1, 1, 16,
"Specify number of RQ pairs for processing NVMET cmds");
/*
* lpfc_nvmet_mrq_post: Specify number of buffers to post on every MRQ
*
*/
LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST,
"Specify number of buffers to post on every MRQ");
/*
* lpfc_enable_fc4_type: Defines what FC4 types are supported.
* Supported Values: 1 - register just FCP
@@ -4657,13 +4679,28 @@ LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
"First burst size for Targets that support first burst");
/*
* lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
* For the Initiator (I), enabling this parameter means that an NVME
* PRLI response with FBA enabled and an FB_SIZE set to a nonzero value
* will be processed by the initiator for subsequent NVME FCP IO.
* lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
* When the driver is configured as an NVME target, this value is
* communicated to the NVME initiator in the PRLI response. It is
* used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support
* parameters are set and the target is sending the PRLI RSP.
* Parameter supported on physical port only - no NPIV support.
* Value range is [0,1]. Default value is 0 (disabled).
* Value range is [0,65536]. Default value is 0.
*/
LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
"NVME Target mode first burst size in 512B increments.");
/*
* lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
* For the Initiator (I), enabling this parameter means that an NVMET
* PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
* processed by the initiator for subsequent NVME FCP IO. For the target
* function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size
* driver parameter as the target function's first burst size returned to the
* initiator in the target's NVME PRLI response. Parameter supported on physical
* port only - no NPIV support.
* Value range is [0,1]. Default value is 0 (disabled).
*/
LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
"Enable First Burst feature on I and T functions.");
@@ -5099,7 +5136,10 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fcp_io_channel,
&dev_attr_lpfc_suppress_rsp,
&dev_attr_lpfc_nvme_io_channel,
&dev_attr_lpfc_nvmet_mrq,
&dev_attr_lpfc_nvmet_mrq_post,
&dev_attr_lpfc_nvme_enable_fb,
&dev_attr_lpfc_nvmet_fb_size,
&dev_attr_lpfc_enable_bg,
&dev_attr_lpfc_soft_wwnn,
&dev_attr_lpfc_soft_wwpn,
@@ -6136,9 +6176,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
/* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
@@ -6205,9 +6248,35 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
phba->nvmet_support) {
phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
phba->cfg_fcp_io_channel = 0;
} else
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6013 %s x%x fb_size x%x, fb_max x%x\n",
"NVME Target PRLI ACC enable_fb ",
phba->cfg_nvme_enable_fb,
phba->cfg_nvmet_fb_size,
LPFC_NVMET_FB_SZ_MAX);
if (phba->cfg_nvme_enable_fb == 0)
phba->cfg_nvmet_fb_size = 0;
else {
if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
}
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
}
} else {
/* Not NVME Target mode. Turn off Target parameters. */
phba->nvmet_support = 0;
phba->cfg_nvmet_mrq = 0;
phba->cfg_nvmet_mrq_post = 0;
phba->cfg_nvmet_fb_size = 0;
}
if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
......
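A worked example of the dependency fix-ups above: loading with lpfc_nvmet_mrq=16 but lpfc_nvme_io_channel=4 trips the WQE-slot guard, logs message 6018, and clamps cfg_nvmet_mrq to 4. Likewise, with lpfc_nvme_enable_fb=0 the driver forces cfg_nvmet_fb_size to 0, and outside NVME target mode all three nvmet_* parameters are zeroed.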
@@ -229,6 +229,7 @@ void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *);
void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode);
void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
......
@@ -2837,7 +2837,7 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
static int
lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
int *len, int max_cnt, int eq_id)
int *len, int max_cnt, int eqidx, int eq_id)
{
struct lpfc_queue *qp;
int qidx, rc;
@@ -2880,6 +2880,27 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
return 1;
}
if (phba->cfg_nvmet_mrq > eqidx) {
/* NVMET CQset */
qp = phba->sli4_hba.nvmet_cqset[eqidx];
*len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len);
/* Reset max counter */
qp->CQ_max_cqe = 0;
if (*len >= max_cnt)
return 1;
/* RQ header */
qp = phba->sli4_hba.nvmet_mrq_hdr[eqidx];
*len = __lpfc_idiag_print_rqpair(qp,
phba->sli4_hba.nvmet_mrq_data[eqidx],
"NVMET MRQ", pbuffer, *len);
if (*len >= max_cnt)
return 1;
}
return 0;
}
@@ -2977,7 +2998,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
/* will dump both fcp and nvme cqs/wqs for the eq */
rc = lpfc_idiag_cqs_for_eq(phba, pbuffer, &len,
max_cnt, qp->queue_id);
max_cnt, x, qp->queue_id);
if (rc)
goto too_big;
......
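The new eqidx argument is what drives the NVMET additions: for EQ indexes below cfg_nvmet_mrq, the routine now also dumps that index's NVMET CQset and its header/data MRQ pair, so the caller must pass the EQ's index (x) alongside its queue ID.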
@@ -2081,6 +2081,9 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
if (phba->max_vpi && phba->cfg_enable_npiv)
bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
if (phba->nvmet_support)
bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1);
return;
}
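The bf_set() calls used throughout these mailbox routines write one named bitfield of the mailbox payload via generated name##_SHIFT/_MASK/_WORD constants. For reference, lpfc_hw4.h defines the accessor along these lines (paraphrased):

#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))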
@@ -2448,6 +2451,26 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
/* addr mode is bit wise inverted value of fcf addr_mode */
bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
(~phba->fcf.addr_mode) & 0x3);
} else {
/* This is ONLY for NVMET MRQ == 1 */
if (phba->cfg_nvmet_mrq != 1)
return;
bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
/* Match type FCP - rq_id0 */
bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, FC_TYPE_FCP);
bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0xff);
bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi,
FC_RCTL_DD_UNSOL_CMD);
bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi,
phba->sli4_hba.hdr_rq->queue_id);
/* Match everything else - rq_id1 */
bf_set(lpfc_reg_fcfi_type_match1, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_type_mask1, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_rctl_match1, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_rctl_mask1, reg_fcfi, 0);
}
bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
@@ -2460,6 +2483,70 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
}
}
/**
* lpfc_reg_fcfi_mrq - Initialize the REG_FCFI_MRQ mailbox command
* @phba: pointer to the hba structure containing the FCF index and RQ ID.
* @mbox: pointer to lpfc mbox command to initialize.
* @mode: 0 to register FCFI, 1 to register MRQs
*
* The REG_FCFI_MRQ mailbox command supports Fibre Channel Forwarders (FCFs).
* The SLI Host uses the command to activate an FCF after it has acquired FCF
* information via a READ_FCF mailbox command. This mailbox command also is used
* to indicate where received unsolicited frames from this FCF will be sent. By
* default this routine will set up the FCF to forward all unsolicited frames
* the the RQ ID passed in the @phba. This can be overridden by the caller for
* more complicated setups.
**/
void
lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode)
{
struct lpfc_mbx_reg_fcfi_mrq *reg_fcfi;
/* This is ONLY for MRQ */
if (phba->cfg_nvmet_mrq <= 1)
return;
memset(mbox, 0, sizeof(*mbox));
reg_fcfi = &mbox->u.mqe.un.reg_fcfi_mrq;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI_MRQ);
if (mode == 0) {
bf_set(lpfc_reg_fcfi_mrq_info_index, reg_fcfi,
phba->fcf.current_rec.fcf_indx);
if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
bf_set(lpfc_reg_fcfi_mrq_vv, reg_fcfi, 1);
bf_set(lpfc_reg_fcfi_mrq_vlan_tag, reg_fcfi,
phba->fcf.current_rec.vlan_id);
}
return;
}
bf_set(lpfc_reg_fcfi_mrq_rq_id0, reg_fcfi,
phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
/* Match NVME frames of type FCP (protocol NVME) - rq_id0 */
bf_set(lpfc_reg_fcfi_mrq_type_match0, reg_fcfi, FC_TYPE_FCP);
bf_set(lpfc_reg_fcfi_mrq_type_mask0, reg_fcfi, 0xff);
bf_set(lpfc_reg_fcfi_mrq_rctl_match0, reg_fcfi, FC_RCTL_DD_UNSOL_CMD);
bf_set(lpfc_reg_fcfi_mrq_rctl_mask0, reg_fcfi, 0xff);
bf_set(lpfc_reg_fcfi_mrq_ptc0, reg_fcfi, 1);
bf_set(lpfc_reg_fcfi_mrq_pt0, reg_fcfi, 1);
bf_set(lpfc_reg_fcfi_mrq_policy, reg_fcfi, 3); /* NVME connection id */
bf_set(lpfc_reg_fcfi_mrq_mode, reg_fcfi, 1);
bf_set(lpfc_reg_fcfi_mrq_filter, reg_fcfi, 1); /* rq_id0 */
bf_set(lpfc_reg_fcfi_mrq_npairs, reg_fcfi, phba->cfg_nvmet_mrq);
bf_set(lpfc_reg_fcfi_mrq_rq_id1, reg_fcfi,
phba->sli4_hba.hdr_rq->queue_id);
/* Match everything - rq_id1 */
bf_set(lpfc_reg_fcfi_mrq_type_match1, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_mrq_type_mask1, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_mrq_rctl_match1, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_mrq_rctl_mask1, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_mrq_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_mrq_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
}
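Because REG_FCFI_MRQ carries two distinct payloads, the caller is expected to build and issue the mailbox twice, once per mode. A hedged sketch of that sequence, with allocation and error handling trimmed (lpfc_sli_issue_mbox() is the driver's existing mailbox-issue routine):

lpfc_reg_fcfi_mrq(phba, mboxq, 0);	/* mode 0: FCF record + VLAN */
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc == MBX_SUCCESS) {
	lpfc_reg_fcfi_mrq(phba, mboxq, 1);	/* mode 1: MRQ filters */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
}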
/**
* lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.
......
@@ -550,6 +550,9 @@ struct lpfc_sli4_hba {
struct lpfc_queue **hba_eq; /* Event queues for HBA */
struct lpfc_queue **fcp_cq; /* Fast-path FCP compl queue */
struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */
struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
struct lpfc_queue **fcp_wq; /* Fast-path FCP work queue */
struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */
uint16_t *fcp_cq_map;
@@ -655,6 +658,8 @@ struct lpfc_sli4_hba {
uint16_t num_online_cpu;
uint16_t num_present_cpu;
uint16_t curr_disp_cpu;
uint16_t nvmet_mrq_post_idx;
};
enum lpfc_sge_type {
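The three new queue-pointer arrays are sized by cfg_nvmet_mrq. A sketch of the allocation shape one would expect in lpfc_sli4_queue_create() (assumed, not quoted from this patch):

/* One CQ pointer and one hdr/data RQ pointer pair per configured MRQ. */
phba->sli4_hba.nvmet_cqset = kcalloc(phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_queue *), GFP_KERNEL);
phba->sli4_hba.nvmet_mrq_hdr = kcalloc(phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_queue *), GFP_KERNEL);
phba->sli4_hba.nvmet_mrq_data = kcalloc(phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_queue *), GFP_KERNEL);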
@@ -742,12 +747,18 @@ int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq);
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
struct lpfc_queue **eqp, uint32_t type,
uint32_t subtype);
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, struct lpfc_queue *, uint32_t);
int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
struct lpfc_queue **drqp, struct lpfc_queue **cqp,
uint32_t subtype);
void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
......
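Taken together, the new prototypes imply the bring-up order during queue setup: create the NVMET CQ set against the EQs first, then the header/data MRQ pairs against those CQs. A sketch assuming the LPFC_WCQ/LPFC_NVMET type constants:

rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
			phba->sli4_hba.hba_eq, LPFC_WCQ, LPFC_NVMET);
if (!rc)
	rc = lpfc_mrq_create(phba, phba->sli4_hba.nvmet_mrq_hdr,
			     phba->sli4_hba.nvmet_mrq_data,
			     phba->sli4_hba.nvmet_cqset, LPFC_NVMET);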