Commit a3667aae authored by Naresh Kumar Inna, committed by James Bottomley

[SCSI] csiostor: Chelsio FCoE offload driver

Signed-off-by: Naresh Kumar Inna <naresh@chelsio.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
parent ce91a923
@@ -1812,6 +1812,7 @@ config SCSI_VIRTIO
 	  This is the virtual HBA driver for virtio. If the kernel will
 	  be used in a virtual machine, say Y or M.
+source "drivers/scsi/csiostor/Kconfig"
 endif # SCSI_LOWLEVEL
...
@@ -90,6 +90,7 @@ obj-$(CONFIG_SCSI_QLA_FC)	+= qla2xxx/
 obj-$(CONFIG_SCSI_QLA_ISCSI)	+= libiscsi.o qla4xxx/
 obj-$(CONFIG_SCSI_LPFC)		+= lpfc/
 obj-$(CONFIG_SCSI_BFA_FC)	+= bfa/
+obj-$(CONFIG_SCSI_CHELSIO_FCOE)	+= csiostor/
 obj-$(CONFIG_SCSI_PAS16)	+= pas16.o
 obj-$(CONFIG_SCSI_T128)		+= t128.o
 obj-$(CONFIG_SCSI_DMX3191D)	+= dmx3191d.o
...
config SCSI_CHELSIO_FCOE
	tristate "Chelsio Communications FCoE support"
	depends on PCI && SCSI
	select SCSI_FC_ATTRS
	select FW_LOADER
	help
	  This driver supports FCoE Offload functionality over
	  Chelsio T4-based 10Gb Converged Network Adapters.

	  For general information about Chelsio and our products, visit
	  our website at <http://www.chelsio.com>.

	  For customer support, please visit our customer support page at
	  <http://www.chelsio.com/support.html>.

	  Please send feedback to <linux-bugs@chelsio.com>.

	  To compile this driver as a module choose M here; the module
	  will be called csiostor.
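# Illustrative .config fragment (not part of this patch): with the entry
# above, building csiostor as a module amounts to something like the
# following; SCSI_FC_ATTRS and FW_LOADER are then pulled in by `select`.
CONFIG_SCSI=y
CONFIG_PCI=y
CONFIG_SCSI_LOWLEVEL=y
CONFIG_SCSI_CHELSIO_FCOE=m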
#
## Chelsio FCoE driver
#
##
ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o
csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \
csio_hw.o csio_isr.o csio_mb.o csio_rnode.o csio_wr.o
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <scsi/fc/fc_fs.h>
#include "csio_init.h"
static void
csio_vport_set_state(struct csio_lnode *ln);
/*
* csio_reg_rnode - Register a remote port with the FC transport.
* @rn: Rnode representing remote port.
*
* Call fc_remote_port_add() to register this remote port with the FC
* transport. If the remote port is an initiator, a target, or both,
* update its role accordingly.
*
*/
void
csio_reg_rnode(struct csio_rnode *rn)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
struct Scsi_Host *shost = csio_ln_to_shost(ln);
struct fc_rport_identifiers ids;
struct fc_rport *rport;
struct csio_service_parms *sp;
ids.node_name = wwn_to_u64(csio_rn_wwnn(rn));
ids.port_name = wwn_to_u64(csio_rn_wwpn(rn));
ids.port_id = rn->nport_id;
ids.roles = FC_RPORT_ROLE_UNKNOWN;
if (rn->role & CSIO_RNFR_INITIATOR || rn->role & CSIO_RNFR_TARGET) {
rport = rn->rport;
CSIO_ASSERT(rport != NULL);
goto update_role;
}
rn->rport = fc_remote_port_add(shost, 0, &ids);
if (!rn->rport) {
csio_ln_err(ln, "Failed to register rport = 0x%x.\n",
rn->nport_id);
return;
}
ln->num_reg_rnodes++;
rport = rn->rport;
spin_lock_irq(shost->host_lock);
*((struct csio_rnode **)rport->dd_data) = rn;
spin_unlock_irq(shost->host_lock);
sp = &rn->rn_sparm;
rport->maxframe_size = sp->csp.sp_bb_data;
if (ntohs(sp->clsp[2].cp_class) & FC_CPC_VALID)
rport->supported_classes = FC_COS_CLASS3;
else
rport->supported_classes = FC_COS_UNSPECIFIED;
update_role:
if (rn->role & CSIO_RNFR_INITIATOR)
ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
if (rn->role & CSIO_RNFR_TARGET)
ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
if (ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, ids.roles);
rn->scsi_id = rport->scsi_target_id;
csio_ln_dbg(ln, "Remote port x%x role 0x%x registered\n",
rn->nport_id, ids.roles);
}
/*
* csio_unreg_rnode - Unregister a remote port from the FC transport.
* @rn: Rnode representing remote port.
*
* Call fc_remote_port_delete() to unregister this remote port from the
* FC transport.
*
*/
void
csio_unreg_rnode(struct csio_rnode *rn)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
struct fc_rport *rport = rn->rport;
rn->role &= ~(CSIO_RNFR_INITIATOR | CSIO_RNFR_TARGET);
fc_remote_port_delete(rport);
ln->num_reg_rnodes--;
csio_ln_dbg(ln, "Remote port x%x un-registered\n", rn->nport_id);
}
/*
* csio_lnode_async_event - Handle async events from the local port.
* @ln: lnode representing local port.
* @fc_evt: FC event type.
*
* Async events from the local node that the FC transport/SCSI ML
* should be made aware of (e.g. RSCN).
*/
void
csio_lnode_async_event(struct csio_lnode *ln, enum csio_ln_fc_evt fc_evt)
{
switch (fc_evt) {
case CSIO_LN_FC_RSCN:
/* Get payload of rscn from ln */
/* For each RSCN entry */
/*
* fc_host_post_event(shost,
* fc_get_event_number(),
* FCH_EVT_RSCN,
* rscn_entry);
*/
break;
case CSIO_LN_FC_LINKUP:
/* send fc_host_post_event */
/* set vport state */
if (csio_is_npiv_ln(ln))
csio_vport_set_state(ln);
break;
case CSIO_LN_FC_LINKDOWN:
/* send fc_host_post_event */
/* set vport state */
if (csio_is_npiv_ln(ln))
csio_vport_set_state(ln);
break;
case CSIO_LN_FC_ATTRIB_UPDATE:
csio_fchost_attr_init(ln);
break;
default:
break;
}
}
/*
* csio_fchost_attr_init - Initialize FC transport attributes
* @ln: Lnode.
*
*/
void
csio_fchost_attr_init(struct csio_lnode *ln)
{
struct Scsi_Host *shost = csio_ln_to_shost(ln);
fc_host_node_name(shost) = wwn_to_u64(csio_ln_wwnn(ln));
fc_host_port_name(shost) = wwn_to_u64(csio_ln_wwpn(ln));
fc_host_supported_classes(shost) = FC_COS_CLASS3;
fc_host_max_npiv_vports(shost) =
(csio_lnode_to_hw(ln))->fres_info.max_vnps;
fc_host_supported_speeds(shost) = FC_PORTSPEED_10GBIT |
FC_PORTSPEED_1GBIT;
fc_host_maxframe_size(shost) = ln->ln_sparm.csp.sp_bb_data;
memset(fc_host_supported_fc4s(shost), 0,
sizeof(fc_host_supported_fc4s(shost)));
fc_host_supported_fc4s(shost)[7] = 1;
memset(fc_host_active_fc4s(shost), 0,
sizeof(fc_host_active_fc4s(shost)));
fc_host_active_fc4s(shost)[7] = 1;
}
/*
* csio_get_host_port_id - Return the lnode's nport_id to the FC transport;
* the port_id sysfs entry is populated/cached from this function.
*/
static void
csio_get_host_port_id(struct Scsi_Host *shost)
{
struct csio_lnode *ln = shost_priv(shost);
struct csio_hw *hw = csio_lnode_to_hw(ln);
spin_lock_irq(&hw->lock);
fc_host_port_id(shost) = ln->nport_id;
spin_unlock_irq(&hw->lock);
}
/*
* csio_get_host_port_type - Return FC local port type.
* @shost: scsi host.
*
*/
static void
csio_get_host_port_type(struct Scsi_Host *shost)
{
struct csio_lnode *ln = shost_priv(shost);
struct csio_hw *hw = csio_lnode_to_hw(ln);
spin_lock_irq(&hw->lock);
if (csio_is_npiv_ln(ln))
fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
else
fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
spin_unlock_irq(&hw->lock);
}
/*
* csio_get_host_port_state - Return FC local port state.
* @shost: scsi host.
*
*/
static void
csio_get_host_port_state(struct Scsi_Host *shost)
{
struct csio_lnode *ln = shost_priv(shost);
struct csio_hw *hw = csio_lnode_to_hw(ln);
char state[16];
spin_lock_irq(&hw->lock);
csio_lnode_state_to_str(ln, state);
if (!strcmp(state, "READY"))
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
else if (!strcmp(state, "OFFLINE"))
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
else
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
spin_unlock_irq(&hw->lock);
}
/*
* csio_get_host_speed - Return link speed to FC transport.
* @shost: scsi host.
*
*/
static void
csio_get_host_speed(struct Scsi_Host *shost)
{
struct csio_lnode *ln = shost_priv(shost);
struct csio_hw *hw = csio_lnode_to_hw(ln);
spin_lock_irq(&hw->lock);
switch (hw->pport[ln->portid].link_speed) {
case FW_PORT_CAP_SPEED_1G:
fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
break;
case FW_PORT_CAP_SPEED_10G:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
spin_unlock_irq(&hw->lock);
}
/*
* csio_get_host_fabric_name - Return fabric name
* @shost: scsi host.
*
*/
static void
csio_get_host_fabric_name(struct Scsi_Host *shost)
{
struct csio_lnode *ln = shost_priv(shost);
struct csio_rnode *rn = NULL;
struct csio_hw *hw = csio_lnode_to_hw(ln);
spin_lock_irq(&hw->lock);
rn = csio_rnode_lookup_portid(ln, FC_FID_FLOGI);
if (rn)
fc_host_fabric_name(shost) = wwn_to_u64(csio_rn_wwnn(rn));
else
fc_host_fabric_name(shost) = 0;
spin_unlock_irq(&hw->lock);
}
/*
* csio_get_stats - Return FC transport statistics.
* @shost: scsi host.
*
*/
static struct fc_host_statistics *
csio_get_stats(struct Scsi_Host *shost)
{
struct csio_lnode *ln = shost_priv(shost);
struct csio_hw *hw = csio_lnode_to_hw(ln);
struct fc_host_statistics *fhs = &ln->fch_stats;
struct fw_fcoe_port_stats fcoe_port_stats;
uint64_t seconds;
memset(&fcoe_port_stats, 0, sizeof(struct fw_fcoe_port_stats));
csio_get_phy_port_stats(hw, ln->portid, &fcoe_port_stats);
fhs->tx_frames += (fcoe_port_stats.tx_bcast_frames +
fcoe_port_stats.tx_mcast_frames +
fcoe_port_stats.tx_ucast_frames +
fcoe_port_stats.tx_offload_frames);
fhs->tx_words += (fcoe_port_stats.tx_bcast_bytes +
fcoe_port_stats.tx_mcast_bytes +
fcoe_port_stats.tx_ucast_bytes +
fcoe_port_stats.tx_offload_bytes) /
CSIO_WORD_TO_BYTE;
fhs->rx_frames += (fcoe_port_stats.rx_bcast_frames +
fcoe_port_stats.rx_mcast_frames +
fcoe_port_stats.rx_ucast_frames);
fhs->rx_words += (fcoe_port_stats.rx_bcast_bytes +
fcoe_port_stats.rx_mcast_bytes +
fcoe_port_stats.rx_ucast_bytes) /
CSIO_WORD_TO_BYTE;
fhs->error_frames += fcoe_port_stats.rx_err_frames;
fhs->fcp_input_requests += ln->stats.n_input_requests;
fhs->fcp_output_requests += ln->stats.n_output_requests;
fhs->fcp_control_requests += ln->stats.n_control_requests;
fhs->fcp_input_megabytes += ln->stats.n_input_bytes >> 20;
fhs->fcp_output_megabytes += ln->stats.n_output_bytes >> 20;
fhs->link_failure_count = ln->stats.n_link_down;
/* Seconds elapsed since the last reset */
seconds = jiffies_to_msecs(jiffies) - hw->stats.n_reset_start;
do_div(seconds, 1000);
fhs->seconds_since_last_reset = seconds;
return fhs;
}
/*
* csio_set_rport_loss_tmo - Set the rport dev loss timeout
* @rport: fc rport.
* @timeout: new value for dev loss tmo.
*
* If @timeout is non-zero, set dev_loss_tmo to @timeout; otherwise set
* dev_loss_tmo to one.
*/
static void
csio_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
if (timeout)
rport->dev_loss_tmo = timeout;
else
rport->dev_loss_tmo = 1;
}
static void
csio_vport_set_state(struct csio_lnode *ln)
{
struct fc_vport *fc_vport = ln->fc_vport;
struct csio_lnode *pln = ln->pln;
char state[16];
/* Set fc vport state based on physical lnode */
csio_lnode_state_to_str(pln, state);
if (strcmp(state, "READY")) {
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
return;
}
if (!(pln->flags & CSIO_LNF_NPIVSUPP)) {
fc_vport_set_state(fc_vport, FC_VPORT_NO_FABRIC_SUPP);
return;
}
/* Set fc vport state based on virtual lnode */
csio_lnode_state_to_str(ln, state);
if (strcmp(state, "READY")) {
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
return;
}
fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
static int
csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln)
{
struct csio_lnode *pln;
struct csio_mb *mbp;
struct fw_fcoe_vnp_cmd *rsp;
int ret = 0;
int retry = 0;
/* Issue VNP cmd to alloc vport */
/* Allocate Mbox request */
spin_lock_irq(&hw->lock);
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
if (!mbp) {
CSIO_INC_STATS(hw, n_err_nomem);
ret = -ENOMEM;
goto out;
}
pln = ln->pln;
ln->fcf_flowid = pln->fcf_flowid;
ln->portid = pln->portid;
csio_fcoe_vnp_alloc_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
pln->fcf_flowid, pln->vnp_flowid, 0,
csio_ln_wwnn(ln), csio_ln_wwpn(ln), NULL);
for (retry = 0; retry < 3; retry++) {
/* FW is expected to complete the VNP command in immediate mode
* without much delay. Otherwise, I/O latency would increase, since
* the HW lock is held until the VNP mailbox command completes.
*/
ret = csio_mb_issue(hw, mbp);
if (ret != -EBUSY)
break;
/* Retry if mbox returns busy */
spin_unlock_irq(&hw->lock);
msleep(2000);
spin_lock_irq(&hw->lock);
}
if (ret) {
csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
goto out_free;
}
/* Process Mbox response of VNP command */
rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n",
FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
ret = -EINVAL;
goto out_free;
}
ln->vnp_flowid = FW_FCOE_VNP_CMD_VNPI_GET(
ntohl(rsp->gen_wwn_to_vnpi));
memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);
csio_ln_dbg(ln, "FCOE VNPI: 0x%x\n", ln->vnp_flowid);
csio_ln_dbg(ln, "\tWWNN: %x%x%x%x%x%x%x%x\n",
ln->ln_sparm.wwnn[0], ln->ln_sparm.wwnn[1],
ln->ln_sparm.wwnn[2], ln->ln_sparm.wwnn[3],
ln->ln_sparm.wwnn[4], ln->ln_sparm.wwnn[5],
ln->ln_sparm.wwnn[6], ln->ln_sparm.wwnn[7]);
csio_ln_dbg(ln, "\tWWPN: %x%x%x%x%x%x%x%x\n",
ln->ln_sparm.wwpn[0], ln->ln_sparm.wwpn[1],
ln->ln_sparm.wwpn[2], ln->ln_sparm.wwpn[3],
ln->ln_sparm.wwpn[4], ln->ln_sparm.wwpn[5],
ln->ln_sparm.wwpn[6], ln->ln_sparm.wwpn[7]);
out_free:
mempool_free(mbp, hw->mb_mempool);
out:
spin_unlock_irq(&hw->lock);
return ret;
}
static int
csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln)
{
struct csio_lnode *pln;
struct csio_mb *mbp;
struct fw_fcoe_vnp_cmd *rsp;
int ret = 0;
int retry = 0;
/* Issue VNP cmd to free vport */
/* Allocate Mbox request */
spin_lock_irq(&hw->lock);
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
if (!mbp) {
CSIO_INC_STATS(hw, n_err_nomem);
ret = -ENOMEM;
goto out;
}
pln = ln->pln;
csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
ln->fcf_flowid, ln->vnp_flowid,
NULL);
for (retry = 0; retry < 3; retry++) {
ret = csio_mb_issue(hw, mbp);
if (ret != -EBUSY)
break;
/* Retry if mbox returns busy */
spin_unlock_irq(&hw->lock);
msleep(2000);
spin_lock_irq(&hw->lock);
}
if (ret) {
csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
goto out_free;
}
/* Process Mbox response of VNP command */
rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n",
FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
ret = -EINVAL;
}
out_free:
mempool_free(mbp, hw->mb_mempool);
out:
spin_unlock_irq(&hw->lock);
return ret;
}
static int
csio_vport_create(struct fc_vport *fc_vport, bool disable)
{
struct Scsi_Host *shost = fc_vport->shost;
struct csio_lnode *pln = shost_priv(shost);
struct csio_lnode *ln = NULL;
struct csio_hw *hw = csio_lnode_to_hw(pln);
uint8_t wwn[8];
int ret = -1;
ln = csio_shost_init(hw, &fc_vport->dev, false, pln);
if (!ln)
goto error;
if (fc_vport->node_name != 0) {
u64_to_wwn(fc_vport->node_name, wwn);
if (!CSIO_VALID_WWN(wwn)) {
csio_ln_err(ln,
"vport create failed. Invalid wwnn\n");
goto error;
}
memcpy(csio_ln_wwnn(ln), wwn, 8);
}
if (fc_vport->port_name != 0) {
u64_to_wwn(fc_vport->port_name, wwn);
if (!CSIO_VALID_WWN(wwn)) {
csio_ln_err(ln,
"vport create failed. Invalid wwpn\n");
goto error;
}
if (csio_lnode_lookup_by_wwpn(hw, wwn)) {
csio_ln_err(ln,
"vport create failed. wwpn already exists\n");
goto error;
}
memcpy(csio_ln_wwpn(ln), wwn, 8);
}
fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
if (csio_fcoe_alloc_vnp(hw, ln))
goto error;
*(struct csio_lnode **)fc_vport->dd_data = ln;
ln->fc_vport = fc_vport;
if (!fc_vport->node_name)
fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
if (!fc_vport->port_name)
fc_vport->port_name = wwn_to_u64(csio_ln_wwpn(ln));
csio_fchost_attr_init(ln);
return 0;
error:
if (ln)
csio_shost_exit(ln);
return ret;
}
static int
csio_vport_delete(struct fc_vport *fc_vport)
{
struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
struct Scsi_Host *shost = csio_ln_to_shost(ln);
struct csio_hw *hw = csio_lnode_to_hw(ln);
int rmv;
spin_lock_irq(&hw->lock);
rmv = csio_is_hw_removing(hw);
spin_unlock_irq(&hw->lock);
if (rmv) {
csio_shost_exit(ln);
return 0;
}
/* Quiesce ios and send remove event to lnode */
scsi_block_requests(shost);
spin_lock_irq(&hw->lock);
csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
csio_lnode_close(ln);
spin_unlock_irq(&hw->lock);
scsi_unblock_requests(shost);
/* Free vnp */
if (fc_vport->vport_state != FC_VPORT_DISABLED)
csio_fcoe_free_vnp(hw, ln);
csio_shost_exit(ln);
return 0;
}
static int
csio_vport_disable(struct fc_vport *fc_vport, bool disable)
{
struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
struct Scsi_Host *shost = csio_ln_to_shost(ln);
struct csio_hw *hw = csio_lnode_to_hw(ln);
/* disable vport */
if (disable) {
/* Quiesce ios and send stop event to lnode */
scsi_block_requests(shost);
spin_lock_irq(&hw->lock);
csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
csio_lnode_stop(ln);
spin_unlock_irq(&hw->lock);
scsi_unblock_requests(shost);
/* Free vnp */
csio_fcoe_free_vnp(hw, ln);
fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
csio_ln_err(ln, "vport disabled\n");
return 0;
} else {
/* enable vport */
fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
if (csio_fcoe_alloc_vnp(hw, ln)) {
csio_ln_err(ln, "vport enable failed.\n");
return -1;
}
csio_ln_err(ln, "vport enabled\n");
return 0;
}
}
static void
csio_dev_loss_tmo_callbk(struct fc_rport *rport)
{
struct csio_rnode *rn;
struct csio_hw *hw;
struct csio_lnode *ln;
rn = *((struct csio_rnode **)rport->dd_data);
ln = csio_rnode_to_lnode(rn);
hw = csio_lnode_to_hw(ln);
spin_lock_irq(&hw->lock);
/* return if driver is being removed or same rnode comes back online */
if (csio_is_hw_removing(hw) || csio_is_rnode_ready(rn))
goto out;
csio_ln_dbg(ln, "devloss timeout on rnode:%p portid:x%x flowid:x%x\n",
rn, rn->nport_id, csio_rn_flowid(rn));
CSIO_INC_STATS(ln, n_dev_loss_tmo);
/*
* enqueue devloss event to event worker thread to serialize all
* rnode events.
*/
if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn))) {
CSIO_INC_STATS(hw, n_evt_drop);
goto out;
}
if (!(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
hw->flags |= CSIO_HWF_FWEVT_PENDING;
spin_unlock_irq(&hw->lock);
schedule_work(&hw->evtq_work);
return;
}
out:
spin_unlock_irq(&hw->lock);
}
/* FC transport functions template - Physical port */
struct fc_function_template csio_fc_transport_funcs = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_maxframe_size = 1,
.get_host_port_id = csio_get_host_port_id,
.show_host_port_id = 1,
.get_host_port_type = csio_get_host_port_type,
.show_host_port_type = 1,
.get_host_port_state = csio_get_host_port_state,
.show_host_port_state = 1,
.show_host_active_fc4s = 1,
.get_host_speed = csio_get_host_speed,
.show_host_speed = 1,
.get_host_fabric_name = csio_get_host_fabric_name,
.show_host_fabric_name = 1,
.get_fc_host_stats = csio_get_stats,
.dd_fcrport_size = sizeof(struct csio_rnode *),
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.show_starget_port_id = 1,
.show_starget_node_name = 1,
.show_starget_port_name = 1,
.dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
.dd_fcvport_size = sizeof(struct csio_lnode *),
.vport_create = csio_vport_create,
.vport_disable = csio_vport_disable,
.vport_delete = csio_vport_delete,
};
/* FC transport functions template - Virtual port */
struct fc_function_template csio_fc_transport_vport_funcs = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_maxframe_size = 1,
.get_host_port_id = csio_get_host_port_id,
.show_host_port_id = 1,
.get_host_port_type = csio_get_host_port_type,
.show_host_port_type = 1,
.get_host_port_state = csio_get_host_port_state,
.show_host_port_state = 1,
.show_host_active_fc4s = 1,
.get_host_speed = csio_get_host_speed,
.show_host_speed = 1,
.get_host_fabric_name = csio_get_host_fabric_name,
.show_host_fabric_name = 1,
.get_fc_host_stats = csio_get_stats,
.dd_fcrport_size = sizeof(struct csio_rnode *),
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.show_starget_port_id = 1,
.show_starget_node_name = 1,
.show_starget_port_name = 1,
.dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
};
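/*
 * Illustrative sketch (not part of the driver sources shown here): the two
 * fc_function_template instances above are normally handed to the FC
 * transport class once at module load and released at unload, e.g.:
 *
 *	csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
 *	csio_fcoe_transport_vport =
 *			fc_attach_transport(&csio_fc_transport_vport_funcs);
 *	...
 *	fc_release_transport(csio_fcoe_transport_vport);
 *	fc_release_transport(csio_fcoe_transport);
 *
 * The resulting scsi_transport_template pointers are what csio_shost_init()
 * assigns to shost->transportt for physical and NPIV lnodes respectively.
 */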
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_DEFS_H__
#define __CSIO_DEFS_H__
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#define CSIO_INVALID_IDX 0xFFFFFFFF
#define CSIO_INC_STATS(elem, val) ((elem)->stats.val++)
#define CSIO_DEC_STATS(elem, val) ((elem)->stats.val--)
#define CSIO_VALID_WWN(__n) ((*__n >> 4) == 0x5 ? true : false)
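/*
 * Informational note: CSIO_VALID_WWN() accepts only names whose first
 * nibble is 0x5, i.e. NAA format 5 (IEEE Registered) WWNs such as a WWPN
 * whose first byte is 0x50..0x5f; an all-zero or otherwise malformed
 * name is rejected.
 */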
#define CSIO_DID_MASK 0xFFFFFF
#define CSIO_WORD_TO_BYTE 4
#ifndef readq
static inline u64 readq(void __iomem *addr)
{
return readl(addr) + ((u64)readl(addr + 4) << 32);
}
static inline void writeq(u64 val, void __iomem *addr)
{
writel(val, addr);
writel(val >> 32, addr + 4);
}
#endif
static inline int
csio_list_deleted(struct list_head *list)
{
return ((list->next == list) && (list->prev == list));
}
#define csio_list_next(elem) (((struct list_head *)(elem))->next)
#define csio_list_prev(elem) (((struct list_head *)(elem))->prev)
/* State machine */
typedef void (*csio_sm_state_t)(void *, uint32_t);
struct csio_sm {
struct list_head sm_list;
csio_sm_state_t sm_state;
};
static inline void
csio_set_state(void *smp, void *state)
{
((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
}
static inline void
csio_init_state(struct csio_sm *smp, void *state)
{
csio_set_state(smp, state);
}
static inline void
csio_post_event(void *smp, uint32_t evt)
{
((struct csio_sm *)smp)->sm_state(smp, evt);
}
static inline csio_sm_state_t
csio_get_state(void *smp)
{
return ((struct csio_sm *)smp)->sm_state;
}
static inline bool
csio_match_state(void *smp, void *state)
{
return (csio_get_state(smp) == (csio_sm_state_t)state);
}
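/*
 * Usage sketch (illustrative only; the "foo" names below are hypothetical):
 * a module embeds struct csio_sm as its first member (as struct csio_hw
 * does), installs an initial state handler, and then drives the machine by
 * posting events, which simply invoke the current handler:
 *
 *	struct csio_foo {
 *		struct csio_sm	sm;	(must be the first member)
 *	};
 *
 *	static void csio_foos_ready(void *obj, uint32_t evt);
 *
 *	csio_init_state(&foo->sm, csio_foos_ready);
 *	csio_post_event(foo, CSIO_FOOE_START);
 *	if (csio_match_state(foo, csio_foos_ready))
 *		...
 */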
#define CSIO_ASSERT(cond) BUG_ON(!(cond))
#ifdef __CSIO_DEBUG__
#define CSIO_DB_ASSERT(__c) CSIO_ASSERT((__c))
#else
#define CSIO_DB_ASSERT(__c)
#endif
#endif /* ifndef __CSIO_DEFS_H__ */
This source diff could not be displayed because it is too large.
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_HW_H__
#define __CSIO_HW_H__
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/io.h>
#include <linux/spinlock_types.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include "csio_wr.h"
#include "csio_mb.h"
#include "csio_scsi.h"
#include "csio_defs.h"
#include "t4_regs.h"
#include "t4_msg.h"
/*
* An error value used by host. Should not clash with FW defined return values.
*/
#define FW_HOSTERROR 255
#define CSIO_FW_FNAME "cxgb4/t4fw.bin"
#define CSIO_CF_FNAME "cxgb4/t4-config.txt"
#define FW_VERSION_MAJOR 1
#define FW_VERSION_MINOR 2
#define FW_VERSION_MICRO 8
#define CSIO_HW_NAME "Chelsio FCoE Adapter"
#define CSIO_MAX_PFN 8
#define CSIO_MAX_PPORTS 4
#define CSIO_MAX_LUN 0xFFFF
#define CSIO_MAX_QUEUE 2048
#define CSIO_MAX_CMD_PER_LUN 32
#define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024)
#define CSIO_MAX_SECTOR_SIZE 128
/* Interrupts */
#define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode
* (Forward intr iq + fw iq) */
#define CSIO_EXTRA_VECS 2 /* non-data + FW evt */
#define CSIO_MAX_SCSI_CPU 128
#define CSIO_MAX_SCSI_QSETS (CSIO_MAX_SCSI_CPU * CSIO_MAX_PPORTS)
#define CSIO_MAX_MSIX_VECS (CSIO_MAX_SCSI_QSETS + CSIO_EXTRA_VECS)
/* Queues */
enum {
CSIO_INTR_WRSIZE = 128,
CSIO_INTR_IQSIZE = ((CSIO_MAX_MSIX_VECS + 1) * CSIO_INTR_WRSIZE),
CSIO_FWEVT_WRSIZE = 128,
CSIO_FWEVT_IQLEN = 128,
CSIO_FWEVT_FLBUFS = 64,
CSIO_FWEVT_IQSIZE = (CSIO_FWEVT_WRSIZE * CSIO_FWEVT_IQLEN),
CSIO_HW_NIQ = 1,
CSIO_HW_NFLQ = 1,
CSIO_HW_NEQ = 1,
CSIO_HW_NINTXQ = 1,
};
struct csio_msix_entries {
unsigned short vector; /* Vector assigned by pci_enable_msix */
void *dev_id; /* Priv object associated w/ this msix*/
char desc[24]; /* Description of this vector */
};
struct csio_scsi_qset {
int iq_idx; /* Ingress index */
int eq_idx; /* Egress index */
uint32_t intr_idx; /* MSIX Vector index */
};
struct csio_scsi_cpu_info {
int16_t max_cpus;
};
extern int csio_dbg_level;
extern int csio_force_master;
extern unsigned int csio_port_mask;
extern int csio_msi;
#define CSIO_VENDOR_ID 0x1425
#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00
#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF
#define CSIO_FPGA 0xA000
#define CSIO_T4_FCOE_ASIC 0x4600
#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
EDC1 | LE | TP | MA | PM_TX | PM_RX | \
ULP_RX | CPL_SWITCH | SGE | \
ULP_TX | SF)
/*
* Hard parameters used to initialize the card in the absence of a
* configuration file.
*/
enum {
/* General */
CSIO_SGE_DBFIFO_INT_THRESH = 10,
CSIO_SGE_RX_DMA_OFFSET = 2,
CSIO_SGE_FLBUF_SIZE1 = 65536,
CSIO_SGE_FLBUF_SIZE2 = 1536,
CSIO_SGE_FLBUF_SIZE3 = 9024,
CSIO_SGE_FLBUF_SIZE4 = 9216,
CSIO_SGE_FLBUF_SIZE5 = 2048,
CSIO_SGE_FLBUF_SIZE6 = 128,
CSIO_SGE_FLBUF_SIZE7 = 8192,
CSIO_SGE_FLBUF_SIZE8 = 16384,
CSIO_SGE_TIMER_VAL_0 = 5,
CSIO_SGE_TIMER_VAL_1 = 10,
CSIO_SGE_TIMER_VAL_2 = 20,
CSIO_SGE_TIMER_VAL_3 = 50,
CSIO_SGE_TIMER_VAL_4 = 100,
CSIO_SGE_TIMER_VAL_5 = 200,
CSIO_SGE_INT_CNT_VAL_0 = 1,
CSIO_SGE_INT_CNT_VAL_1 = 4,
CSIO_SGE_INT_CNT_VAL_2 = 8,
CSIO_SGE_INT_CNT_VAL_3 = 16,
/* Storage specific - used by FW_PFVF_CMD */
CSIO_WX_CAPS = FW_CMD_CAP_PF, /* w/x all */
CSIO_R_CAPS = FW_CMD_CAP_PF, /* r all */
CSIO_NVI = 4,
CSIO_NIQ_FLINT = 34,
CSIO_NETH_CTRL = 32,
CSIO_NEQ = 66,
CSIO_NEXACTF = 32,
CSIO_CMASK = FW_PFVF_CMD_CMASK_MASK,
CSIO_PMASK = FW_PFVF_CMD_PMASK_MASK,
};
/* Slowpath events */
enum csio_evt {
CSIO_EVT_FW = 0, /* FW event */
CSIO_EVT_MBX, /* MBX event */
CSIO_EVT_SCN, /* State change notification */
CSIO_EVT_DEV_LOSS, /* Device loss event */
CSIO_EVT_MAX, /* Max supported event */
};
#define CSIO_EVT_MSG_SIZE 512
#define CSIO_EVTQ_SIZE 512
/* Event msg */
struct csio_evt_msg {
struct list_head list; /* evt queue*/
enum csio_evt type;
uint8_t data[CSIO_EVT_MSG_SIZE];
};
enum {
EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
SERNUM_LEN = 16, /* Serial # length */
EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
TRACE_LEN = 112, /* length of trace data and mask */
};
enum {
SF_PAGE_SIZE = 256, /* serial flash page size */
SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
};
enum { MEM_EDC0, MEM_EDC1, MEM_MC };
enum {
MEMWIN0_APERTURE = 2048,
MEMWIN0_BASE = 0x1b800,
MEMWIN1_APERTURE = 32768,
MEMWIN1_BASE = 0x28000,
MEMWIN2_APERTURE = 65536,
MEMWIN2_BASE = 0x30000,
};
/* serial flash and firmware constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
/* flash command opcodes */
SF_PROG_PAGE = 2, /* program page */
SF_WR_DISABLE = 4, /* disable writes */
SF_RD_STATUS = 5, /* read status register */
SF_WR_ENABLE = 6, /* enable writes */
SF_RD_DATA_FAST = 0xb, /* read flash */
SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
FW_START_SEC = 8, /* first flash sector for FW */
FW_END_SEC = 15, /* last flash sector for FW */
FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/
FLASH_CFG_OFFSET = 0x1f0000,
FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE,
FPGA_FLASH_CFG_OFFSET = 0xf0000 , /* if FPGA mode, then cfg file is
* at 1MB - 64KB */
FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE,
};
/*
* Flash layout.
*/
#define FLASH_START(start) ((start) * SF_SEC_SIZE)
#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
enum {
/*
* Location of firmware image in FLASH.
*/
FLASH_FW_START_SEC = 8,
FLASH_FW_NSECS = 8,
FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
};
#undef FLASH_START
#undef FLASH_MAX_SIZE
/* Management module */
enum {
CSIO_MGMT_EQ_WRSIZE = 512,
CSIO_MGMT_IQ_WRSIZE = 128,
CSIO_MGMT_EQLEN = 64,
CSIO_MGMT_IQLEN = 64,
};
#define CSIO_MGMT_EQSIZE (CSIO_MGMT_EQLEN * CSIO_MGMT_EQ_WRSIZE)
#define CSIO_MGMT_IQSIZE (CSIO_MGMT_IQLEN * CSIO_MGMT_IQ_WRSIZE)
/* mgmt module stats */
struct csio_mgmtm_stats {
uint32_t n_abort_req; /* Total abort request */
uint32_t n_abort_rsp; /* Total abort response */
uint32_t n_close_req; /* Total close request */
uint32_t n_close_rsp; /* Total close response */
uint32_t n_err; /* Total Errors */
uint32_t n_drop; /* Total request dropped */
uint32_t n_active; /* Count of active_q */
uint32_t n_cbfn; /* Count of cbfn_q */
};
/* MGMT module */
struct csio_mgmtm {
struct csio_hw *hw; /* Pointer to HW module */
int eq_idx; /* Egress queue index */
int iq_idx; /* Ingress queue index */
int msi_vec; /* MSI vector */
struct list_head active_q; /* Outstanding ELS/CT */
struct list_head abort_q; /* Outstanding abort req */
struct list_head cbfn_q; /* Completion queue */
struct list_head mgmt_req_freelist; /* Free pool of reqs */
/* ELSCT request freelist*/
struct timer_list mgmt_timer; /* MGMT timer */
struct csio_mgmtm_stats stats; /* ELS/CT stats */
};
struct csio_adap_desc {
char model_no[16];
char description[32];
};
struct pci_params {
uint16_t vendor_id;
uint16_t device_id;
uint32_t vpd_cap_addr;
uint16_t speed;
uint8_t width;
};
/* User configurable hw parameters */
struct csio_hw_params {
uint32_t sf_size; /* serial flash
* size in bytes
*/
uint32_t sf_nsec; /* # of flash sectors */
struct pci_params pci;
uint32_t log_level; /* Module-level for
* debug log.
*/
};
struct csio_vpd {
uint32_t cclk;
uint8_t ec[EC_LEN + 1];
uint8_t sn[SERNUM_LEN + 1];
uint8_t id[ID_LEN + 1];
};
struct csio_pport {
uint16_t pcap;
uint8_t portid;
uint8_t link_status;
uint16_t link_speed;
uint8_t mac[6];
uint8_t mod_type;
uint8_t rsvd1;
uint8_t rsvd2;
uint8_t rsvd3;
};
/* fcoe resource information */
struct csio_fcoe_res_info {
uint16_t e_d_tov;
uint16_t r_a_tov_seq;
uint16_t r_a_tov_els;
uint16_t r_r_tov;
uint32_t max_xchgs;
uint32_t max_ssns;
uint32_t used_xchgs;
uint32_t used_ssns;
uint32_t max_fcfs;
uint32_t max_vnps;
uint32_t used_fcfs;
uint32_t used_vnps;
};
/* HW State machine Events */
enum csio_hw_ev {
CSIO_HWE_CFG = (uint32_t)1, /* Starts off the State machine */
CSIO_HWE_INIT, /* Config done, start Init */
CSIO_HWE_INIT_DONE, /* Init Mailboxes sent, HW ready */
CSIO_HWE_FATAL, /* Fatal error during initialization */
CSIO_HWE_PCIERR_DETECTED,/* PCI error detected */
CSIO_HWE_PCIERR_SLOT_RESET, /* Slot reset after PCI recovery */
CSIO_HWE_PCIERR_RESUME, /* Resume after PCI error recovery */
CSIO_HWE_QUIESCED, /* HBA quiesced */
CSIO_HWE_HBA_RESET, /* HBA reset requested */
CSIO_HWE_HBA_RESET_DONE, /* HBA reset completed */
CSIO_HWE_FW_DLOAD, /* FW download requested */
CSIO_HWE_PCI_REMOVE, /* PCI de-instantiation */
CSIO_HWE_SUSPEND, /* HW suspend for Online(hot) replacement */
CSIO_HWE_RESUME, /* HW resume for Online(hot) replacement */
CSIO_HWE_MAX, /* Max HW event */
};
/* hw stats */
struct csio_hw_stats {
uint32_t n_evt_activeq; /* Number of events in active Q */
uint32_t n_evt_freeq; /* Number of events in free Q */
uint32_t n_evt_drop; /* Number of events dropped */
uint32_t n_evt_unexp; /* Number of unexpected events */
uint32_t n_pcich_offline;/* Number of pci channel offline */
uint32_t n_lnlkup_miss; /* Number of lnode lookup miss */
uint32_t n_cpl_fw6_msg; /* Number of cpl fw6 message*/
uint32_t n_cpl_fw6_pld; /* Number of cpl fw6 payload*/
uint32_t n_cpl_unexp; /* Number of unexpected cpl */
uint32_t n_mbint_unexp; /* Number of unexpected mbox */
/* interrupt */
uint32_t n_plint_unexp; /* Number of unexpected PL */
/* interrupt */
uint32_t n_plint_cnt; /* Number of PL interrupt */
uint32_t n_int_stray; /* Number of stray interrupt */
uint32_t n_err; /* Number of hw errors */
uint32_t n_err_fatal; /* Number of fatal errors */
uint32_t n_err_nomem; /* Number of memory alloc failure */
uint32_t n_err_io; /* Number of IO failure */
enum csio_hw_ev n_evt_sm[CSIO_HWE_MAX]; /* Number of sm events */
uint64_t n_reset_start; /* Start time after the reset */
uint32_t rsvd1;
};
/* Defines for hw->flags */
#define CSIO_HWF_MASTER 0x00000001 /* This is the Master
* function for the
* card.
*/
#define CSIO_HWF_HW_INTR_ENABLED 0x00000002 /* Are HW Interrupt
* enable bit set?
*/
#define CSIO_HWF_FWEVT_PENDING 0x00000004 /* FW events pending */
#define CSIO_HWF_Q_MEM_ALLOCED 0x00000008 /* Queues have been
* allocated memory.
*/
#define CSIO_HWF_Q_FW_ALLOCED 0x00000010 /* Queues have been
* allocated in FW.
*/
#define CSIO_HWF_VPD_VALID 0x00000020 /* Valid VPD copied */
#define CSIO_HWF_DEVID_CACHED 0X00000040 /* PCI vendor & device
* id cached */
#define CSIO_HWF_FWEVT_STOP 0x00000080 /* Stop processing
* FW events
*/
#define CSIO_HWF_USING_SOFT_PARAMS 0x00000100 /* Using FW config
* params
*/
#define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts
* enabled?
*/
#define csio_is_hw_intr_enabled(__hw) \
((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED)
#define csio_is_host_intr_enabled(__hw) \
((__hw)->flags & CSIO_HWF_HOST_INTR_ENABLED)
#define csio_is_hw_master(__hw) ((__hw)->flags & CSIO_HWF_MASTER)
#define csio_is_valid_vpd(__hw) ((__hw)->flags & CSIO_HWF_VPD_VALID)
#define csio_is_dev_id_cached(__hw) ((__hw)->flags & CSIO_HWF_DEVID_CACHED)
#define csio_valid_vpd_copied(__hw) ((__hw)->flags |= CSIO_HWF_VPD_VALID)
#define csio_dev_id_cached(__hw) ((__hw)->flags |= CSIO_HWF_DEVID_CACHED)
/* Defines for intr_mode */
enum csio_intr_mode {
CSIO_IM_NONE = 0,
CSIO_IM_INTX = 1,
CSIO_IM_MSI = 2,
CSIO_IM_MSIX = 3,
};
/* Master HW structure: One per function */
struct csio_hw {
struct csio_sm sm; /* State machine: should
* be the 1st member.
*/
spinlock_t lock; /* Lock for hw */
struct csio_scsim scsim; /* SCSI module*/
struct csio_wrm wrm; /* Work request module*/
struct pci_dev *pdev; /* PCI device */
void __iomem *regstart; /* Virtual address of
* register map
*/
/* SCSI queue sets */
uint32_t num_sqsets; /* Number of SCSI
* queue sets */
uint32_t num_scsi_msix_cpus; /* Number of CPUs that
* will be used
* for ingress
* processing.
*/
struct csio_scsi_qset sqset[CSIO_MAX_PPORTS][CSIO_MAX_SCSI_CPU];
struct csio_scsi_cpu_info scsi_cpu_info[CSIO_MAX_PPORTS];
uint32_t evtflag; /* Event flag */
uint32_t flags; /* HW flags */
struct csio_mgmtm mgmtm; /* management module */
struct csio_mbm mbm; /* Mailbox module */
/* Lnodes */
uint32_t num_lns; /* Number of lnodes */
struct csio_lnode *rln; /* Root lnode */
struct list_head sln_head; /* Sibling node list
* list
*/
int intr_iq_idx; /* Forward interrupt
* queue.
*/
int fwevt_iq_idx; /* FW evt queue */
struct work_struct evtq_work; /* Worker thread for
* HW events.
*/
struct list_head evt_free_q; /* freelist of evt
* elements
*/
struct list_head evt_active_q; /* active evt queue*/
/* board related info */
char name[32];
char hw_ver[16];
char model_desc[32];
char drv_version[32];
char fwrev_str[32];
uint32_t optrom_ver;
uint32_t fwrev;
uint32_t tp_vers;
char chip_ver;
uint32_t cfg_finiver;
uint32_t cfg_finicsum;
uint32_t cfg_cfcsum;
uint8_t cfg_csum_status;
uint8_t cfg_store;
enum csio_dev_state fw_state;
struct csio_vpd vpd;
uint8_t pfn; /* Physical Function
* number
*/
uint32_t port_vec; /* Port vector */
uint8_t num_pports; /* Number of physical
* ports.
*/
uint8_t rst_retries; /* Reset retries */
uint8_t cur_evt; /* current s/m evt */
uint8_t prev_evt; /* Previous s/m evt */
uint32_t dev_num; /* device number */
struct csio_pport pport[CSIO_MAX_PPORTS]; /* Ports (XGMACs) */
struct csio_hw_params params; /* Hw parameters */
struct pci_pool *scsi_pci_pool; /* PCI pool for SCSI */
mempool_t *mb_mempool; /* Mailbox memory pool*/
mempool_t *rnode_mempool; /* rnode memory pool */
/* Interrupt */
enum csio_intr_mode intr_mode; /* INTx, MSI, MSIX */
uint32_t fwevt_intr_idx; /* FW evt MSIX/interrupt
* index
*/
uint32_t nondata_intr_idx; /* nondata MSIX/intr
* idx
*/
uint8_t cfg_neq; /* FW configured no of
* egress queues
*/
uint8_t cfg_niq; /* FW configured no of
* iq queues.
*/
struct csio_fcoe_res_info fres_info; /* Fcoe resource info */
/* MSIX vectors */
struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS];
struct dentry *debugfs_root; /* Debug FS */
struct csio_hw_stats stats; /* Hw statistics */
};
/* Register access macros */
#define csio_reg(_b, _r) ((_b) + (_r))
#define csio_rd_reg8(_h, _r) readb(csio_reg((_h)->regstart, (_r)))
#define csio_rd_reg16(_h, _r) readw(csio_reg((_h)->regstart, (_r)))
#define csio_rd_reg32(_h, _r) readl(csio_reg((_h)->regstart, (_r)))
#define csio_rd_reg64(_h, _r) readq(csio_reg((_h)->regstart, (_r)))
#define csio_wr_reg8(_h, _v, _r) writeb((_v), \
csio_reg((_h)->regstart, (_r)))
#define csio_wr_reg16(_h, _v, _r) writew((_v), \
csio_reg((_h)->regstart, (_r)))
#define csio_wr_reg32(_h, _v, _r) writel((_v), \
csio_reg((_h)->regstart, (_r)))
#define csio_wr_reg64(_h, _v, _r) writeq((_v), \
csio_reg((_h)->regstart, (_r)))
void csio_set_reg_field(struct csio_hw *, uint32_t, uint32_t, uint32_t);
/* Core clocks <==> uSecs */
static inline uint32_t
csio_core_ticks_to_us(struct csio_hw *hw, uint32_t ticks)
{
/* add Core Clock / 2 to round ticks to nearest uS */
return (ticks * 1000 + hw->vpd.cclk/2) / hw->vpd.cclk;
}
static inline uint32_t
csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us)
{
return (us * hw->vpd.cclk) / 1000;
}
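/*
 * Worked example (illustrative; assumes vpd.cclk holds the core clock in
 * kHz, as the conversions above imply): with cclk = 250000 (250 MHz),
 * csio_core_ticks_to_us(hw, 500) = (500 * 1000 + 125000) / 250000, which
 * is 2 us in integer arithmetic, and csio_us_to_core_ticks(hw, 2) =
 * (2 * 250000) / 1000 = 500 ticks.
 */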
/* Easy access macros */
#define csio_hw_to_wrm(hw) ((struct csio_wrm *)(&(hw)->wrm))
#define csio_hw_to_mbm(hw) ((struct csio_mbm *)(&(hw)->mbm))
#define csio_hw_to_scsim(hw) ((struct csio_scsim *)(&(hw)->scsim))
#define csio_hw_to_mgmtm(hw) ((struct csio_mgmtm *)(&(hw)->mgmtm))
#define CSIO_PCI_BUS(hw) ((hw)->pdev->bus->number)
#define CSIO_PCI_DEV(hw) (PCI_SLOT((hw)->pdev->devfn))
#define CSIO_PCI_FUNC(hw) (PCI_FUNC((hw)->pdev->devfn))
#define csio_set_fwevt_intr_idx(_h, _i) ((_h)->fwevt_intr_idx = (_i))
#define csio_get_fwevt_intr_idx(_h) ((_h)->fwevt_intr_idx)
#define csio_set_nondata_intr_idx(_h, _i) ((_h)->nondata_intr_idx = (_i))
#define csio_get_nondata_intr_idx(_h) ((_h)->nondata_intr_idx)
/* Printing/logging */
#define CSIO_DEVID(__dev) ((__dev)->dev_num)
#define CSIO_DEVID_LO(__dev) (CSIO_DEVID((__dev)) & 0xFFFF)
#define CSIO_DEVID_HI(__dev) ((CSIO_DEVID((__dev)) >> 16) & 0xFFFF)
#define csio_info(__hw, __fmt, ...) \
dev_info(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
#define csio_fatal(__hw, __fmt, ...) \
dev_crit(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
#define csio_err(__hw, __fmt, ...) \
dev_err(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
#define csio_warn(__hw, __fmt, ...) \
dev_warn(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
#ifdef __CSIO_DEBUG__
#define csio_dbg(__hw, __fmt, ...) \
csio_info((__hw), __fmt, ##__VA_ARGS__);
#else
#define csio_dbg(__hw, __fmt, ...)
#endif
int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *);
void csio_hw_intr_disable(struct csio_hw *);
int csio_hw_slow_intr_handler(struct csio_hw *hw);
int csio_hw_start(struct csio_hw *);
int csio_hw_stop(struct csio_hw *);
int csio_hw_reset(struct csio_hw *);
int csio_is_hw_ready(struct csio_hw *);
int csio_is_hw_removing(struct csio_hw *);
int csio_fwevtq_handler(struct csio_hw *);
void csio_evtq_worker(struct work_struct *);
int csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type,
void *evt_msg, uint16_t len);
void csio_evtq_flush(struct csio_hw *hw);
int csio_request_irqs(struct csio_hw *);
void csio_intr_enable(struct csio_hw *);
void csio_intr_disable(struct csio_hw *, bool);
struct csio_lnode *csio_lnode_alloc(struct csio_hw *);
int csio_config_queues(struct csio_hw *);
int csio_hw_mc_read(struct csio_hw *, uint32_t,
uint32_t *, uint64_t *);
int csio_hw_edc_read(struct csio_hw *, int, uint32_t, uint32_t *,
uint64_t *);
int csio_hw_init(struct csio_hw *);
void csio_hw_exit(struct csio_hw *);
#endif /* ifndef __CSIO_HW_H__ */
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/string.h>
#include <linux/export.h>
#include "csio_init.h"
#include "csio_defs.h"
#define CSIO_MIN_MEMPOOL_SZ 64
static struct dentry *csio_debugfs_root;
static struct scsi_transport_template *csio_fcoe_transport;
static struct scsi_transport_template *csio_fcoe_transport_vport;
/*
* debugfs support
*/
static int
csio_mem_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
static ssize_t
csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
loff_t pos = *ppos;
loff_t avail = file->f_path.dentry->d_inode->i_size;
unsigned int mem = (uintptr_t)file->private_data & 3;
struct csio_hw *hw = file->private_data - mem;
if (pos < 0)
return -EINVAL;
if (pos >= avail)
return 0;
if (count > avail - pos)
count = avail - pos;
while (count) {
size_t len;
int ret, ofst;
__be32 data[16];
if (mem == MEM_MC)
ret = csio_hw_mc_read(hw, pos, data, NULL);
else
ret = csio_hw_edc_read(hw, mem, pos, data, NULL);
if (ret)
return ret;
ofst = pos % sizeof(data);
len = min(count, sizeof(data) - ofst);
if (copy_to_user(buf, (u8 *)data + ofst, len))
return -EFAULT;
buf += len;
pos += len;
count -= len;
}
count = pos - *ppos;
*ppos = pos;
return count;
}
static const struct file_operations csio_mem_debugfs_fops = {
.owner = THIS_MODULE,
.open = csio_mem_open,
.read = csio_mem_read,
.llseek = default_llseek,
};
static void __devinit
csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
unsigned int idx, unsigned int size_mb)
{
struct dentry *de;
de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root,
(void *)hw + idx, &csio_mem_debugfs_fops);
if (de && de->d_inode)
de->d_inode->i_size = size_mb << 20;
}
static int __devinit
csio_setup_debugfs(struct csio_hw *hw)
{
int i;
if (IS_ERR_OR_NULL(hw->debugfs_root))
return -1;
i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
if (i & EDRAM0_ENABLE)
csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
if (i & EDRAM1_ENABLE)
csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
if (i & EXT_MEM_ENABLE)
csio_add_debugfs_mem(hw, "mc", MEM_MC,
EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
return 0;
}
/*
* csio_dfs_create - Creates and sets up per-hw debugfs.
*
*/
static int
csio_dfs_create(struct csio_hw *hw)
{
if (csio_debugfs_root) {
hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
csio_debugfs_root);
csio_setup_debugfs(hw);
}
return 0;
}
/*
* csio_dfs_destroy - Destroys per-hw debugfs.
*/
static int
csio_dfs_destroy(struct csio_hw *hw)
{
if (hw->debugfs_root)
debugfs_remove_recursive(hw->debugfs_root);
return 0;
}
/*
* csio_dfs_init - Debug filesystem initialization for the module.
*
*/
static int
csio_dfs_init(void)
{
csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (!csio_debugfs_root)
pr_warn("Could not create debugfs entry, continuing\n");
return 0;
}
/*
* csio_dfs_exit - debugfs cleanup for the module.
*/
static void
csio_dfs_exit(void)
{
debugfs_remove(csio_debugfs_root);
}
/*
* csio_pci_init - PCI initialization.
* @pdev: PCI device.
* @bars: Bitmask of bars to be requested.
*
* Initializes the PCI function by enabling MMIO, setting bus
* mastership and setting DMA mask.
*/
static int
csio_pci_init(struct pci_dev *pdev, int *bars)
{
int rv = -ENODEV;
*bars = pci_select_bars(pdev, IORESOURCE_MEM);
if (pci_enable_device_mem(pdev))
goto err;
if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
goto err_disable_device;
pci_set_master(pdev);
pci_try_set_mwi(pdev);
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
} else {
dev_err(&pdev->dev, "No suitable DMA available.\n");
goto err_release_regions;
}
return 0;
err_release_regions:
pci_release_selected_regions(pdev, *bars);
err_disable_device:
pci_disable_device(pdev);
err:
return rv;
}
/*
* csio_pci_exit - PCI uninitialization.
* @pdev: PCI device.
* @bars: Bars to be released.
*
*/
static void
csio_pci_exit(struct pci_dev *pdev, int *bars)
{
pci_release_selected_regions(pdev, *bars);
pci_disable_device(pdev);
}
/*
* csio_hw_init_workers - Initialize the HW module's worker threads.
* @hw: HW module.
*
*/
static void
csio_hw_init_workers(struct csio_hw *hw)
{
INIT_WORK(&hw->evtq_work, csio_evtq_worker);
}
static void
csio_hw_exit_workers(struct csio_hw *hw)
{
cancel_work_sync(&hw->evtq_work);
flush_scheduled_work();
}
static int
csio_create_queues(struct csio_hw *hw)
{
int i, j;
struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
int rv;
struct csio_scsi_cpu_info *info;
if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
return 0;
if (hw->intr_mode != CSIO_IM_MSIX) {
rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
0, hw->pport[0].portid, false, NULL);
if (rv != 0) {
csio_err(hw, " Forward Interrupt IQ failed!: %d\n", rv);
return rv;
}
}
/* FW event queue */
rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
csio_get_fwevt_intr_idx(hw),
hw->pport[0].portid, true, NULL);
if (rv != 0) {
csio_err(hw, "FW event IQ config failed!: %d\n", rv);
return rv;
}
/* Create mgmt queue */
rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
mgmtm->iq_idx, hw->pport[0].portid, NULL);
if (rv != 0) {
csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
goto err;
}
/* Create SCSI queues */
for (i = 0; i < hw->num_pports; i++) {
info = &hw->scsi_cpu_info[i];
for (j = 0; j < info->max_cpus; j++) {
struct csio_scsi_qset *sqset = &hw->sqset[i][j];
rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
sqset->intr_idx, i, false, NULL);
if (rv != 0) {
csio_err(hw,
"SCSI module IQ config failed [%d][%d]:%d\n",
i, j, rv);
goto err;
}
rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
sqset->iq_idx, i, NULL);
if (rv != 0) {
csio_err(hw,
"SCSI module EQ config failed [%d][%d]:%d\n",
i, j, rv);
goto err;
}
} /* for all CPUs */
} /* For all ports */
hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
return 0;
err:
csio_wr_destroy_queues(hw, true);
return -EINVAL;
}
/*
* csio_config_queues - Configure the DMA queues.
* @hw: HW module.
*
* Allocates memory for the queues and registers them with the FW.
*/
int
csio_config_queues(struct csio_hw *hw)
{
int i, j, idx, k = 0;
int rv;
struct csio_scsi_qset *sqset;
struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
struct csio_scsi_qset *orig;
struct csio_scsi_cpu_info *info;
if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
return csio_create_queues(hw);
/* Calculate number of SCSI queues for MSIX we would like */
hw->num_scsi_msix_cpus = num_online_cpus();
hw->num_sqsets = num_online_cpus() * hw->num_pports;
if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
}
/* Initialize max_cpus, may get reduced during msix allocations */
for (i = 0; i < hw->num_pports; i++)
hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;
csio_dbg(hw, "nsqsets:%d scpus:%d\n",
hw->num_sqsets, hw->num_scsi_msix_cpus);
csio_intr_enable(hw);
if (hw->intr_mode != CSIO_IM_MSIX) {
/* Allocate Forward interrupt iq. */
hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
CSIO_INTR_WRSIZE, CSIO_INGRESS,
(void *)hw, 0, 0, NULL);
if (hw->intr_iq_idx == -1) {
csio_err(hw,
"Forward interrupt queue creation failed\n");
goto intr_disable;
}
}
/* Allocate the FW evt queue */
hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
CSIO_FWEVT_WRSIZE,
CSIO_INGRESS, (void *)hw,
CSIO_FWEVT_FLBUFS, 0,
csio_fwevt_intx_handler);
if (hw->fwevt_iq_idx == -1) {
csio_err(hw, "FW evt queue creation failed\n");
goto intr_disable;
}
/* Allocate the mgmt queue */
mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
CSIO_MGMT_EQ_WRSIZE,
CSIO_EGRESS, (void *)hw, 0, 0, NULL);
if (mgmtm->eq_idx == -1) {
csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
goto intr_disable;
}
/* Use FW IQ for MGMT req completion */
mgmtm->iq_idx = hw->fwevt_iq_idx;
/* Allocate SCSI queues */
for (i = 0; i < hw->num_pports; i++) {
info = &hw->scsi_cpu_info[i];
for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
sqset = &hw->sqset[i][j];
if (j >= info->max_cpus) {
k = j % info->max_cpus;
orig = &hw->sqset[i][k];
sqset->eq_idx = orig->eq_idx;
sqset->iq_idx = orig->iq_idx;
continue;
}
idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
CSIO_EGRESS, (void *)hw, 0, 0,
NULL);
if (idx == -1) {
csio_err(hw, "EQ creation failed for idx:%d\n",
idx);
goto intr_disable;
}
sqset->eq_idx = idx;
idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
(void *)hw, 0, 0,
csio_scsi_intx_handler);
if (idx == -1) {
csio_err(hw, "IQ creation failed for idx:%d\n",
idx);
goto intr_disable;
}
sqset->iq_idx = idx;
} /* for all CPUs */
} /* For all ports */
hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;
rv = csio_create_queues(hw);
if (rv != 0)
goto intr_disable;
/*
* Now request IRQs for the vectors. In the event of a failure,
* cleanup is handled internally by this function.
*/
rv = csio_request_irqs(hw);
if (rv != 0)
return -EINVAL;
return 0;
intr_disable:
csio_intr_disable(hw, false);
return -EINVAL;
}
static int
csio_resource_alloc(struct csio_hw *hw)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
int rv = -ENOMEM;
wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);
hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
sizeof(struct csio_mb));
if (!hw->mb_mempool)
goto err;
hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
sizeof(struct csio_rnode));
if (!hw->rnode_mempool)
goto err_free_mb_mempool;
hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
CSIO_SCSI_RSP_LEN, 8, 0);
if (!hw->scsi_pci_pool)
goto err_free_rn_pool;
return 0;
err_free_rn_pool:
mempool_destroy(hw->rnode_mempool);
hw->rnode_mempool = NULL;
err_free_mb_mempool:
mempool_destroy(hw->mb_mempool);
hw->mb_mempool = NULL;
err:
return rv;
}
static void
csio_resource_free(struct csio_hw *hw)
{
pci_pool_destroy(hw->scsi_pci_pool);
hw->scsi_pci_pool = NULL;
mempool_destroy(hw->rnode_mempool);
hw->rnode_mempool = NULL;
mempool_destroy(hw->mb_mempool);
hw->mb_mempool = NULL;
}
/*
* csio_hw_alloc - Allocate and initialize the HW module.
* @pdev: PCI device.
*
* Allocates HW structure, DMA, memory resources, maps BARS to
* host memory and initializes HW module.
*/
static struct csio_hw * __devinit
csio_hw_alloc(struct pci_dev *pdev)
{
struct csio_hw *hw;
hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
if (!hw)
goto err;
hw->pdev = pdev;
strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);
/* memory pool/DMA pool allocation */
if (csio_resource_alloc(hw))
goto err_free_hw;
/* Get the start address of registers from BAR 0 */
hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!hw->regstart) {
csio_err(hw, "Could not map BAR 0, regstart = %p\n",
hw->regstart);
goto err_resource_free;
}
csio_hw_init_workers(hw);
if (csio_hw_init(hw))
goto err_unmap_bar;
csio_dfs_create(hw);
csio_dbg(hw, "hw:%p\n", hw);
return hw;
err_unmap_bar:
csio_hw_exit_workers(hw);
iounmap(hw->regstart);
err_resource_free:
csio_resource_free(hw);
err_free_hw:
kfree(hw);
err:
return NULL;
}
/*
* csio_hw_free - Uninitialize and free the HW module.
* @hw: The HW module
*
* Disable interrupts, uninit the HW module, free resources, free hw.
*/
static void
csio_hw_free(struct csio_hw *hw)
{
csio_intr_disable(hw, true);
csio_hw_exit_workers(hw);
csio_hw_exit(hw);
iounmap(hw->regstart);
csio_dfs_destroy(hw);
csio_resource_free(hw);
kfree(hw);
}
/**
* csio_shost_init - Create and initialize the lnode module.
* @hw: The HW module.
* @dev: The device associated with this invocation.
 * @probe: Whether this is being called from probe context.
 * @pln: Parent lnode, if any.
*
* Allocates lnode structure via scsi_host_alloc, initializes
* shost, initializes lnode module and registers with SCSI ML
* via scsi_host_add. This function is shared between physical and
* virtual node ports.
*/
struct csio_lnode *
csio_shost_init(struct csio_hw *hw, struct device *dev,
bool probe, struct csio_lnode *pln)
{
struct Scsi_Host *shost = NULL;
struct csio_lnode *ln;
csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;
/*
* hw->pdev is the physical port's PCI dev structure,
* which will be different from the NPIV dev structure.
*/
if (dev == &hw->pdev->dev)
shost = scsi_host_alloc(
&csio_fcoe_shost_template,
sizeof(struct csio_lnode));
else
shost = scsi_host_alloc(
&csio_fcoe_shost_vport_template,
sizeof(struct csio_lnode));
if (!shost)
goto err;
ln = shost_priv(shost);
memset(ln, 0, sizeof(struct csio_lnode));
	/*
	 * dev_num: upper 16 bits carry the SCSI host number; the lower
	 * 16 bits are filled in with the VNP index once the link is up.
	 */
ln->dev_num = (shost->host_no << 16);
shost->can_queue = CSIO_MAX_QUEUE;
shost->this_id = -1;
shost->unique_id = shost->host_no;
shost->max_cmd_len = 16; /* Max CDB length supported */
shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
hw->fres_info.max_ssns);
shost->max_lun = CSIO_MAX_LUN;
if (dev == &hw->pdev->dev)
shost->transportt = csio_fcoe_transport;
else
shost->transportt = csio_fcoe_transport_vport;
/* root lnode */
if (!hw->rln)
hw->rln = ln;
/* Other initialization here: Common, Transport specific */
if (csio_lnode_init(ln, hw, pln))
goto err_shost_put;
if (scsi_add_host(shost, dev))
goto err_lnode_exit;
return ln;
err_lnode_exit:
csio_lnode_exit(ln);
err_shost_put:
scsi_host_put(shost);
err:
return NULL;
}
/**
* csio_shost_exit - De-instantiate the shost.
* @ln: The lnode module corresponding to the shost.
*
*/
void
csio_shost_exit(struct csio_lnode *ln)
{
struct Scsi_Host *shost = csio_ln_to_shost(ln);
struct csio_hw *hw = csio_lnode_to_hw(ln);
/* Inform transport */
fc_remove_host(shost);
/* Inform SCSI ML */
scsi_remove_host(shost);
/* Flush all the events, so that any rnode removal events
* already queued are all handled, before we remove the lnode.
*/
spin_lock_irq(&hw->lock);
csio_evtq_flush(hw);
spin_unlock_irq(&hw->lock);
csio_lnode_exit(ln);
scsi_host_put(shost);
}
struct csio_lnode *
csio_lnode_alloc(struct csio_hw *hw)
{
return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
}
void
csio_lnodes_block_request(struct csio_hw *hw)
{
struct Scsi_Host *shost;
struct csio_lnode *sln;
struct csio_lnode *ln;
struct list_head *cur_ln, *cur_cln;
struct csio_lnode **lnode_list;
int cur_cnt = 0, ii;
lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
GFP_KERNEL);
if (!lnode_list) {
csio_err(hw, "Failed to allocate lnodes_list");
return;
}
spin_lock_irq(&hw->lock);
/* Traverse sibling lnodes */
list_for_each(cur_ln, &hw->sln_head) {
sln = (struct csio_lnode *) cur_ln;
lnode_list[cur_cnt++] = sln;
/* Traverse children lnodes */
list_for_each(cur_cln, &sln->cln_head)
lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
}
spin_unlock_irq(&hw->lock);
for (ii = 0; ii < cur_cnt; ii++) {
csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
ln = lnode_list[ii];
shost = csio_ln_to_shost(ln);
scsi_block_requests(shost);
}
kfree(lnode_list);
}
void
csio_lnodes_unblock_request(struct csio_hw *hw)
{
struct csio_lnode *ln;
struct Scsi_Host *shost;
struct csio_lnode *sln;
struct list_head *cur_ln, *cur_cln;
struct csio_lnode **lnode_list;
int cur_cnt = 0, ii;
lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
GFP_KERNEL);
if (!lnode_list) {
csio_err(hw, "Failed to allocate lnodes_list");
return;
}
spin_lock_irq(&hw->lock);
/* Traverse sibling lnodes */
list_for_each(cur_ln, &hw->sln_head) {
sln = (struct csio_lnode *) cur_ln;
lnode_list[cur_cnt++] = sln;
/* Traverse children lnodes */
list_for_each(cur_cln, &sln->cln_head)
lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
}
spin_unlock_irq(&hw->lock);
for (ii = 0; ii < cur_cnt; ii++) {
csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
ln = lnode_list[ii];
shost = csio_ln_to_shost(ln);
scsi_unblock_requests(shost);
}
kfree(lnode_list);
}
void
csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
{
struct csio_lnode *ln;
struct Scsi_Host *shost;
struct csio_lnode *sln;
struct list_head *cur_ln, *cur_cln;
struct csio_lnode **lnode_list;
int cur_cnt = 0, ii;
lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
GFP_KERNEL);
if (!lnode_list) {
csio_err(hw, "Failed to allocate lnodes_list");
return;
}
spin_lock_irq(&hw->lock);
/* Traverse sibling lnodes */
list_for_each(cur_ln, &hw->sln_head) {
sln = (struct csio_lnode *) cur_ln;
if (sln->portid != portid)
continue;
lnode_list[cur_cnt++] = sln;
/* Traverse children lnodes */
list_for_each(cur_cln, &sln->cln_head)
lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
}
spin_unlock_irq(&hw->lock);
for (ii = 0; ii < cur_cnt; ii++) {
csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
ln = lnode_list[ii];
shost = csio_ln_to_shost(ln);
scsi_block_requests(shost);
}
kfree(lnode_list);
}
void
csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
{
struct csio_lnode *ln;
struct Scsi_Host *shost;
struct csio_lnode *sln;
struct list_head *cur_ln, *cur_cln;
struct csio_lnode **lnode_list;
int cur_cnt = 0, ii;
lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
GFP_KERNEL);
if (!lnode_list) {
csio_err(hw, "Failed to allocate lnodes_list");
return;
}
spin_lock_irq(&hw->lock);
/* Traverse sibling lnodes */
list_for_each(cur_ln, &hw->sln_head) {
sln = (struct csio_lnode *) cur_ln;
if (sln->portid != portid)
continue;
lnode_list[cur_cnt++] = sln;
/* Traverse children lnodes */
list_for_each(cur_cln, &sln->cln_head)
lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
}
spin_unlock_irq(&hw->lock);
for (ii = 0; ii < cur_cnt; ii++) {
csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
ln = lnode_list[ii];
shost = csio_ln_to_shost(ln);
scsi_unblock_requests(shost);
}
kfree(lnode_list);
}
void
csio_lnodes_exit(struct csio_hw *hw, bool npiv)
{
struct csio_lnode *sln;
struct csio_lnode *ln;
struct list_head *cur_ln, *cur_cln;
struct csio_lnode **lnode_list;
int cur_cnt = 0, ii;
lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
GFP_KERNEL);
if (!lnode_list) {
csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
return;
}
/* Get all child lnodes(NPIV ports) */
spin_lock_irq(&hw->lock);
list_for_each(cur_ln, &hw->sln_head) {
sln = (struct csio_lnode *) cur_ln;
/* Traverse children lnodes */
list_for_each(cur_cln, &sln->cln_head)
lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
}
spin_unlock_irq(&hw->lock);
/* Delete NPIV lnodes */
for (ii = 0; ii < cur_cnt; ii++) {
csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
ln = lnode_list[ii];
fc_vport_terminate(ln->fc_vport);
}
/* Delete only npiv lnodes */
if (npiv)
goto free_lnodes;
cur_cnt = 0;
/* Get all physical lnodes */
spin_lock_irq(&hw->lock);
/* Traverse sibling lnodes */
list_for_each(cur_ln, &hw->sln_head) {
sln = (struct csio_lnode *) cur_ln;
lnode_list[cur_cnt++] = sln;
}
spin_unlock_irq(&hw->lock);
/* Delete physical lnodes */
for (ii = 0; ii < cur_cnt; ii++) {
csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
csio_shost_exit(lnode_list[ii]);
}
free_lnodes:
kfree(lnode_list);
}
/*
* csio_lnode_init_post: Set lnode attributes after starting HW.
* @ln: lnode.
*
*/
static void
csio_lnode_init_post(struct csio_lnode *ln)
{
struct Scsi_Host *shost = csio_ln_to_shost(ln);
csio_fchost_attr_init(ln);
scsi_scan_host(shost);
}
/*
 * csio_probe_one - Instantiate an adapter instance at this PCI function.
* @pdev: PCI device
* @id: Device ID
*
* This is the .probe() callback of the driver. This function:
* - Initializes the PCI function by enabling MMIO, setting bus
* mastership and setting DMA mask.
* - Allocates HW structure, DMA, memory resources, maps BARS to
* host memory and initializes HW module.
* - Allocates lnode structure via scsi_host_alloc, initializes
 * shost, initializes the lnode module and registers with SCSI ML
* via scsi_host_add.
* - Enables interrupts, and starts the chip by kicking off the
* HW state machine.
 * - Once hardware is ready, initiates a scan of the host via
* scsi_scan_host.
*/
static int __devinit
csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rv;
int bars;
int i;
struct csio_hw *hw;
struct csio_lnode *ln;
rv = csio_pci_init(pdev, &bars);
if (rv)
goto err;
hw = csio_hw_alloc(pdev);
if (!hw) {
rv = -ENODEV;
goto err_pci_exit;
}
pci_set_drvdata(pdev, hw);
if (csio_hw_start(hw) != 0) {
dev_err(&pdev->dev,
"Failed to start FW, continuing in debug mode.\n");
return 0;
}
sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
for (i = 0; i < hw->num_pports; i++) {
ln = csio_shost_init(hw, &pdev->dev, true, NULL);
if (!ln) {
rv = -ENODEV;
break;
}
/* Initialize portid */
ln->portid = hw->pport[i].portid;
spin_lock_irq(&hw->lock);
if (csio_lnode_start(ln) != 0)
rv = -ENODEV;
spin_unlock_irq(&hw->lock);
if (rv)
break;
csio_lnode_init_post(ln);
}
if (rv)
goto err_lnode_exit;
return 0;
err_lnode_exit:
csio_lnodes_block_request(hw);
spin_lock_irq(&hw->lock);
csio_hw_stop(hw);
spin_unlock_irq(&hw->lock);
csio_lnodes_unblock_request(hw);
pci_set_drvdata(hw->pdev, NULL);
csio_lnodes_exit(hw, 0);
csio_hw_free(hw);
err_pci_exit:
csio_pci_exit(pdev, &bars);
err:
dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
return rv;
}
/*
* csio_remove_one - Remove one instance of the driver at this PCI function.
* @pdev: PCI device
*
* Used during hotplug operation.
*/
static void __devexit
csio_remove_one(struct pci_dev *pdev)
{
struct csio_hw *hw = pci_get_drvdata(pdev);
int bars = pci_select_bars(pdev, IORESOURCE_MEM);
csio_lnodes_block_request(hw);
spin_lock_irq(&hw->lock);
	/*
	 * Stop the lnode and rnode state machines, quiesce outstanding
	 * I/Os, and unregister all sessions with remote ports.
	 */
csio_hw_stop(hw);
spin_unlock_irq(&hw->lock);
csio_lnodes_unblock_request(hw);
csio_lnodes_exit(hw, 0);
csio_hw_free(hw);
pci_set_drvdata(pdev, NULL);
csio_pci_exit(pdev, &bars);
}
/*
* csio_pci_error_detected - PCI error was detected
* @pdev: PCI device
*
*/
static pci_ers_result_t
csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
struct csio_hw *hw = pci_get_drvdata(pdev);
csio_lnodes_block_request(hw);
spin_lock_irq(&hw->lock);
	/*
	 * Post the PCI-error-detected event to the HW state machine, which
	 * handles it by quiescing I/Os, unregistering rports and finally
	 * taking the device offline.
	 */
csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
spin_unlock_irq(&hw->lock);
csio_lnodes_unblock_request(hw);
csio_lnodes_exit(hw, 0);
csio_intr_disable(hw, true);
pci_disable_device(pdev);
return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
/*
* csio_pci_slot_reset - PCI slot has been reset.
* @pdev: PCI device
*
*/
static pci_ers_result_t
csio_pci_slot_reset(struct pci_dev *pdev)
{
struct csio_hw *hw = pci_get_drvdata(pdev);
int ready;
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
pci_cleanup_aer_uncorrect_error_status(pdev);
	/*
	 * Bring the HW state machine to the ready state, but don't
	 * resume I/Os yet.
	 */
spin_lock_irq(&hw->lock);
csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
ready = csio_is_hw_ready(hw);
spin_unlock_irq(&hw->lock);
if (ready) {
return PCI_ERS_RESULT_RECOVERED;
} else {
dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
}
/*
* csio_pci_resume - Resume normal operations
* @pdev: PCI device
*
*/
static void
csio_pci_resume(struct pci_dev *pdev)
{
struct csio_hw *hw = pci_get_drvdata(pdev);
struct csio_lnode *ln;
int rv = 0;
int i;
/* Bring the LINK UP and Resume IO */
for (i = 0; i < hw->num_pports; i++) {
ln = csio_shost_init(hw, &pdev->dev, true, NULL);
if (!ln) {
rv = -ENODEV;
break;
}
/* Initialize portid */
ln->portid = hw->pport[i].portid;
spin_lock_irq(&hw->lock);
if (csio_lnode_start(ln) != 0)
rv = -ENODEV;
spin_unlock_irq(&hw->lock);
if (rv)
break;
csio_lnode_init_post(ln);
}
if (rv)
goto err_resume_exit;
return;
err_resume_exit:
csio_lnodes_block_request(hw);
spin_lock_irq(&hw->lock);
csio_hw_stop(hw);
spin_unlock_irq(&hw->lock);
csio_lnodes_unblock_request(hw);
csio_lnodes_exit(hw, 0);
csio_hw_free(hw);
dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
}
static struct pci_error_handlers csio_err_handler = {
.error_detected = csio_pci_error_detected,
.slot_reset = csio_pci_slot_reset,
.resume = csio_pci_resume,
};
static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T440DBG FCOE */
CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */
CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */
CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */
CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0), /* T420BCH FCOE */
CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0), /* T440BCH FCOE */
CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0), /* T440CH FCOE */
CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0), /* T420SO FCOE */
CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0), /* T420CX FCOE */
CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0), /* T420BT FCOE */
CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0), /* T404BT FCOE */
CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0), /* B420 FCOE */
CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */
CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */
CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */
CSIO_DEVICE(CSIO_DEVID_PE10K, 0), /* PE10K FCOE */
CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0), /* PE10K FCOE on PF1 */
{ 0, 0, 0, 0, 0, 0, 0 }
};
static struct pci_driver csio_pci_driver = {
.name = KBUILD_MODNAME,
.driver = {
.owner = THIS_MODULE,
},
.id_table = csio_pci_tbl,
.probe = csio_probe_one,
.remove = csio_remove_one,
.err_handler = &csio_err_handler,
};
/*
* csio_init - Chelsio storage driver initialization function.
*
*/
static int __init
csio_init(void)
{
int rv = -ENOMEM;
pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);
csio_dfs_init();
csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
if (!csio_fcoe_transport)
goto err;
csio_fcoe_transport_vport =
fc_attach_transport(&csio_fc_transport_vport_funcs);
if (!csio_fcoe_transport_vport)
goto err_vport;
rv = pci_register_driver(&csio_pci_driver);
if (rv)
goto err_pci;
return 0;
err_pci:
fc_release_transport(csio_fcoe_transport_vport);
err_vport:
fc_release_transport(csio_fcoe_transport);
err:
csio_dfs_exit();
return rv;
}
/*
 * csio_exit - Chelsio storage driver uninitialization.
*
* Function that gets called in the unload path.
*/
static void __exit
csio_exit(void)
{
pci_unregister_driver(&csio_pci_driver);
csio_dfs_exit();
fc_release_transport(csio_fcoe_transport_vport);
fc_release_transport(csio_fcoe_transport);
}
module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
MODULE_LICENSE(CSIO_DRV_LICENSE);
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(CSIO_FW_FNAME);
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_INIT_H__
#define __CSIO_INIT_H__
#include <linux/pci.h>
#include <linux/if_ether.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "csio_scsi.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_hw.h"
#define CSIO_DRV_AUTHOR "Chelsio Communications"
#define CSIO_DRV_LICENSE "Dual BSD/GPL"
#define CSIO_DRV_DESC "Chelsio FCoE driver"
#define CSIO_DRV_VERSION "1.0.0"
#define CSIO_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
#define CSIO_IS_T4_FPGA(_dev) (((_dev) == CSIO_DEVID_PE10K) ||\
((_dev) == CSIO_DEVID_PE10K_PF1))
/* FCoE device IDs */
#define CSIO_DEVID_PE10K 0xA000
#define CSIO_DEVID_PE10K_PF1 0xA001
#define CSIO_DEVID_T440DBG_FCOE 0x4600
#define CSIO_DEVID_T420CR_FCOE 0x4601
#define CSIO_DEVID_T422CR_FCOE 0x4602
#define CSIO_DEVID_T440CR_FCOE 0x4603
#define CSIO_DEVID_T420BCH_FCOE 0x4604
#define CSIO_DEVID_T440BCH_FCOE 0x4605
#define CSIO_DEVID_T440CH_FCOE 0x4606
#define CSIO_DEVID_T420SO_FCOE 0x4607
#define CSIO_DEVID_T420CX_FCOE 0x4608
#define CSIO_DEVID_T420BT_FCOE 0x4609
#define CSIO_DEVID_T404BT_FCOE 0x460A
#define CSIO_DEVID_B420_FCOE 0x460B
#define CSIO_DEVID_B404_FCOE 0x460C
#define CSIO_DEVID_T480CR_FCOE 0x460D
#define CSIO_DEVID_T440LPCR_FCOE 0x460E
extern struct fc_function_template csio_fc_transport_funcs;
extern struct fc_function_template csio_fc_transport_vport_funcs;
void csio_fchost_attr_init(struct csio_lnode *);
/* INTx handlers */
void csio_scsi_intx_handler(struct csio_hw *, void *, uint32_t,
struct csio_fl_dma_buf *, void *);
void csio_fwevt_intx_handler(struct csio_hw *, void *, uint32_t,
struct csio_fl_dma_buf *, void *);
/* Common os lnode APIs */
void csio_lnodes_block_request(struct csio_hw *);
void csio_lnodes_unblock_request(struct csio_hw *);
void csio_lnodes_block_by_port(struct csio_hw *, uint8_t);
void csio_lnodes_unblock_by_port(struct csio_hw *, uint8_t);
struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool,
struct csio_lnode *);
void csio_shost_exit(struct csio_lnode *);
void csio_lnodes_exit(struct csio_hw *, bool);
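/*
 * The lnode is carved out of the Scsi_Host's hostdata area (the extra
 * space requested from scsi_host_alloc()), so the owning shost can be
 * recovered with container_of() on hostdata[0].
 */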
static inline struct Scsi_Host *
csio_ln_to_shost(struct csio_lnode *ln)
{
return container_of((void *)ln, struct Scsi_Host, hostdata[0]);
}
/* SCSI -- locking version of get/put ioreqs */
static inline struct csio_ioreq *
csio_get_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim)
{
struct csio_ioreq *ioreq;
unsigned long flags;
spin_lock_irqsave(&scsim->freelist_lock, flags);
ioreq = csio_get_scsi_ioreq(scsim);
spin_unlock_irqrestore(&scsim->freelist_lock, flags);
return ioreq;
}
static inline void
csio_put_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim,
struct csio_ioreq *ioreq)
{
unsigned long flags;
spin_lock_irqsave(&scsim->freelist_lock, flags);
csio_put_scsi_ioreq(scsim, ioreq);
spin_unlock_irqrestore(&scsim->freelist_lock, flags);
}
/* Called in interrupt context */
static inline void
csio_put_scsi_ioreq_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
struct list_head *reqlist, int n)
{
unsigned long flags;
spin_lock_irqsave(&scsim->freelist_lock, flags);
csio_put_scsi_ioreq_list(scsim, reqlist, n);
spin_unlock_irqrestore(&scsim->freelist_lock, flags);
}
/* Called in interrupt context */
static inline void
csio_put_scsi_ddp_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
struct list_head *reqlist, int n)
{
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
csio_put_scsi_ddp_list(scsim, reqlist, n);
spin_unlock_irqrestore(&hw->lock, flags);
}
#endif /* ifndef __CSIO_INIT_H__ */
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include "csio_init.h"
#include "csio_hw.h"
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
struct csio_hw *hw = (struct csio_hw *) dev_id;
int rv;
unsigned long flags;
if (unlikely(!hw))
return IRQ_NONE;
if (unlikely(pci_channel_offline(hw->pdev))) {
CSIO_INC_STATS(hw, n_pcich_offline);
return IRQ_NONE;
}
spin_lock_irqsave(&hw->lock, flags);
csio_hw_slow_intr_handler(hw);
rv = csio_mb_isr_handler(hw);
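	/*
	 * CSIO_HWF_FWEVT_PENDING serializes deferral of FW event work:
	 * it is set here before scheduling evtq_work and is expected to
	 * be cleared by the event-queue worker once the events are
	 * drained, so at most one deferred pass is queued at a time.
	 */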
if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
hw->flags |= CSIO_HWF_FWEVT_PENDING;
spin_unlock_irqrestore(&hw->lock, flags);
schedule_work(&hw->evtq_work);
return IRQ_HANDLED;
}
spin_unlock_irqrestore(&hw->lock, flags);
return IRQ_HANDLED;
}
/*
* csio_fwevt_handler - Common FW event handler routine.
* @hw: HW module.
*
 * Common FW event processing routine, shared between the MSI-X and
 * INTx interrupt paths.
*/
static void
csio_fwevt_handler(struct csio_hw *hw)
{
int rv;
unsigned long flags;
rv = csio_fwevtq_handler(hw);
spin_lock_irqsave(&hw->lock, flags);
if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
hw->flags |= CSIO_HWF_FWEVT_PENDING;
spin_unlock_irqrestore(&hw->lock, flags);
schedule_work(&hw->evtq_work);
return;
}
spin_unlock_irqrestore(&hw->lock, flags);
} /* csio_fwevt_handler */
/*
* csio_fwevt_isr() - FW events MSIX ISR
 * @irq: Interrupt number.
 * @dev_id: The csio_hw pointer registered for this vector.
*
* Process WRs on the FW event queue.
*
*/
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
struct csio_hw *hw = (struct csio_hw *) dev_id;
if (unlikely(!hw))
return IRQ_NONE;
if (unlikely(pci_channel_offline(hw->pdev))) {
CSIO_INC_STATS(hw, n_pcich_offline);
return IRQ_NONE;
}
csio_fwevt_handler(hw);
return IRQ_HANDLED;
}
/*
 * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
*/
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
struct csio_fl_dma_buf *flb, void *priv)
{
csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */
/*
* csio_process_scsi_cmpl - Process a SCSI WR completion.
* @hw: HW module.
* @wr: The completed WR from the ingress queue.
* @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @cbfn_q: List on which completed requests are collected for callbacks.
 *
*/
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
struct csio_fl_dma_buf *flb, void *cbfn_q)
{
struct csio_ioreq *ioreq;
uint8_t *scsiwr;
uint8_t subop;
void *cmnd;
unsigned long flags;
ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
if (likely(ioreq)) {
if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
((struct fw_scsi_abrt_cls_wr *)
scsiwr)->sub_opcode_to_chk_all_io);
csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
subop ? "Close" : "Abort",
ioreq, ioreq->wr_status);
spin_lock_irqsave(&hw->lock, flags);
if (subop)
csio_scsi_closed(ioreq,
(struct list_head *)cbfn_q);
else
csio_scsi_aborted(ioreq,
(struct list_head *)cbfn_q);
			/*
			 * We call scsi_done for I/Os whose aborts the
			 * driver believes have timed out. If the FW
			 * completes an abort at the same moment the
			 * driver detects the abort timeout, the
			 * following check prevents scsi_done from being
			 * called twice for the same command: once from
			 * the eh_abort_handler and once from
			 * csio_scsi_isr_handler(). It also avoids having
			 * to check csio_scsi_cmnd(req) for NULL in the
			 * fast path.
			 */
cmnd = csio_scsi_cmnd(ioreq);
if (unlikely(cmnd == NULL))
list_del_init(&ioreq->sm.sm_list);
spin_unlock_irqrestore(&hw->lock, flags);
if (unlikely(cmnd == NULL))
csio_put_scsi_ioreq_lock(hw,
csio_hw_to_scsim(hw), ioreq);
} else {
spin_lock_irqsave(&hw->lock, flags);
csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
spin_unlock_irqrestore(&hw->lock, flags);
}
}
}
/*
* csio_scsi_isr_handler() - Common SCSI ISR handler.
* @iq: Ingress queue pointer.
*
 * Processes SCSI completions on the given SCSI IQ by calling
 * csio_wr_process_iq(). Completed requests are gathered on a local
 * cbfn_q, their io_cbfns are invoked, and the requests are then
 * returned to the freelist.
 * This routine is shared between the MSI-X and INTx paths.
*/
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
struct csio_hw *hw = (struct csio_hw *)iq->owner;
LIST_HEAD(cbfn_q);
struct list_head *tmp;
struct csio_scsim *scm;
struct csio_ioreq *ioreq;
int isr_completions = 0;
scm = csio_hw_to_scsim(hw);
if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
&cbfn_q) != 0))
return IRQ_NONE;
/* Call back the completion routines */
list_for_each(tmp, &cbfn_q) {
ioreq = (struct csio_ioreq *)tmp;
isr_completions++;
ioreq->io_cbfn(hw, ioreq);
/* Release ddp buffer if used for this req */
if (unlikely(ioreq->dcopy))
csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
ioreq->nsge);
}
if (isr_completions) {
/* Return the ioreqs back to ioreq->freelist */
csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
isr_completions);
}
return IRQ_HANDLED;
}
/*
* csio_scsi_isr() - SCSI MSIX handler
 * @irq: Interrupt number.
 * @dev_id: The SCSI ingress queue (csio_q) registered for this vector.
*
* This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
* for handling SCSI completions.
*/
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
struct csio_q *iq = (struct csio_q *) dev_id;
struct csio_hw *hw;
if (unlikely(!iq))
return IRQ_NONE;
hw = (struct csio_hw *)iq->owner;
if (unlikely(pci_channel_offline(hw->pdev))) {
CSIO_INC_STATS(hw, n_pcich_offline);
return IRQ_NONE;
}
csio_scsi_isr_handler(iq);
return IRQ_HANDLED;
}
/*
* csio_scsi_intx_handler() - SCSI INTx handler
 * @hw: HW module.
 * @priv: The SCSI ingress queue (csio_q) carrying the completions.
*
* This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
* for handling SCSI completions.
*/
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
struct csio_fl_dma_buf *flb, void *priv)
{
struct csio_q *iq = priv;
csio_scsi_isr_handler(iq);
} /* csio_scsi_intx_handler */
/*
* csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
 * @irq: Interrupt number.
 * @dev_id: The csio_hw pointer registered for this IRQ.
 *
*/
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
struct csio_hw *hw = (struct csio_hw *) dev_id;
struct csio_q *intx_q = NULL;
int rv;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
if (unlikely(!hw))
return IRQ_NONE;
if (unlikely(pci_channel_offline(hw->pdev))) {
CSIO_INC_STATS(hw, n_pcich_offline);
return IRQ_NONE;
}
/* Disable the interrupt for this PCI function. */
if (hw->intr_mode == CSIO_IM_INTX)
csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));
/*
* The read in the following function will flush the
* above write.
*/
if (csio_hw_slow_intr_handler(hw))
ret = IRQ_HANDLED;
/* Get the INTx Forward interrupt IQ. */
intx_q = csio_get_q(hw, hw->intr_iq_idx);
CSIO_DB_ASSERT(intx_q);
/* IQ handler is not possible for intx_q, hence pass in NULL */
if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
ret = IRQ_HANDLED;
spin_lock_irqsave(&hw->lock, flags);
rv = csio_mb_isr_handler(hw);
if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
hw->flags |= CSIO_HWF_FWEVT_PENDING;
spin_unlock_irqrestore(&hw->lock, flags);
schedule_work(&hw->evtq_work);
return IRQ_HANDLED;
}
spin_unlock_irqrestore(&hw->lock, flags);
return ret;
}
static void
csio_add_msix_desc(struct csio_hw *hw)
{
int i;
struct csio_msix_entries *entryp = &hw->msix_entries[0];
int k = CSIO_EXTRA_VECS;
int len = sizeof(entryp->desc) - 1;
int cnt = hw->num_sqsets + k;
/* Non-data vector */
memset(entryp->desc, 0, len + 1);
snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
entryp++;
memset(entryp->desc, 0, len + 1);
snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
entryp++;
/* Name SCSI vecs */
for (i = k; i < cnt; i++, entryp++) {
memset(entryp->desc, 0, len + 1);
snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
}
}
int
csio_request_irqs(struct csio_hw *hw)
{
int rv, i, j, k = 0;
struct csio_msix_entries *entryp = &hw->msix_entries[0];
struct csio_scsi_cpu_info *info;
if (hw->intr_mode != CSIO_IM_MSIX) {
rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
(hw->intr_mode == CSIO_IM_MSI) ?
0 : IRQF_SHARED,
KBUILD_MODNAME, hw);
if (rv) {
if (hw->intr_mode == CSIO_IM_MSI)
pci_disable_msi(hw->pdev);
csio_err(hw, "Failed to allocate interrupt line.\n");
return -EINVAL;
}
goto out;
}
/* Add the MSIX vector descriptions */
csio_add_msix_desc(hw);
rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
entryp[k].desc, hw);
if (rv) {
csio_err(hw, "IRQ request failed for vec %d err:%d\n",
entryp[k].vector, rv);
goto err;
}
entryp[k++].dev_id = (void *)hw;
rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
entryp[k].desc, hw);
if (rv) {
csio_err(hw, "IRQ request failed for vec %d err:%d\n",
entryp[k].vector, rv);
goto err;
}
entryp[k++].dev_id = (void *)hw;
/* Allocate IRQs for SCSI */
for (i = 0; i < hw->num_pports; i++) {
info = &hw->scsi_cpu_info[i];
for (j = 0; j < info->max_cpus; j++, k++) {
struct csio_scsi_qset *sqset = &hw->sqset[i][j];
struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
entryp[k].desc, q);
if (rv) {
csio_err(hw,
"IRQ request failed for vec %d err:%d\n",
entryp[k].vector, rv);
goto err;
}
entryp[k].dev_id = (void *)q;
} /* for all scsi cpus */
} /* for all ports */
out:
hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
return 0;
err:
for (i = 0; i < k; i++) {
entryp = &hw->msix_entries[i];
free_irq(entryp->vector, entryp->dev_id);
}
pci_disable_msix(hw->pdev);
return -EINVAL;
}
static void
csio_disable_msix(struct csio_hw *hw, bool free)
{
int i;
struct csio_msix_entries *entryp;
int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;
if (free) {
for (i = 0; i < cnt; i++) {
entryp = &hw->msix_entries[i];
free_irq(entryp->vector, entryp->dev_id);
}
}
pci_disable_msix(hw->pdev);
}
/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
int i;
struct csio_scsi_cpu_info *info;
while (cnt < hw->num_sqsets) {
for (i = 0; i < hw->num_pports; i++) {
info = &hw->scsi_cpu_info[i];
if (info->max_cpus > 1) {
info->max_cpus--;
hw->num_sqsets--;
if (hw->num_sqsets <= cnt)
break;
}
}
}
csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}
static int
csio_enable_msix(struct csio_hw *hw)
{
int rv, i, j, k, n, min, cnt;
struct csio_msix_entries *entryp;
struct msix_entry *entries;
int extra = CSIO_EXTRA_VECS;
struct csio_scsi_cpu_info *info;
min = hw->num_pports + extra;
cnt = hw->num_sqsets + extra;
/* Max vectors required based on #niqs configured in fw */
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
cnt = min_t(uint8_t, hw->cfg_niq, cnt);
entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL);
if (!entries)
return -ENOMEM;
for (i = 0; i < cnt; i++)
entries[i].entry = (uint16_t)i;
csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
while ((rv = pci_enable_msix(hw->pdev, entries, cnt)) >= min)
cnt = rv;
if (!rv) {
if (cnt < (hw->num_sqsets + extra)) {
csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
csio_reduce_sqsets(hw, cnt - extra);
}
} else {
if (rv > 0) {
pci_disable_msix(hw->pdev);
csio_info(hw, "Not using MSI-X, remainder:%d\n", rv);
}
kfree(entries);
return -ENOMEM;
}
/* Save off vectors */
for (i = 0; i < cnt; i++) {
entryp = &hw->msix_entries[i];
entryp->vector = entries[i].vector;
}
/* Distribute vectors */
k = 0;
csio_set_nondata_intr_idx(hw, entries[k].entry);
csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
csio_set_fwevt_intr_idx(hw, entries[k++].entry);
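	/*
	 * Distribute the remaining vectors to the SCSI queue sets: each
	 * port owns a contiguous block of info->max_cpus vectors, and
	 * queue sets beyond that budget wrap around within the block
	 * (j % info->max_cpus).
	 */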
for (i = 0; i < hw->num_pports; i++) {
info = &hw->scsi_cpu_info[i];
for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
n = (j % info->max_cpus) + k;
hw->sqset[i][j].intr_idx = entries[n].entry;
}
k += info->max_cpus;
}
kfree(entries);
return 0;
}
void
csio_intr_enable(struct csio_hw *hw)
{
hw->intr_mode = CSIO_IM_NONE;
hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
/* Try MSIX, then MSI or fall back to INTx */
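	/* csio_msi: 2 requests MSI-X, 1 requests MSI, anything else INTx. */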
if ((csio_msi == 2) && !csio_enable_msix(hw))
hw->intr_mode = CSIO_IM_MSIX;
else {
/* Max iqs required based on #niqs configured in fw */
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
!csio_is_hw_master(hw)) {
int extra = CSIO_EXTRA_MSI_IQS;
if (hw->cfg_niq < (hw->num_sqsets + extra)) {
csio_dbg(hw, "Reducing sqsets to %d\n",
hw->cfg_niq - extra);
csio_reduce_sqsets(hw, hw->cfg_niq - extra);
}
}
if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
hw->intr_mode = CSIO_IM_MSI;
else
hw->intr_mode = CSIO_IM_INTX;
}
csio_dbg(hw, "Using %s interrupt mode.\n",
(hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}
void
csio_intr_disable(struct csio_hw *hw, bool free)
{
csio_hw_intr_disable(hw);
switch (hw->intr_mode) {
case CSIO_IM_MSIX:
csio_disable_msix(hw, free);
break;
case CSIO_IM_MSI:
if (free)
free_irq(hw->pdev->irq, hw);
pci_disable_msi(hw->pdev);
break;
case CSIO_IM_INTX:
if (free)
free_irq(hw->pdev->irq, hw);
break;
default:
break;
}
hw->intr_mode = CSIO_IM_NONE;
hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <asm/unaligned.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/fc/fc_ms.h>
#include "csio_hw.h"
#include "csio_mb.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
int csio_fcoe_rnodes = 1024;
int csio_fdmi_enable = 1;
#define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1)
/* Lnode SM declarations */
static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);
static int csio_ln_mgmt_submit_req(struct csio_ioreq *,
void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t);
/* LN event mapping */
static enum csio_ln_ev fwevt_to_lnevt[] = {
CSIO_LNE_NONE, /* None */
CSIO_LNE_NONE, /* PLOGI_ACC_RCVD */
CSIO_LNE_NONE, /* PLOGI_RJT_RCVD */
CSIO_LNE_NONE, /* PLOGI_RCVD */
CSIO_LNE_NONE, /* PLOGO_RCVD */
CSIO_LNE_NONE, /* PRLI_ACC_RCVD */
CSIO_LNE_NONE, /* PRLI_RJT_RCVD */
CSIO_LNE_NONE, /* PRLI_RCVD */
CSIO_LNE_NONE, /* PRLO_RCVD */
CSIO_LNE_NONE, /* NPORT_ID_CHGD */
CSIO_LNE_LOGO, /* FLOGO_RCVD */
CSIO_LNE_LOGO, /* CLR_VIRT_LNK_RCVD */
CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD */
CSIO_LNE_NONE, /* FLOGI_RJT_RCVD */
CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD */
CSIO_LNE_NONE, /* FDISC_RJT_RCVD */
CSIO_LNE_NONE, /* FLOGI_TMO_MAX_RETRY */
CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_ACC */
CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_RJT */
CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
CSIO_LNE_NONE, /* PRLI_TMO */
CSIO_LNE_NONE, /* ADISC_TMO */
CSIO_LNE_NONE, /* RSCN_DEV_LOST */
CSIO_LNE_NONE, /* SCR_ACC_RCVD */
CSIO_LNE_NONE, /* ADISC_RJT_RCVD */
CSIO_LNE_NONE, /* LOGO_SNT */
CSIO_LNE_NONE, /* PROTO_ERR_IMPL_LOGO */
};
#define CSIO_FWE_TO_LNE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \
CSIO_LNE_NONE : \
fwevt_to_lnevt[_evt])
#define csio_ct_rsp(cp) (((struct fc_ct_hdr *)cp)->ct_cmd)
#define csio_ct_reason(cp) (((struct fc_ct_hdr *)cp)->ct_reason)
#define csio_ct_expl(cp) (((struct fc_ct_hdr *)cp)->ct_explan)
#define csio_ct_get_pld(cp) ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN))
/*
 * csio_ln_lookup_by_portid - Look up an lnode using the given portid.
* @hw: HW module
* @portid: port-id.
*
* If found, returns lnode matching given portid otherwise returns NULL.
*/
static struct csio_lnode *
csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
{
struct csio_lnode *ln = hw->rln;
struct list_head *tmp;
/* Match siblings lnode with portid */
list_for_each(tmp, &hw->sln_head) {
ln = (struct csio_lnode *) tmp;
if (ln->portid == portid)
return ln;
}
return NULL;
}
/*
* csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id.
* @hw - HW module
 * @vnp_id - vnp index.
* Returns - If found, returns lnode matching given vnp id
* otherwise returns NULL.
*/
static struct csio_lnode *
csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id)
{
struct list_head *tmp1, *tmp2;
struct csio_lnode *sln = NULL, *cln = NULL;
if (list_empty(&hw->sln_head)) {
CSIO_INC_STATS(hw, n_lnlkup_miss);
return NULL;
}
/* Traverse sibling lnodes */
list_for_each(tmp1, &hw->sln_head) {
sln = (struct csio_lnode *) tmp1;
/* Match sibling lnode */
if (sln->vnp_flowid == vnp_id)
return sln;
if (list_empty(&sln->cln_head))
continue;
/* Traverse children lnodes */
list_for_each(tmp2, &sln->cln_head) {
cln = (struct csio_lnode *) tmp2;
if (cln->vnp_flowid == vnp_id)
return cln;
}
}
CSIO_INC_STATS(hw, n_lnlkup_miss);
return NULL;
}
/**
* csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn.
* @hw: HW module.
* @wwpn: WWPN.
*
* If found, returns lnode matching given wwpn, returns NULL otherwise.
*/
struct csio_lnode *
csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn)
{
struct list_head *tmp1, *tmp2;
struct csio_lnode *sln = NULL, *cln = NULL;
if (list_empty(&hw->sln_head)) {
CSIO_INC_STATS(hw, n_lnlkup_miss);
return NULL;
}
/* Traverse sibling lnodes */
list_for_each(tmp1, &hw->sln_head) {
sln = (struct csio_lnode *) tmp1;
/* Match sibling lnode */
if (!memcmp(csio_ln_wwpn(sln), wwpn, 8))
return sln;
if (list_empty(&sln->cln_head))
continue;
/* Traverse children lnodes */
list_for_each(tmp2, &sln->cln_head) {
cln = (struct csio_lnode *) tmp2;
if (!memcmp(csio_ln_wwpn(cln), wwpn, 8))
return cln;
}
}
return NULL;
}
/* FDMI */
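/*
 * FDMI registration is driven as a chain of CT requests, each issued
 * from the previous request's completion handler:
 * DHBA (de-register stale HBA attributes) -> DPRT (de-register port
 * attributes) -> RHBA (register HBA attributes) -> RPA (register port
 * attributes).
 */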
static void
csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op)
{
struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf;
cmd->ct_rev = FC_CT_REV;
cmd->ct_fs_type = type;
cmd->ct_fs_subtype = sub_type;
cmd->ct_cmd = op;
}
static int
csio_hostname(uint8_t *buf, size_t buf_len)
{
if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0)
return 0;
return -1;
}
static int
csio_osname(uint8_t *buf, size_t buf_len)
{
if (snprintf(buf, buf_len, "%s %s %s",
init_utsname()->sysname,
init_utsname()->release,
init_utsname()->version) > 0)
return 0;
return -1;
}
static inline void
csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)
{
	struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;
	uint16_t attr_len;

	ae->type = htons(type);
	attr_len = len + 4;		/* includes attribute type and length */
	attr_len = (attr_len + 3) & ~3;	/* should be multiple of 4 bytes */
	ae->len = htons(attr_len);
	memset(ae->value, 0, attr_len - 4);
	/* Copy only the caller-supplied bytes; the rounded remainder stays zero padding. */
	memcpy(ae->value, val, len);
	*ptr += attr_len;
}
/*
 * csio_ln_fdmi_done - FDMI registration completion
* @hw: HW context
* @fdmi_req: fdmi request
*/
static void
csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
void *cmd;
struct csio_lnode *ln = fdmi_req->lnode;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n",
fdmi_req->wr_status);
CSIO_INC_STATS(ln, n_fdmi_err);
}
cmd = fdmi_req->dma_buf.vaddr;
if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n",
csio_ct_reason(cmd), csio_ct_expl(cmd));
}
}
/*
* csio_ln_fdmi_rhba_cbfn - RHBA completion
* @hw: HW context
* @fdmi_req: fdmi request
*/
static void
csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
void *cmd;
uint8_t *pld;
uint32_t len = 0;
struct csio_lnode *ln = fdmi_req->lnode;
struct fs_fdmi_attrs *attrib_blk;
struct fc_fdmi_port_name *port_name;
uint8_t buf[64];
uint32_t val;
uint8_t *fc4_type;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
fdmi_req->wr_status);
CSIO_INC_STATS(ln, n_fdmi_err);
}
cmd = fdmi_req->dma_buf.vaddr;
if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n",
csio_ct_reason(cmd), csio_ct_expl(cmd));
}
if (!csio_is_rnode_ready(fdmi_req->rnode)) {
CSIO_INC_STATS(ln, n_fdmi_err);
return;
}
/* Prepare CT hdr for RPA cmd */
memset(cmd, 0, FC_CT_HDR_LEN);
csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_RPA));
/* Prepare RPA payload */
pld = (uint8_t *)csio_ct_get_pld(cmd);
port_name = (struct fc_fdmi_port_name *)pld;
memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
pld += sizeof(*port_name);
/* Start appending Port attributes */
attrib_blk = (struct fs_fdmi_attrs *)pld;
attrib_blk->numattrs = 0;
len += sizeof(attrib_blk->numattrs);
pld += sizeof(attrib_blk->numattrs);
fc4_type = &buf[0];
memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
fc4_type[2] = 1;
fc4_type[7] = 1;
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES,
fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
attrib_blk->numattrs++;
val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
(uint8_t *)&val,
FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
attrib_blk->numattrs++;
if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G)
val = htonl(FC_PORTSPEED_1GBIT);
else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
val = htonl(FC_PORTSPEED_10GBIT);
else
val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
(uint8_t *)&val,
FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
attrib_blk->numattrs++;
val = htonl(ln->ln_sparm.csp.sp_bb_data);
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
(uint8_t *)&val, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
attrib_blk->numattrs++;
strcpy(buf, "csiostor");
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
(uint16_t)strlen(buf));
attrib_blk->numattrs++;
if (!csio_hostname(buf, sizeof(buf))) {
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
buf, (uint16_t)strlen(buf));
attrib_blk->numattrs++;
}
attrib_blk->numattrs = ntohl(attrib_blk->numattrs);
len = (uint32_t)(pld - (uint8_t *)cmd);
/* Submit FDMI RPA request */
spin_lock_irq(&hw->lock);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
}
spin_unlock_irq(&hw->lock);
}
/*
* csio_ln_fdmi_dprt_cbfn - DPRT completion
* @hw: HW context
* @fdmi_req: fdmi request
*/
static void
csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
void *cmd;
uint8_t *pld;
uint32_t len = 0;
uint32_t maxpayload = htonl(65536);
struct fc_fdmi_hba_identifier *hbaid;
struct csio_lnode *ln = fdmi_req->lnode;
struct fc_fdmi_rpl *reg_pl;
struct fs_fdmi_attrs *attrib_blk;
uint8_t buf[64];
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
fdmi_req->wr_status);
CSIO_INC_STATS(ln, n_fdmi_err);
}
if (!csio_is_rnode_ready(fdmi_req->rnode)) {
CSIO_INC_STATS(ln, n_fdmi_err);
return;
}
cmd = fdmi_req->dma_buf.vaddr;
if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
csio_ct_reason(cmd), csio_ct_expl(cmd));
}
/* Prepare CT hdr for RHBA cmd */
memset(cmd, 0, FC_CT_HDR_LEN);
csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_RHBA));
len = FC_CT_HDR_LEN;
/* Prepare RHBA payload */
pld = (uint8_t *)csio_ct_get_pld(cmd);
hbaid = (struct fc_fdmi_hba_identifier *)pld;
	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */
pld += sizeof(*hbaid);
/* Register one port per hba */
reg_pl = (struct fc_fdmi_rpl *)pld;
reg_pl->numport = ntohl(1);
memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
pld += sizeof(*reg_pl);
	/* Start appending HBA attributes */
attrib_blk = (struct fs_fdmi_attrs *)pld;
attrib_blk->numattrs = 0;
len += sizeof(attrib_blk->numattrs);
pld += sizeof(attrib_blk->numattrs);
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
FC_FDMI_HBA_ATTR_NODENAME_LEN);
attrib_blk->numattrs++;
memset(buf, 0, sizeof(buf));
strcpy(buf, "Chelsio Communications");
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
(uint16_t)strlen(buf));
attrib_blk->numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));
attrib_blk->numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
(uint16_t)sizeof(hw->vpd.id));
attrib_blk->numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
hw->model_desc, (uint16_t)strlen(hw->model_desc));
attrib_blk->numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));
attrib_blk->numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));
attrib_blk->numattrs++;
if (!csio_osname(buf, sizeof(buf))) {
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
buf, (uint16_t)strlen(buf));
attrib_blk->numattrs++;
}
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
(uint8_t *)&maxpayload,
FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
len = (uint32_t)(pld - (uint8_t *)cmd);
attrib_blk->numattrs++;
attrib_blk->numattrs = ntohl(attrib_blk->numattrs);
/* Submit FDMI RHBA request */
spin_lock_irq(&hw->lock);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
}
spin_unlock_irq(&hw->lock);
}
/*
* csio_ln_fdmi_dhba_cbfn - DHBA completion
* @hw: HW context
* @fdmi_req: fdmi request
*/
static void
csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
struct csio_lnode *ln = fdmi_req->lnode;
void *cmd;
struct fc_fdmi_port_name *port_name;
uint32_t len;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
fdmi_req->wr_status);
CSIO_INC_STATS(ln, n_fdmi_err);
}
if (!csio_is_rnode_ready(fdmi_req->rnode)) {
CSIO_INC_STATS(ln, n_fdmi_err);
return;
}
cmd = fdmi_req->dma_buf.vaddr;
if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
csio_ct_reason(cmd), csio_ct_expl(cmd));
}
/* Send FDMI cmd to de-register any Port attributes if registered
* before
*/
/* Prepare FDMI DPRT cmd */
memset(cmd, 0, FC_CT_HDR_LEN);
csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_DPRT));
len = FC_CT_HDR_LEN;
port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd);
memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
len += sizeof(*port_name);
/* Submit FDMI request */
spin_lock_irq(&hw->lock);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
}
spin_unlock_irq(&hw->lock);
}
/**
* csio_ln_fdmi_start - Start an FDMI request.
* @ln: lnode
* @context: session context
*
* Issued with lock held.
*/
int
csio_ln_fdmi_start(struct csio_lnode *ln, void *context)
{
struct csio_ioreq *fdmi_req;
struct csio_rnode *fdmi_rn = (struct csio_rnode *)context;
void *cmd;
struct fc_fdmi_hba_identifier *hbaid;
uint32_t len;
if (!(ln->flags & CSIO_LNF_FDMI_ENABLE))
return -EPROTONOSUPPORT;
if (!csio_is_rnode_ready(fdmi_rn))
CSIO_INC_STATS(ln, n_fdmi_err);
/* Send FDMI cmd to de-register any HBA attributes if registered
* before
*/
fdmi_req = ln->mgmt_req;
fdmi_req->lnode = ln;
fdmi_req->rnode = fdmi_rn;
/* Prepare FDMI DHBA cmd */
cmd = fdmi_req->dma_buf.vaddr;
memset(cmd, 0, FC_CT_HDR_LEN);
csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, htons(FC_FDMI_DHBA));
len = FC_CT_HDR_LEN;
hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd);
memcpy(&hbaid->id, csio_ln_wwpn(ln), 8);
len += sizeof(*hbaid);
/* Submit FDMI request */
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n");
}
return 0;
}
/*
* csio_ln_vnp_read_cbfn - vnp read completion handler.
 * @hw: HW module.
 * @mbp: Mailbox command/response.
*
* Reads vnp response and updates ln parameters.
*/
static void
csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv);
struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
struct fc_els_csp *csp;
struct fc_els_cssp *clsp;
enum fw_retval retval;
retval = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
if (retval != FW_SUCCESS) {
csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
mempool_free(mbp, hw->mb_mempool);
return;
}
spin_lock_irq(&hw->lock);
memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac));
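	/*
	 * With a fabric-provided MAC address (FPMA), the low-order three
	 * bytes of the VN_Port MAC carry the assigned N_Port ID, so it is
	 * extracted from bytes 3..5 of vnport_mac below.
	 */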
memcpy(&ln->nport_id, &rsp->vnport_mac[3],
sizeof(uint8_t)*3);
ln->nport_id = ntohl(ln->nport_id);
ln->nport_id = ln->nport_id>>8;
/* Update WWNs */
/*
* This may look like a duplication of what csio_fcoe_enable_link()
* does, but is absolutely necessary if the vnpi changes between
* a FCOE LINK UP and FCOE LINK DOWN.
*/
memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);
/* Copy common sparam */
csp = (struct fc_els_csp *)rsp->cmn_srv_parms;
ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver;
ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver;
ln->ln_sparm.csp.sp_bb_cred = ntohs(csp->sp_bb_cred);
ln->ln_sparm.csp.sp_features = ntohs(csp->sp_features);
ln->ln_sparm.csp.sp_bb_data = ntohs(csp->sp_bb_data);
ln->ln_sparm.csp.sp_r_a_tov = ntohl(csp->sp_r_a_tov);
ln->ln_sparm.csp.sp_e_d_tov = ntohl(csp->sp_e_d_tov);
/* Copy word 0 & word 1 of class sparam */
clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1;
ln->ln_sparm.clsp[2].cp_class = ntohs(clsp->cp_class);
ln->ln_sparm.clsp[2].cp_init = ntohs(clsp->cp_init);
ln->ln_sparm.clsp[2].cp_recip = ntohs(clsp->cp_recip);
ln->ln_sparm.clsp[2].cp_rdfs = ntohs(clsp->cp_rdfs);
spin_unlock_irq(&hw->lock);
mempool_free(mbp, hw->mb_mempool);
/* Send an event to update local attribs */
csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE);
}
/*
* csio_ln_vnp_read - Read vnp params.
* @ln: lnode
* @cbfn: Completion handler.
*
* Issued with lock held.
*/
static int
csio_ln_vnp_read(struct csio_lnode *ln,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct csio_hw *hw = ln->hwp;
struct csio_mb *mbp;
/* Allocate Mbox request */
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
if (!mbp) {
CSIO_INC_STATS(hw, n_err_nomem);
return -ENOMEM;
}
/* Prepare VNP Command */
csio_fcoe_vnp_read_init_mb(ln, mbp,
CSIO_MB_DEFAULT_TMO,
ln->fcf_flowid,
ln->vnp_flowid,
cbfn);
/* Issue MBOX cmd */
if (csio_mb_issue(hw, mbp)) {
csio_err(hw, "Failed to issue mbox FCoE VNP command\n");
mempool_free(mbp, hw->mb_mempool);
return -EINVAL;
}
return 0;
}
/*
* csio_fcoe_enable_link - Enable fcoe link.
* @ln: lnode
* @enable: enable/disable
* Issued with lock held.
* Issues mbox cmd to bring up FCOE link on port associated with given ln.
*/
static int
csio_fcoe_enable_link(struct csio_lnode *ln, bool enable)
{
struct csio_hw *hw = ln->hwp;
struct csio_mb *mbp;
enum fw_retval retval;
uint8_t portid;
uint8_t sub_op;
struct fw_fcoe_link_cmd *lcmd;
int i;
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
if (!mbp) {
CSIO_INC_STATS(hw, n_err_nomem);
return -ENOMEM;
}
portid = ln->portid;
sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN;
csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n",
sub_op ? "UP" : "DOWN", portid);
csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
portid, sub_op, 0, 0, 0, NULL);
if (csio_mb_issue(hw, mbp)) {
csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n",
portid);
mempool_free(mbp, hw->mb_mempool);
return -EINVAL;
}
retval = csio_mb_fw_retval(mbp);
if (retval != FW_SUCCESS) {
csio_err(hw,
"FCOE LINK %s cmd on port[%d] failed with "
"ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval);
mempool_free(mbp, hw->mb_mempool);
return -EINVAL;
}
if (!enable)
goto out;
lcmd = (struct fw_fcoe_link_cmd *)mbp->mb;
memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8);
memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8);
for (i = 0; i < CSIO_MAX_PPORTS; i++)
if (hw->pport[i].portid == portid)
memcpy(hw->pport[i].mac, lcmd->phy_mac, 6);
out:
mempool_free(mbp, hw->mb_mempool);
return 0;
}
/*
 * csio_ln_read_fcf_cbfn - FCF read completion handler.
 * @hw: HW module.
 * @mbp: Mailbox command/response.
 *
 * Reads the FCF response and updates the lnode's FCF information.
*/
static void
csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
struct csio_lnode *ln = (struct csio_lnode *)mbp->priv;
struct csio_fcf_info *fcf_info;
struct fw_fcoe_fcf_cmd *rsp =
(struct fw_fcoe_fcf_cmd *)(mbp->mb);
enum fw_retval retval;
retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
if (retval != FW_SUCCESS) {
csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
retval);
mempool_free(mbp, hw->mb_mempool);
return;
}
spin_lock_irq(&hw->lock);
fcf_info = ln->fcfinfo;
fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET(
ntohs(rsp->priority_pkd));
fcf_info->vf_id = ntohs(rsp->vf_id);
fcf_info->vlan_id = rsp->vlan_id;
fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size);
fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv);
fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi));
fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid);
fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid);
fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid);
fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid);
memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map));
memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac));
memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id));
memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric));
memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac));
spin_unlock_irq(&hw->lock);
mempool_free(mbp, hw->mb_mempool);
}
/*
* csio_ln_read_fcf_entry - Read fcf entry.
* @ln: lnode
* @cbfn: Completion handler.
*
* Issued with lock held.
*/
static int
csio_ln_read_fcf_entry(struct csio_lnode *ln,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct csio_hw *hw = ln->hwp;
struct csio_mb *mbp;
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
if (!mbp) {
CSIO_INC_STATS(hw, n_err_nomem);
return -ENOMEM;
}
/* Get FCoE FCF information */
csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
ln->portid, ln->fcf_flowid, cbfn);
if (csio_mb_issue(hw, mbp)) {
csio_err(hw, "failed to issue FCOE FCF cmd\n");
mempool_free(mbp, hw->mb_mempool);
return -EINVAL;
}
return 0;
}
/*
* csio_handle_link_up - Logical Linkup event.
* @hw - HW module.
* @portid - Physical port number
* @fcfi - FCF index.
* @vnpi - VNP index.
* Returns - none.
*
* This event is received from FW when a virtual link is established between
* the physical port [ENode] and the FCF. If it is a new vnpi, a local node
* object is created on this FCF and set to the [ONLINE] state.
* The lnode then waits for a FW_RDEV_CMD event indicating that fabric login
* has completed, at which point it moves to the [READY] state.
*
* This is called with the hw lock held.
*/
static void
csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
uint32_t vnpi)
{
struct csio_lnode *ln = NULL;
/* Lookup lnode based on vnpi */
ln = csio_ln_lookup_by_vnpi(hw, vnpi);
if (!ln) {
/* Pick lnode based on portid */
ln = csio_ln_lookup_by_portid(hw, portid);
if (!ln) {
csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
portid);
CSIO_DB_ASSERT(0);
return;
}
/* Check if lnode has valid vnp flowid */
if (ln->vnp_flowid != CSIO_INVALID_IDX) {
/* New VN-Port */
spin_unlock_irq(&hw->lock);
ln = csio_lnode_alloc(hw);
spin_lock_irq(&hw->lock);
if (!ln) {
csio_err(hw,
"failed to allocate fcoe lnode"
"for port:%d vnpi:x%x\n",
portid, vnpi);
CSIO_DB_ASSERT(0);
return;
}
ln->portid = portid;
}
ln->vnp_flowid = vnpi;
ln->dev_num &= ~0xFFFF;
ln->dev_num |= vnpi;
}
/* Initialize fcfi */
ln->fcf_flowid = fcfi;
csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);
CSIO_INC_STATS(ln, n_link_up);
/* Send LINKUP event to SM */
csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
}
/*
* csio_post_event_rns
* @ln - FCOE lnode
* @evt - Given rnode event
* Returns - none
*
* Posts given rnode event to all FCOE rnodes connected with given Lnode.
* This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
* event.
*
* This is called with the hw lock held.
*/
static void
csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
{
struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
struct list_head *tmp, *next;
struct csio_rnode *rn;
list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
rn = (struct csio_rnode *) tmp;
csio_post_event(&rn->sm, evt);
}
}
/*
* csio_cleanup_rns
* @ln - FCOE lnode
* Returns - none
*
* Frees all FCOE rnodes connected with given Lnode.
*
* This is called with the hw lock held.
*/
static void
csio_cleanup_rns(struct csio_lnode *ln)
{
struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
struct list_head *tmp, *next_rn;
struct csio_rnode *rn;
list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
rn = (struct csio_rnode *) tmp;
csio_put_rnode(ln, rn);
}
}
/*
* csio_post_event_lns
* @ln - FCOE lnode
* @evt - Given lnode event
* Returns - none
*
* Posts given lnode event to all FCOE lnodes connected with given Lnode.
* This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
* event.
*
* This is called with the hw lock held.
*/
static void
csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
{
struct list_head *tmp;
struct csio_lnode *cln, *sln;
/* If NPIV lnode, send evt only to that and return */
if (csio_is_npiv_ln(ln)) {
csio_post_event(&ln->sm, evt);
return;
}
sln = ln;
/* Traverse children lnodes list and send evt */
list_for_each(tmp, &sln->cln_head) {
cln = (struct csio_lnode *) tmp;
csio_post_event(&cln->sm, evt);
}
/* Send evt to parent lnode */
csio_post_event(&ln->sm, evt);
}
/*
* csio_ln_down - Local nport is down
* @ln - FCOE Lnode
* Returns - none
*
* Sends LINK_DOWN events to the lnode and its associated NPIV lnodes.
*
* This is called with the hw lock held.
*/
static void
csio_ln_down(struct csio_lnode *ln)
{
csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
}
/*
* csio_handle_link_down - Logical Linkdown event.
* @hw - HW module.
* @portid - Physical port number
* @fcfi - FCF index.
* @vnpi - VNP index.
* Returns - none
*
* This event is received from FW when the virtual link goes down between
* the physical port [ENode] and the FCF. The lnode and its associated NPIV
* lnodes hosted on this vnpi [VN-Port] will be de-instantiated.
*
* This is called with the hw lock held.
*/
static void
csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
uint32_t vnpi)
{
struct csio_fcf_info *fp;
struct csio_lnode *ln;
/* Lookup lnode based on vnpi */
ln = csio_ln_lookup_by_vnpi(hw, vnpi);
if (ln) {
fp = ln->fcfinfo;
CSIO_INC_STATS(ln, n_link_down);
/* Warn if linkdown is received while lnode is not in ready state */
if (!csio_is_lnode_ready(ln)) {
csio_ln_warn(ln,
"warn: FCOE link is already offline. "
"Ignoring FCoE linkdown event on portid %d\n",
portid);
CSIO_INC_STATS(ln, n_evt_drop);
return;
}
/* Verify portid */
if (fp->portid != portid) {
csio_ln_warn(ln,
"warn: FCOE linkdown recv with "
"invalid port %d\n", portid);
CSIO_INC_STATS(ln, n_evt_drop);
return;
}
/* verify fcfi */
if (ln->fcf_flowid != fcfi) {
csio_ln_warn(ln,
"warn: FCOE linkdown recv with "
"invalid fcfi x%x\n", fcfi);
CSIO_INC_STATS(ln, n_evt_drop);
return;
}
csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);
/* Send LINK_DOWN event to lnode s/m */
csio_ln_down(ln);
return;
} else {
csio_warn(hw,
"warn: FCOE linkdown recv with invalid vnpi x%x\n",
vnpi);
CSIO_INC_STATS(hw, n_evt_drop);
}
}
/*
* csio_is_lnode_ready - Checks if the FCOE lnode is in ready state.
* @ln: Lnode module
*
* Returns True if FCOE lnode is in ready state.
*/
int
csio_is_lnode_ready(struct csio_lnode *ln)
{
return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
}
/*****************************************************************************/
/* START: Lnode SM */
/*****************************************************************************/
/*
* csio_lns_uninit - Lnode state machine: uninit state.
* @ln - FCOE lnode.
* @evt - Event to be processed.
*
* Process the given event for an lnode that is currently in the "uninit" state.
* Invoked with HW lock held.
* Return - none.
*/
static void
csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
{
struct csio_hw *hw = csio_lnode_to_hw(ln);
struct csio_lnode *rln = hw->rln;
int rv;
CSIO_INC_STATS(ln, n_evt_sm[evt]);
switch (evt) {
case CSIO_LNE_LINKUP:
csio_set_state(&ln->sm, csio_lns_online);
/* Read FCF only for physical lnode */
if (csio_is_phys_ln(ln)) {
rv = csio_ln_read_fcf_entry(ln,
csio_ln_read_fcf_cbfn);
if (rv != 0) {
/* TODO: Send HW RESET event */
CSIO_INC_STATS(ln, n_err);
break;
}
/* Add FCF record */
list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
}
rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
if (rv != 0) {
/* TODO: Send HW RESET event */
CSIO_INC_STATS(ln, n_err);
}
break;
case CSIO_LNE_DOWN_LINK:
break;
default:
csio_ln_dbg(ln,
"unexp ln event %d recv from did:x%x in "
"ln state[uninit].\n", evt, ln->nport_id);
CSIO_INC_STATS(ln, n_evt_unexp);
break;
} /* switch event */
}
/*
* csio_lns_online - Lnode state machine: online state.
* @ln - FCOE lnode.
* @evt - Event to be processed.
*
* Process the given event for an lnode that is currently in the "online" state.
* Invoked with HW lock held.
* Return - none.
*/
static void
csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
{
struct csio_hw *hw = csio_lnode_to_hw(ln);
CSIO_INC_STATS(ln, n_evt_sm[evt]);
switch (evt) {
case CSIO_LNE_LINKUP:
csio_ln_warn(ln,
"warn: FCOE link is up already "
"Ignoring linkup on port:%d\n", ln->portid);
CSIO_INC_STATS(ln, n_evt_drop);
break;
case CSIO_LNE_FAB_INIT_DONE:
csio_set_state(&ln->sm, csio_lns_ready);
spin_unlock_irq(&hw->lock);
csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);
spin_lock_irq(&hw->lock);
break;
case CSIO_LNE_LINK_DOWN:
/* Fall through */
case CSIO_LNE_DOWN_LINK:
csio_set_state(&ln->sm, csio_lns_uninit);
if (csio_is_phys_ln(ln)) {
/* Remove FCF entry */
list_del_init(&ln->fcfinfo->list);
}
break;
default:
csio_ln_dbg(ln,
"unexp ln event %d recv from did:x%x in "
"ln state[uninit].\n", evt, ln->nport_id);
CSIO_INC_STATS(ln, n_evt_unexp);
break;
} /* switch event */
}
/*
* csio_lns_ready - Lnode state machine: ready state.
* @ln - FCOE lnode.
* @evt - Event to be processed.
*
* Process the given event for an lnode that is currently in the "ready" state.
* Invoked with HW lock held.
* Return - none.
*/
static void
csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
{
struct csio_hw *hw = csio_lnode_to_hw(ln);
CSIO_INC_STATS(ln, n_evt_sm[evt]);
switch (evt) {
case CSIO_LNE_FAB_INIT_DONE:
csio_ln_dbg(ln,
"ignoring event %d recv from did x%x"
"in ln state[ready].\n", evt, ln->nport_id);
CSIO_INC_STATS(ln, n_evt_drop);
break;
case CSIO_LNE_LINK_DOWN:
csio_set_state(&ln->sm, csio_lns_offline);
csio_post_event_rns(ln, CSIO_RNFE_DOWN);
spin_unlock_irq(&hw->lock);
csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
spin_lock_irq(&hw->lock);
if (csio_is_phys_ln(ln)) {
/* Remove FCF entry */
list_del_init(&ln->fcfinfo->list);
}
break;
case CSIO_LNE_DOWN_LINK:
csio_set_state(&ln->sm, csio_lns_offline);
csio_post_event_rns(ln, CSIO_RNFE_DOWN);
/* Host needs to issue aborts in case FW has not returned
* WRs with status "ABORTED"
*/
spin_unlock_irq(&hw->lock);
csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
spin_lock_irq(&hw->lock);
if (csio_is_phys_ln(ln)) {
/* Remove FCF entry */
list_del_init(&ln->fcfinfo->list);
}
break;
case CSIO_LNE_CLOSE:
csio_set_state(&ln->sm, csio_lns_uninit);
csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
break;
case CSIO_LNE_LOGO:
csio_set_state(&ln->sm, csio_lns_offline);
csio_post_event_rns(ln, CSIO_RNFE_DOWN);
break;
default:
csio_ln_dbg(ln,
"unexp ln event %d recv from did:x%x in "
"ln state[uninit].\n", evt, ln->nport_id);
CSIO_INC_STATS(ln, n_evt_unexp);
CSIO_DB_ASSERT(0);
break;
} /* switch event */
}
/*
* csio_lns_offline - Lnode state machine: offline state.
* @ln - FCOE lnode.
* @evt - Event to be processed.
*
* Process the given event for an lnode that is currently in the "offline" state.
* Invoked with HW lock held.
* Return - none.
*/
static void
csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)
{
struct csio_hw *hw = csio_lnode_to_hw(ln);
struct csio_lnode *rln = hw->rln;
int rv;
CSIO_INC_STATS(ln, n_evt_sm[evt]);
switch (evt) {
case CSIO_LNE_LINKUP:
csio_set_state(&ln->sm, csio_lns_online);
/* Read FCF only for physical lnode */
if (csio_is_phys_ln(ln)) {
rv = csio_ln_read_fcf_entry(ln,
csio_ln_read_fcf_cbfn);
if (rv != 0) {
/* TODO: Send HW RESET event */
CSIO_INC_STATS(ln, n_err);
break;
}
/* Add FCF record */
list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
}
rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
if (rv != 0) {
/* TODO: Send HW RESET event */
CSIO_INC_STATS(ln, n_err);
}
break;
case CSIO_LNE_LINK_DOWN:
case CSIO_LNE_DOWN_LINK:
case CSIO_LNE_LOGO:
csio_ln_dbg(ln,
"ignoring event %d recv from did x%x"
"in ln state[offline].\n", evt, ln->nport_id);
CSIO_INC_STATS(ln, n_evt_drop);
break;
case CSIO_LNE_CLOSE:
csio_set_state(&ln->sm, csio_lns_uninit);
csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
break;
default:
csio_ln_dbg(ln,
"unexp ln event %d recv from did:x%x in "
"ln state[offline]\n", evt, ln->nport_id);
CSIO_INC_STATS(ln, n_evt_unexp);
CSIO_DB_ASSERT(0);
break;
} /* switch event */
}
/*****************************************************************************/
/* END: Lnode SM */
/*****************************************************************************/
static void
csio_free_fcfinfo(struct kref *kref)
{
struct csio_fcf_info *fcfinfo = container_of(kref,
struct csio_fcf_info, kref);
kfree(fcfinfo);
}
/* Helper routines for attributes */
/*
* csio_lnode_state_to_str - Get current state of FCOE lnode.
* @ln - lnode
* @str - buffer into which the state string is written.
*
*/
void
csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
{
if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
strcpy(str, "UNINIT");
return;
}
if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
strcpy(str, "READY");
return;
}
if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
strcpy(str, "OFFLINE");
return;
}
strcpy(str, "UNKNOWN");
} /* csio_lnode_state_to_str */
int
csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid,
struct fw_fcoe_port_stats *port_stats)
{
struct csio_mb *mbp;
struct fw_fcoe_port_cmd_params portparams;
enum fw_retval retval;
int idx;
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
if (!mbp) {
csio_err(hw, "FCoE PORT PARAMS command out of memory!\n");
return -ENOMEM;
}
portparams.portid = portid;
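/*
 * The FW returns the port stats in chunks: three FCoE PORT commands are
 * issued, pulling counters 1-6, 7-12 and 13-16 respectively into the
 * caller-supplied fw_fcoe_port_stats buffer.
 */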
for (idx = 1; idx <= 3; idx++) {
portparams.idx = (idx-1)*6 + 1;
portparams.nstats = 6;
if (idx == 3)
portparams.nstats = 4;
csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
&portparams, NULL);
if (csio_mb_issue(hw, mbp)) {
csio_err(hw, "Issue of FCoE port params failed!\n");
mempool_free(mbp, hw->mb_mempool);
return -EINVAL;
}
csio_mb_process_portparams_rsp(hw, mbp, &retval,
&portparams, port_stats);
}
mempool_free(mbp, hw->mb_mempool);
return 0;
}
/*
* csio_ln_mgmt_wr_handler - Mgmt Work Request handler.
* @wr - WR.
* @len - WR len.
* This handler is invoked when an outstanding mgmt WR is completed.
* It is invoked in the context of the FW event worker thread for every
* mgmt event received.
* Return - none.
*/
static void
csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len)
{
struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
struct csio_ioreq *io_req = NULL;
struct fw_fcoe_els_ct_wr *wr_cmd;
wr_cmd = (struct fw_fcoe_els_ct_wr *) wr;
if (len < sizeof(struct fw_fcoe_els_ct_wr)) {
csio_err(mgmtm->hw,
"Invalid ELS CT WR length recvd, len:%x\n", len);
mgmtm->stats.n_err++;
return;
}
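/*
 * The WR cookie carries the io_req pointer that was stashed in
 * io_req->fw_handle when the request was submitted (see
 * csio_ln_mgmt_submit_req()); use it to recover the original request.
 */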
io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);
io_req->wr_status = csio_wr_status(wr_cmd);
/* Check whether this ioreq is still in our active queue */
spin_lock_irq(&hw->lock);
if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {
csio_err(mgmtm->hw,
"Error- Invalid IO handle recv in WR. handle: %p\n",
io_req);
mgmtm->stats.n_err++;
spin_unlock_irq(&hw->lock);
return;
}
mgmtm = csio_hw_to_mgmtm(hw);
/* Dequeue from active queue */
list_del_init(&io_req->sm.sm_list);
mgmtm->stats.n_active--;
spin_unlock_irq(&hw->lock);
/* io_req will be freed by completion handler */
if (io_req->io_cbfn)
io_req->io_cbfn(hw, io_req);
}
/**
* csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events.
* @hw: HW module
* @cpl_op: CPL opcode
* @cmd: FW cmd/WR.
*
* Process received FCoE cmd/WR event from FW.
*/
void
csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
{
struct csio_lnode *ln;
struct csio_rnode *rn;
uint8_t portid, opcode = *(uint8_t *)cmd;
struct fw_fcoe_link_cmd *lcmd;
struct fw_wr_hdr *wr;
struct fw_rdev_wr *rdev_wr;
enum fw_fcoe_link_status lstatus;
uint32_t fcfi, rdev_flowid, vnpi;
enum csio_ln_ev evt;
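/*
 * Three kinds of FW events arrive here: an FCoE LINK command carried in a
 * CPL_FW6_MSG (virtual link up/down), an RDEV WR carried in a CPL_FW6_PLD
 * (remote device state change), and an ELS/CT WR completion carried in a
 * CPL_FW6_MSG (management request done).
 */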
if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) {
lcmd = (struct fw_fcoe_link_cmd *)cmd;
lstatus = lcmd->lstatus;
portid = FW_FCOE_LINK_CMD_PORTID_GET(
ntohl(lcmd->op_to_portid));
fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi));
vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd));
if (lstatus == FCOE_LINKUP) {
/* HW lock here */
spin_lock_irq(&hw->lock);
csio_handle_link_up(hw, portid, fcfi, vnpi);
spin_unlock_irq(&hw->lock);
/* HW un lock here */
} else if (lstatus == FCOE_LINKDOWN) {
/* HW lock here */
spin_lock_irq(&hw->lock);
csio_handle_link_down(hw, portid, fcfi, vnpi);
spin_unlock_irq(&hw->lock);
/* HW un lock here */
} else {
csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n",
lstatus);
CSIO_INC_STATS(hw, n_cpl_unexp);
}
} else if (cpl_op == CPL_FW6_PLD) {
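/* The embedded WR starts after the first four 64-bit words of the message */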
wr = (struct fw_wr_hdr *) (cmd + 4);
if (FW_WR_OP_GET(be32_to_cpu(wr->hi))
== FW_RDEV_WR) {
rdev_wr = (struct fw_rdev_wr *) (cmd + 4);
rdev_flowid = FW_RDEV_WR_FLOWID_GET(
ntohl(rdev_wr->alloc_to_len16));
vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET(
ntohl(rdev_wr->flags_to_assoc_flowid));
csio_dbg(hw,
"FW_RDEV_WR: flowid:x%x ev_cause:x%x "
"vnpi:0x%x\n", rdev_flowid,
rdev_wr->event_cause, vnpi);
if (rdev_wr->protocol != PROT_FCOE) {
csio_err(hw,
"FW_RDEV_WR: invalid proto:x%x "
"received with flowid:x%x\n",
rdev_wr->protocol,
rdev_flowid);
CSIO_INC_STATS(hw, n_evt_drop);
return;
}
/* HW lock here */
spin_lock_irq(&hw->lock);
ln = csio_ln_lookup_by_vnpi(hw, vnpi);
if (!ln) {
csio_err(hw,
"FW_DEV_WR: invalid vnpi:x%x received "
"with flowid:x%x\n", vnpi, rdev_flowid);
CSIO_INC_STATS(hw, n_evt_drop);
goto out_pld;
}
rn = csio_confirm_rnode(ln, rdev_flowid,
&rdev_wr->u.fcoe_rdev);
if (!rn) {
csio_ln_dbg(ln,
"Failed to confirm rnode "
"for flowid:x%x\n", rdev_flowid);
CSIO_INC_STATS(hw, n_evt_drop);
goto out_pld;
}
/* save previous event for debugging */
ln->prev_evt = ln->cur_evt;
ln->cur_evt = rdev_wr->event_cause;
CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]);
/* Translate all the fabric events to lnode SM events */
evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause);
if (evt) {
csio_ln_dbg(ln,
"Posting event to lnode event:%d "
"cause:%d flowid:x%x\n", evt,
rdev_wr->event_cause, rdev_flowid);
csio_post_event(&ln->sm, evt);
}
/* Handover event to rn SM here. */
csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);
out_pld:
spin_unlock_irq(&hw->lock);
return;
} else {
csio_warn(hw, "unexpected WR op(0x%x) recv\n",
FW_WR_OP_GET(be32_to_cpu((wr->hi))));
CSIO_INC_STATS(hw, n_cpl_unexp);
}
} else if (cpl_op == CPL_FW6_MSG) {
wr = (struct fw_wr_hdr *) (cmd);
if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
csio_ln_mgmt_wr_handler(hw, wr,
sizeof(struct fw_fcoe_els_ct_wr));
} else {
csio_warn(hw, "unexpected WR op(0x%x) recv\n",
FW_WR_OP_GET(be32_to_cpu((wr->hi))));
CSIO_INC_STATS(hw, n_cpl_unexp);
}
} else {
csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode);
CSIO_INC_STATS(hw, n_cpl_unexp);
}
}
/**
* csio_lnode_start - Kickstart lnode discovery.
* @ln: lnode
*
* This routine kickstarts the discovery by issuing an FCOE_LINK (up) command.
*/
int
csio_lnode_start(struct csio_lnode *ln)
{
int rv = 0;
if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) {
rv = csio_fcoe_enable_link(ln, 1);
ln->flags |= CSIO_LNF_LINK_ENABLE;
}
return rv;
}
/**
* csio_lnode_stop - Stop the lnode.
* @ln: lnode
*
* This routine is invoked by HW module to stop lnode and its associated NPIV
* lnodes.
*/
void
csio_lnode_stop(struct csio_lnode *ln)
{
csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK);
if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) {
csio_fcoe_enable_link(ln, 0);
ln->flags &= ~CSIO_LNF_LINK_ENABLE;
}
csio_ln_dbg(ln, "stopping ln :%p\n", ln);
}
/**
* csio_lnode_close - Close an lnode.
* @ln: lnode
*
* This routine is invoked by HW module to close an lnode and its
* associated NPIV lnodes. Lnode and its associated NPIV lnodes are
* set to uninitialized state.
*/
void
csio_lnode_close(struct csio_lnode *ln)
{
csio_post_event_lns(ln, CSIO_LNE_CLOSE);
if (csio_is_phys_ln(ln))
ln->vnp_flowid = CSIO_INVALID_IDX;
csio_ln_dbg(ln, "closed ln :%p\n", ln);
}
/*
* csio_ln_prep_ecwr - Prepare ELS/CT WR.
* @io_req - IO request.
* @wr_len - WR len
* @immd_len - WR immediate data length
* @sub_op - Sub opcode
* @sid - source portid.
* @did - destination portid
* @flow_id - flowid
* @fw_wr - ELS/CT WR to be prepared.
* Returns: 0 - on success
*/
static int
csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
uint32_t immd_len, uint8_t sub_op, uint32_t sid,
uint32_t did, uint32_t flow_id, uint8_t *fw_wr)
{
struct fw_fcoe_els_ct_wr *wr;
uint32_t port_id;
wr = (struct fw_fcoe_els_ct_wr *)fw_wr;
wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_FCOE_ELS_CT_WR) |
FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));
wr_len = DIV_ROUND_UP(wr_len, 16);
wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flow_id) |
FW_WR_LEN16(wr_len));
wr->els_ct_type = sub_op;
wr->ctl_pri = 0;
wr->cp_en_class = 0;
wr->cookie = io_req->fw_handle;
wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(
io_req->lnode->hwp, io_req->iq_idx));
wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1);
wr->tmo_val = (uint8_t) io_req->tmo;
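/*
 * FC port IDs are 24 bits wide: convert to big-endian and copy the 3-byte
 * N_Port ID into the local/remote ID fields.
 */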
port_id = htonl(sid);
memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
port_id = htonl(did);
memcpy(wr->r_id, PORT_ID_PTR(port_id), 3);
/* Prepare RSP SGL */
wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len);
wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr);
return 0;
}
/*
* csio_ln_mgmt_submit_wr - Post elsct work request.
* @mgmtm - mgmtm
* @io_req - io request.
* @sub_op - ELS or CT request type
* @pld - Dma Payload buffer
* @pld_len - Payload len
* Prepares the ELS/CT work request and sends it to FW.
* Returns: 0 - on success
*/
static int
csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
uint8_t sub_op, struct csio_dma_buf *pld,
uint32_t pld_len)
{
struct csio_wr_pair wrp;
struct csio_lnode *ln = io_req->lnode;
struct csio_rnode *rn = io_req->rnode;
struct csio_hw *hw = mgmtm->hw;
uint8_t fw_wr[64];
struct ulptx_sgl dsgl;
uint32_t wr_size = 0;
uint8_t im_len = 0;
uint32_t wr_off = 0;
int ret = 0;
/* Calculate WR Size for this ELS REQ */
wr_size = sizeof(struct fw_fcoe_els_ct_wr);
/* Send as immediate data if pld < 256 */
if (pld_len < 256) {
wr_size += ALIGN(pld_len, 8);
im_len = (uint8_t)pld_len;
} else
wr_size += sizeof(struct ulptx_sgl);
/* Roundup WR size in units of 16 bytes */
wr_size = ALIGN(wr_size, 16);
/* Get WR to send ELS REQ */
ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp);
if (ret != 0) {
csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n",
io_req, ret);
return ret;
}
/* Prepare Generic WR used by all ELS/CT cmd */
csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op,
ln->nport_id, rn->nport_id,
csio_rn_flowid(rn),
&fw_wr[0]);
/* Copy ELS/CT WR CMD */
csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off,
sizeof(struct fw_fcoe_els_ct_wr));
wr_off += sizeof(struct fw_fcoe_els_ct_wr);
/* Copy payload to Immediate section of WR */
if (im_len)
csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
else {
/* Program DSGL to dma payload */
dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
ULPTX_MORE | ULPTX_NSGE(1));
dsgl.len0 = cpu_to_be32(pld_len);
dsgl.addr0 = cpu_to_be64(pld->paddr);
csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
sizeof(struct ulptx_sgl));
}
/* Issue work request to xmit ELS/CT req to FW */
csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false);
return ret;
}
/*
* csio_ln_mgmt_submit_req - Submit FCOE Mgmt request.
* @io_req - IO Request
* @io_cbfn - Completion handler.
* @req_type - ELS or CT request type
* @pld - Dma Payload buffer
* @pld_len - Payload len
*
* This API is used to submit a management ELS/CT request.
* This is called with the hw lock held.
* Returns: 0 - on success
* -ENOMEM - on error.
*/
static int
csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
enum fcoe_cmn_type req_type, struct csio_dma_buf *pld,
uint32_t pld_len)
{
struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode);
struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
int rv;
io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */
io_req->fw_handle = (uintptr_t) (io_req);
io_req->eq_idx = mgmtm->eq_idx;
io_req->iq_idx = mgmtm->iq_idx;
rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len);
if (rv == 0) {
list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q);
mgmtm->stats.n_active++;
}
return rv;
}
/*
* csio_ln_fdmi_init - FDMI Init entry point.
* @ln: lnode
*/
static int
csio_ln_fdmi_init(struct csio_lnode *ln)
{
struct csio_hw *hw = csio_lnode_to_hw(ln);
struct csio_dma_buf *dma_buf;
/* Allocate MGMT request required for FDMI */
ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
if (!ln->mgmt_req) {
csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n");
CSIO_INC_STATS(hw, n_err_nomem);
return -ENOMEM;
}
/* Allocate Dma buffers for FDMI response Payload */
dma_buf = &ln->mgmt_req->dma_buf;
dma_buf->len = 2048;
dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
&dma_buf->paddr);
if (!dma_buf->vaddr) {
csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
kfree(ln->mgmt_req);
ln->mgmt_req = NULL;
return -ENOMEM;
}
ln->flags |= CSIO_LNF_FDMI_ENABLE;
return 0;
}
/*
* csio_ln_fdmi_exit - FDMI exit entry point.
* @ln: lnode
*/
static int
csio_ln_fdmi_exit(struct csio_lnode *ln)
{
struct csio_dma_buf *dma_buf;
struct csio_hw *hw = csio_lnode_to_hw(ln);
if (!ln->mgmt_req)
return 0;
dma_buf = &ln->mgmt_req->dma_buf;
if (dma_buf->vaddr)
pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
dma_buf->paddr);
kfree(ln->mgmt_req);
return 0;
}
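/*
 * csio_scan_done - Check whether the SCSI target scan can be considered done.
 * @ln: lnode
 * @ticks: current tick count
 * @time: time elapsed since the scan started
 * @max_scan_ticks: absolute upper bound on the scan window
 * @delta_scan_ticks: interval at which the target count is re-checked
 *
 * Returns 1 once @time crosses @max_scan_ticks, or when a full
 * @delta_scan_ticks interval passes without the number of discovered SCSI
 * targets changing; returns 0 otherwise.
 */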
int
csio_scan_done(struct csio_lnode *ln, unsigned long ticks,
unsigned long time, unsigned long max_scan_ticks,
unsigned long delta_scan_ticks)
{
int rv = 0;
if (time >= max_scan_ticks)
return 1;
if (!ln->tgt_scan_tick)
ln->tgt_scan_tick = ticks;
if (((ticks - ln->tgt_scan_tick) >= delta_scan_ticks)) {
if (!ln->last_scan_ntgts)
ln->last_scan_ntgts = ln->n_scsi_tgts;
else {
if (ln->last_scan_ntgts == ln->n_scsi_tgts)
return 1;
ln->last_scan_ntgts = ln->n_scsi_tgts;
}
ln->tgt_scan_tick = ticks;
}
return rv;
}
/*
* csio_notify_lnodes:
* @hw: HW module
* @note: Notification
*
* Called from the HW SM to fan out notifications to the
* Lnode SM. Since the HW SM is entered with lock held,
* there is no need to hold locks here.
*
*/
void
csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)
{
struct list_head *tmp;
struct csio_lnode *ln;
csio_dbg(hw, "Notifying all nodes of event %d\n", note);
/* Traverse children lnodes list and send evt */
list_for_each(tmp, &hw->sln_head) {
ln = (struct csio_lnode *) tmp;
switch (note) {
case CSIO_LN_NOTIFY_HWREADY:
csio_lnode_start(ln);
break;
case CSIO_LN_NOTIFY_HWRESET:
case CSIO_LN_NOTIFY_HWREMOVE:
csio_lnode_close(ln);
break;
case CSIO_LN_NOTIFY_HWSTOP:
csio_lnode_stop(ln);
break;
default:
break;
}
}
}
/*
* csio_disable_lnodes:
* @hw: HW module
* @portid: port id
* @disable: disable/enable flag.
* If disable=1, disables all lnodes hosted on the given physical port,
* otherwise enables all the lnodes on the given physical port.
* This routine needs to be called with the hw lock held.
*/
void
csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)
{
struct list_head *tmp;
struct csio_lnode *ln;
csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);
/* Traverse sibling lnodes list and send evt */
list_for_each(tmp, &hw->sln_head) {
ln = (struct csio_lnode *) tmp;
if (ln->portid != portid)
continue;
if (disable)
csio_lnode_stop(ln);
else
csio_lnode_start(ln);
}
}
/*
* csio_ln_init - Initialize an lnode.
* @ln: lnode
*
*/
static int
csio_ln_init(struct csio_lnode *ln)
{
int rv = -EINVAL;
struct csio_lnode *rln, *pln;
struct csio_hw *hw = csio_lnode_to_hw(ln);
csio_init_state(&ln->sm, csio_lns_uninit);
ln->vnp_flowid = CSIO_INVALID_IDX;
ln->fcf_flowid = CSIO_INVALID_IDX;
if (csio_is_root_ln(ln)) {
/* This is the lnode used during initialization */
ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);
if (!ln->fcfinfo) {
csio_ln_err(ln, "Failed to alloc FCF record\n");
CSIO_INC_STATS(hw, n_err_nomem);
goto err;
}
INIT_LIST_HEAD(&ln->fcf_lsthead);
kref_init(&ln->fcfinfo->kref);
if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
goto err;
} else { /* Either a non-root physical or a virtual lnode */
/*
* The rest is common for non-root physical and NPIV lnodes.
* Just get references to all other modules
*/
rln = csio_root_lnode(ln);
if (csio_is_npiv_ln(ln)) {
/* NPIV */
pln = csio_parent_lnode(ln);
kref_get(&pln->fcfinfo->kref);
ln->fcfinfo = pln->fcfinfo;
} else {
/* Another non-root physical lnode (FCF) */
ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),
GFP_KERNEL);
if (!ln->fcfinfo) {
csio_ln_err(ln, "Failed to alloc FCF info\n");
CSIO_INC_STATS(hw, n_err_nomem);
goto err;
}
kref_init(&ln->fcfinfo->kref);
if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
goto err;
}
} /* if (!csio_is_root_ln(ln)) */
return 0;
err:
return rv;
}
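/*
 * csio_ln_exit - Tear down an lnode: free its rnodes and drop the
 * reference on its FCF record (freeing it on the last put).
 * @ln: lnode
 */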
static void
csio_ln_exit(struct csio_lnode *ln)
{
struct csio_lnode *pln;
csio_cleanup_rns(ln);
if (csio_is_npiv_ln(ln)) {
pln = csio_parent_lnode(ln);
kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);
} else {
kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);
if (csio_fdmi_enable)
csio_ln_fdmi_exit(ln);
}
ln->fcfinfo = NULL;
}
/**
* csio_lnode_init - Initialize the members of an lnode.
* @ln: lnode
*
*/
int
csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,
struct csio_lnode *pln)
{
int rv = -EINVAL;
/* Link this lnode to hw */
csio_lnode_to_hw(ln) = hw;
/* Link child to parent if child lnode */
if (pln)
ln->pln = pln;
else
ln->pln = NULL;
/* Initialize scsi_tgt and timers to zero */
ln->n_scsi_tgts = 0;
ln->last_scan_ntgts = 0;
ln->tgt_scan_tick = 0;
/* Initialize rnode list */
INIT_LIST_HEAD(&ln->rnhead);
INIT_LIST_HEAD(&ln->cln_head);
/* Initialize log level for debug */
ln->params.log_level = hw->params.log_level;
if (csio_ln_init(ln))
goto err;
/* Add lnode to list of sibling or children lnodes */
spin_lock_irq(&hw->lock);
list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head);
if (pln)
pln->num_vports++;
spin_unlock_irq(&hw->lock);
hw->num_lns++;
return 0;
err:
csio_lnode_to_hw(ln) = NULL;
return rv;
}
/**
* csio_lnode_exit - De-instantiate an lnode.
* @ln: lnode
*
*/
void
csio_lnode_exit(struct csio_lnode *ln)
{
struct csio_hw *hw = csio_lnode_to_hw(ln);
csio_ln_exit(ln);
/* Remove this lnode from hw->sln_head */
spin_lock_irq(&hw->lock);
list_del_init(&ln->sm.sm_list);
/* If it is a child lnode, decrement the
 * vport counter in its parent lnode
 */
if (ln->pln)
ln->pln->num_vports--;
/* Update root lnode pointer */
if (list_empty(&hw->sln_head))
hw->rln = NULL;
else
hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head);
spin_unlock_irq(&hw->lock);
csio_lnode_to_hw(ln) = NULL;
hw->num_lns--;
}
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_LNODE_H__
#define __CSIO_LNODE_H__
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_els.h>
#include "csio_defs.h"
#include "csio_hw.h"
#define CSIO_FCOE_MAX_NPIV 128
#define CSIO_FCOE_MAX_RNODES 2048
/* FDMI port attribute unknown speed */
#define CSIO_HBA_PORTSPEED_UNKNOWN 0x8000
extern int csio_fcoe_rnodes;
extern int csio_fdmi_enable;
/* State machine events */
enum csio_ln_ev {
CSIO_LNE_NONE = (uint32_t)0,
CSIO_LNE_LINKUP,
CSIO_LNE_FAB_INIT_DONE,
CSIO_LNE_LINK_DOWN,
CSIO_LNE_DOWN_LINK,
CSIO_LNE_LOGO,
CSIO_LNE_CLOSE,
CSIO_LNE_MAX_EVENT,
};
struct csio_fcf_info {
struct list_head list;
uint8_t priority;
uint8_t mac[6];
uint8_t name_id[8];
uint8_t fabric[8];
uint16_t vf_id;
uint8_t vlan_id;
uint16_t max_fcoe_size;
uint8_t fc_map[3];
uint32_t fka_adv;
uint32_t fcfi;
uint8_t get_next:1;
uint8_t link_aff:1;
uint8_t fpma:1;
uint8_t spma:1;
uint8_t login:1;
uint8_t portid;
uint8_t spma_mac[6];
struct kref kref;
};
/* Defines for flags */
#define CSIO_LNF_FIPSUPP 0x00000001 /* Fip Supported */
#define CSIO_LNF_NPIVSUPP 0x00000002 /* NPIV supported */
#define CSIO_LNF_LINK_ENABLE 0x00000004 /* Link enabled */
#define CSIO_LNF_FDMI_ENABLE 0x00000008 /* FDMI support */
/* Transport events */
enum csio_ln_fc_evt {
CSIO_LN_FC_LINKUP = 1,
CSIO_LN_FC_LINKDOWN,
CSIO_LN_FC_RSCN,
CSIO_LN_FC_ATTRIB_UPDATE,
};
/* Lnode stats */
struct csio_lnode_stats {
uint32_t n_link_up; /* Link up */
uint32_t n_link_down; /* Link down */
uint32_t n_err; /* error */
uint32_t n_err_nomem; /* memory not available */
uint32_t n_inval_parm; /* Invalid parameters */
uint32_t n_evt_unexp; /* unexpected event */
uint32_t n_evt_drop; /* dropped event */
uint32_t n_rnode_match; /* matched rnode */
uint32_t n_dev_loss_tmo; /* Device loss timeout */
uint32_t n_fdmi_err; /* fdmi err */
uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
uint32_t n_rnode_alloc; /* rnode allocated */
uint32_t n_rnode_free; /* rnode freed */
uint32_t n_rnode_nomem; /* rnode alloc failure */
uint32_t n_input_requests; /* Input Requests */
uint32_t n_output_requests; /* Output Requests */
uint32_t n_control_requests; /* Control Requests */
uint32_t n_input_bytes; /* Input Bytes */
uint32_t n_output_bytes; /* Output Bytes */
uint32_t rsvd1;
};
/* Common Lnode params */
struct csio_lnode_params {
uint32_t ra_tov;
uint32_t fcfi;
uint32_t log_level; /* Module level for debugging */
};
struct csio_service_parms {
struct fc_els_csp csp; /* Common service parms */
uint8_t wwpn[8]; /* WWPN */
uint8_t wwnn[8]; /* WWNN */
struct fc_els_cssp clsp[4]; /* Class service params */
uint8_t vvl[16]; /* Vendor version level */
};
/* Lnode */
struct csio_lnode {
struct csio_sm sm; /* State machine + sibling
* lnode list.
*/
struct csio_hw *hwp; /* Pointer to the HW module */
uint8_t portid; /* Port ID */
uint8_t rsvd1;
uint16_t rsvd2;
uint32_t dev_num; /* Device number */
uint32_t flags; /* Flags */
struct list_head fcf_lsthead; /* FCF entries */
struct csio_fcf_info *fcfinfo; /* FCF in use */
struct csio_ioreq *mgmt_req; /* MGMT request */
/* FCoE identifiers */
uint8_t mac[6];
uint32_t nport_id;
struct csio_service_parms ln_sparm; /* Service parms */
/* Firmware identifiers */
uint32_t fcf_flowid; /*fcf flowid */
uint32_t vnp_flowid;
uint16_t ssn_cnt; /* Registered Session */
uint8_t cur_evt; /* Current event */
uint8_t prev_evt; /* Previous event */
/* Children */
struct list_head cln_head; /* Head of the children lnode
* list.
*/
uint32_t num_vports; /* Total NPIV/children LNodes*/
struct csio_lnode *pln; /* Parent lnode of child
* lnodes.
*/
struct list_head cmpl_q; /* Pending I/Os on this lnode */
/* Remote node information */
struct list_head rnhead; /* Head of rnode list */
uint32_t num_reg_rnodes; /* Number of rnodes registered
* with the host.
*/
uint32_t n_scsi_tgts; /* Number of scsi targets
* found
*/
uint32_t last_scan_ntgts;/* Number of scsi targets
* found per last scan.
*/
uint32_t tgt_scan_tick; /* timer started after
* new tgt found
*/
/* FC transport data */
struct fc_vport *fc_vport;
struct fc_host_statistics fch_stats;
struct csio_lnode_stats stats; /* Common lnode stats */
struct csio_lnode_params params; /* Common lnode params */
};
#define csio_lnode_to_hw(ln) ((ln)->hwp)
#define csio_root_lnode(ln) (csio_lnode_to_hw((ln))->rln)
#define csio_parent_lnode(ln) ((ln)->pln)
#define csio_ln_flowid(ln) ((ln)->vnp_flowid)
#define csio_ln_wwpn(ln) ((ln)->ln_sparm.wwpn)
#define csio_ln_wwnn(ln) ((ln)->ln_sparm.wwnn)
#define csio_is_root_ln(ln) (((ln) == csio_root_lnode((ln))) ? 1 : 0)
#define csio_is_phys_ln(ln) (((ln)->pln == NULL) ? 1 : 0)
#define csio_is_npiv_ln(ln) (((ln)->pln != NULL) ? 1 : 0)
#define csio_ln_dbg(_ln, _fmt, ...) \
csio_dbg(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
#define csio_ln_err(_ln, _fmt, ...) \
csio_err(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
#define csio_ln_warn(_ln, _fmt, ...) \
csio_warn(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
/* HW->Lnode notifications */
enum csio_ln_notify {
CSIO_LN_NOTIFY_HWREADY = 1,
CSIO_LN_NOTIFY_HWSTOP,
CSIO_LN_NOTIFY_HWREMOVE,
CSIO_LN_NOTIFY_HWRESET,
};
void csio_fcoe_fwevt_handler(struct csio_hw *, __u8 cpl_op, __be64 *);
int csio_is_lnode_ready(struct csio_lnode *);
void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str);
struct csio_lnode *csio_lnode_lookup_by_wwpn(struct csio_hw *, uint8_t *);
int csio_get_phy_port_stats(struct csio_hw *, uint8_t ,
struct fw_fcoe_port_stats *);
int csio_scan_done(struct csio_lnode *, unsigned long, unsigned long,
unsigned long, unsigned long);
void csio_notify_lnodes(struct csio_hw *, enum csio_ln_notify);
void csio_disable_lnodes(struct csio_hw *, uint8_t, bool);
void csio_lnode_async_event(struct csio_lnode *, enum csio_ln_fc_evt);
int csio_ln_fdmi_start(struct csio_lnode *, void *);
int csio_lnode_start(struct csio_lnode *);
void csio_lnode_stop(struct csio_lnode *);
void csio_lnode_close(struct csio_lnode *);
int csio_lnode_init(struct csio_lnode *, struct csio_hw *,
struct csio_lnode *);
void csio_lnode_exit(struct csio_lnode *);
#endif /* ifndef __CSIO_LNODE_H__ */
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_mb.h"
#include "csio_wr.h"
#define csio_mb_is_host_owner(__owner) ((__owner) == CSIO_MBOWNER_PL)
/* MB Command/Response Helpers */
/*
* csio_mb_fw_retval - FW return value from a mailbox response.
* @mbp: Mailbox structure
*
*/
enum fw_retval
csio_mb_fw_retval(struct csio_mb *mbp)
{
struct fw_cmd_hdr *hdr;
hdr = (struct fw_cmd_hdr *)(mbp->mb);
return FW_CMD_RETVAL_GET(ntohl(hdr->lo));
}
/*
* csio_mb_hello - FW HELLO command helper
* @hw: The HW structure
* @mbp: Mailbox structure
* @m_mbox: Master mailbox number, if any.
* @a_mbox: Mailbox number for async notifications.
* @master: Device mastership.
* @cbfn: Callback, if any.
*
*/
void
csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
cmdp->op_to_write = htonl(FW_CMD_OP(FW_HELLO_CMD) |
FW_CMD_REQUEST | FW_CMD_WRITE);
cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
cmdp->err_to_clearinit = htonl(
FW_HELLO_CMD_MASTERDIS(master == CSIO_MASTER_CANT) |
FW_HELLO_CMD_MASTERFORCE(master == CSIO_MASTER_MUST) |
FW_HELLO_CMD_MBMASTER(master == CSIO_MASTER_MUST ?
m_mbox : FW_HELLO_CMD_MBMASTER_MASK) |
FW_HELLO_CMD_MBASYNCNOT(a_mbox) |
FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
FW_HELLO_CMD_CLEARINIT);
}
/*
* csio_mb_process_hello_rsp - FW HELLO response processing helper
* @hw: The HW structure
* @mbp: Mailbox structure
* @retval: Mailbox return value from Firmware
* @state: Device state returned by the firmware.
* @mpfn: Master pfn
*
*/
void
csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,
enum fw_retval *retval, enum csio_dev_state *state,
uint8_t *mpfn)
{
struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);
uint32_t value;
*retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
if (*retval == FW_SUCCESS) {
hw->fwrev = ntohl(rsp->fwrev);
value = ntohl(rsp->err_to_clearinit);
*mpfn = FW_HELLO_CMD_MBMASTER_GET(value);
if (value & FW_HELLO_CMD_INIT)
*state = CSIO_DEV_STATE_INIT;
else if (value & FW_HELLO_CMD_ERR)
*state = CSIO_DEV_STATE_ERR;
else
*state = CSIO_DEV_STATE_UNINIT;
}
}
/*
* csio_mb_bye - FW BYE command helper
* @hw: The HW structure
* @mbp: Mailbox structure
* @cbfn: Callback, if any.
*
*/
void
csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
cmdp->op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) |
FW_CMD_REQUEST | FW_CMD_WRITE);
cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
}
/*
* csio_mb_reset - FW RESET command helper
* @hw: The HW structure
* @mbp: Mailbox structure
* @reset: Type of reset.
* @cbfn: Callback, if any.
*
*/
void
csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
int reset, int halt,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
cmdp->op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) |
FW_CMD_REQUEST | FW_CMD_WRITE);
cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
cmdp->val = htonl(reset);
cmdp->halt_pkd = htonl(halt);
}
/*
* csio_mb_params - FW PARAMS command helper
* @hw: The HW structure
* @mbp: Mailbox structure
* @tmo: Command timeout.
* @pf: PF number.
* @vf: VF number.
* @nparams: Number of parameters
* @params: Parameter mnemonic array.
* @val: Parameter value array.
* @wr: Write/Read PARAMS.
* @cbfn: Callback, if any.
*
*/
void
csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
unsigned int pf, unsigned int vf, unsigned int nparams,
const u32 *params, u32 *val, bool wr,
void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
uint32_t i;
uint32_t temp_params = 0, temp_val = 0;
struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb);
__be32 *p = &cmdp->param[0].mnem;
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) |
FW_CMD_REQUEST |
(wr ? FW_CMD_WRITE : FW_CMD_READ) |
FW_PARAMS_CMD_PFN(pf) |
FW_PARAMS_CMD_VFN(vf));
cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
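/*
 * Each param[] slot holds a mnemonic/value pair. For a write, both are
 * filled in here; for a read, only the mnemonics are set and the FW returns
 * the values in the same slots (see csio_mb_process_read_params_rsp()).
 */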
/* Write Params */
if (wr) {
while (nparams--) {
temp_params = *params++;
temp_val = *val++;
*p++ = htonl(temp_params);
*p++ = htonl(temp_val);
}
} else {
for (i = 0; i < nparams; i++, p += 2) {
temp_params = *params++;
*p = htonl(temp_params);
}
}
}
/*
* csio_mb_process_read_params_rsp - FW PARAMS response processing helper
* @hw: The HW structure
* @mbp: Mailbox structure
* @retval: Mailbox return value from Firmware
* @nparams: Number of parameters
* @val: Parameter value array.
*
*/
void
csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp,
enum fw_retval *retval, unsigned int nparams,
u32 *val)
{
struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb);
uint32_t i;
__be32 *p = &rsp->param[0].val;
*retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
if (*retval == FW_SUCCESS)
for (i = 0; i < nparams; i++, p += 2)
*val++ = ntohl(*p);
}
/*
* csio_mb_ldst - FW LDST command
* @hw: The HW structure
* @mbp: Mailbox structure
* @tmo: timeout
* @reg: register
*
*/
void
csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg)
{
struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1);
/*
* Construct and send the Firmware LDST Command to retrieve the
* specified PCI-E Configuration Space register.
*/
ldst_cmd->op_to_addrspace =
htonl(FW_CMD_OP(FW_LDST_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ |
FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd));
ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
ldst_cmd->u.pcie.ctrl_to_fn =
(FW_LDST_CMD_LC | FW_LDST_CMD_FN(hw->pfn));
ldst_cmd->u.pcie.r = (uint8_t)reg;
}
/*
*
* csio_mb_caps_config - FW Read/Write Capabilities command helper
* @hw: The HW structure
* @mbp: Mailbox structure
* @wr: Write if 1, Read if 0
* @init: Turn on initiator mode.
* @tgt: Turn on target mode.
* @cofld: If 1, Control Offload for FCoE
* @cbfn: Callback, if any.
*
* This helper assumes that cmdp has MB payload from a previous CAPS
* read command.
*/
void
csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
bool wr, bool init, bool tgt, bool cofld,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_caps_config_cmd *cmdp =
(struct fw_caps_config_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1);
cmdp->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
(wr ? FW_CMD_WRITE : FW_CMD_READ));
cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
/* Read config */
if (!wr)
return;
/* Write config */
cmdp->fcoecaps = 0;
if (cofld)
cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD);
if (init)
cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR);
if (tgt)
cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);
}
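/*
 * csio_rss_glb_config - FW RSS_GLB_CONFIG command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout
 * @mode: Global RSS mode (manual or basic-virtual)
 * @flags: Mode-specific flags, used only in basic-virtual mode
 * @cbfn: Callback, if any.
 */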
void
csio_rss_glb_config(struct csio_hw *hw, struct csio_mb *mbp,
uint32_t tmo, uint8_t mode, unsigned int flags,
void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
struct fw_rss_glb_config_cmd *cmdp =
(struct fw_rss_glb_config_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
cmdp->op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_WRITE);
cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
cmdp->u.manual.mode_pkd =
htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
cmdp->u.basicvirtual.mode_pkd =
htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
cmdp->u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
}
}
/*
* csio_mb_pfvf - FW Write PF/VF capabilities command helper.
* @hw: The HW structure
* @mbp: Mailbox structure
* @pf:
* @vf:
* @txq:
* @txq_eht_ctrl:
* @rxqi:
* @rxq:
* @tc:
* @vi:
* @pmask:
* @rcaps:
* @wxcaps:
* @cbfn: Callback, if any.
*
*/
void
csio_mb_pfvf(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
unsigned int pf, unsigned int vf, unsigned int txq,
unsigned int txq_eth_ctrl, unsigned int rxqi,
unsigned int rxq, unsigned int tc, unsigned int vi,
unsigned int cmask, unsigned int pmask, unsigned int nexactf,
unsigned int rcaps, unsigned int wxcaps,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_pfvf_cmd *cmdp = (struct fw_pfvf_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE |
FW_PFVF_CMD_PFN(pf) |
FW_PFVF_CMD_VFN(vf));
cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
cmdp->niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
FW_PFVF_CMD_NIQ(rxq));
cmdp->type_to_neq = htonl(FW_PFVF_CMD_TYPE |
FW_PFVF_CMD_CMASK(cmask) |
FW_PFVF_CMD_PMASK(pmask) |
FW_PFVF_CMD_NEQ(txq));
cmdp->tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) |
FW_PFVF_CMD_NVI(vi) |
FW_PFVF_CMD_NEXACTF(nexactf));
cmdp->r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
FW_PFVF_CMD_WX_CAPS(wxcaps) |
FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
}
#define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
/*
* csio_mb_port- FW PORT command helper
* @hw: The HW structure
* @mbp: Mailbox structure
* @tmo: Command timeout
* @portid: Port ID to get/set info
* @wr: Write/Read PORT information.
* @fc: Flow control
* @caps: Port capabilities to set.
* @cbfn: Callback, if any.
*
*/
void
csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
uint8_t portid, bool wr, uint32_t fc, uint16_t caps,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
unsigned int lfc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
cmdp->op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
FW_CMD_REQUEST |
(wr ? FW_CMD_EXEC : FW_CMD_READ) |
FW_PORT_CMD_PORTID(portid));
if (!wr) {
cmdp->action_to_len16 = htonl(
FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
FW_CMD_LEN16(sizeof(*cmdp) / 16));
return;
}
/* Set port */
cmdp->action_to_len16 = htonl(
FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
FW_CMD_LEN16(sizeof(*cmdp) / 16));
if (fc & PAUSE_RX)
lfc |= FW_PORT_CAP_FC_RX;
if (fc & PAUSE_TX)
lfc |= FW_PORT_CAP_FC_TX;
if (!(caps & FW_PORT_CAP_ANEG))
cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) | lfc);
else
cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) |
lfc | mdi);
}
/*
* csio_mb_process_read_port_rsp - FW PORT command response processing helper
* @hw: The HW structure
* @mbp: Mailbox structure
* @retval: Mailbox return value from Firmware
* @caps: port capabilities
*
*/
void
csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
enum fw_retval *retval, uint16_t *caps)
{
struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);
*retval = FW_CMD_RETVAL_GET(ntohl(rsp->action_to_len16));
if (*retval == FW_SUCCESS)
*caps = ntohs(rsp->u.info.pcap);
}
/*
* csio_mb_initialize - FW INITIALIZE command helper
* @hw: The HW structure
* @mbp: Mailbox structure
* @tmo: Command timeout
* @cbfn: Callback, if any.
*
*/
void
csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
cmdp->op_to_write = htonl(FW_CMD_OP(FW_INITIALIZE_CMD) |
FW_CMD_REQUEST | FW_CMD_WRITE);
cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
}
/*
* csio_mb_iq_alloc - Initializes the mailbox to allocate an
* Ingress DMA queue in the firmware.
*
* @hw: The hw structure
* @mbp: Mailbox structure to initialize
* @priv: Private object
* @mb_tmo: Mailbox time-out period (in ms).
* @iq_params: Ingress queue params needed for allocation.
* @cbfn: The call-back function
*
*
*/
static void
csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
uint32_t mb_tmo, struct csio_iq_params *iq_params,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
FW_CMD_REQUEST | FW_CMD_EXEC |
FW_IQ_CMD_PFN(iq_params->pfn) |
FW_IQ_CMD_VFN(iq_params->vfn));
cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC |
FW_CMD_LEN16(sizeof(*cmdp) / 16));
cmdp->type_to_iqandstindex = htonl(
FW_IQ_CMD_VIID(iq_params->viid) |
FW_IQ_CMD_TYPE(iq_params->type) |
FW_IQ_CMD_IQASYNCH(iq_params->iqasynch));
cmdp->fl0size = htons(iq_params->fl0size);
cmdp->fl1size = htons(iq_params->fl1size);
} /* csio_mb_iq_alloc */
/*
* csio_mb_iq_write - Initializes the mailbox for writing into an
* Ingress DMA Queue.
*
* @hw: The HW structure
* @mbp: Mailbox structure to initialize
* @priv: Private object
* @mb_tmo: Mailbox time-out period (in ms).
* @cascaded_req: TRUE - if this request is cascaded with iq-alloc request.
* @iq_params: Ingress queue params needed for writing.
* @cbfn: The call-back function
*
* NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
* because this IQ write request can be cascaded with a previous
* IQ alloc request, and we don't want to overwrite the bits set by
* that request. This logic will work even in a non-cascaded case, since the
* cmdp structure is zeroed out by CSIO_INIT_MBP.
*/
static void
csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
uint32_t mb_tmo, bool cascaded_req,
struct csio_iq_params *iq_params,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
uint32_t iq_start_stop = (iq_params->iq_start) ?
FW_IQ_CMD_IQSTART(1) :
FW_IQ_CMD_IQSTOP(1);
/*
* If this IQ write is cascaded with IQ alloc request, do not
* re-initialize with 0's.
*
*/
if (!cascaded_req)
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_IQ_CMD) |
FW_CMD_REQUEST | FW_CMD_WRITE |
FW_IQ_CMD_PFN(iq_params->pfn) |
FW_IQ_CMD_VFN(iq_params->vfn));
cmdp->alloc_to_len16 |= htonl(iq_start_stop |
FW_CMD_LEN16(sizeof(*cmdp) / 16));
cmdp->iqid |= htons(iq_params->iqid);
cmdp->fl0id |= htons(iq_params->fl0id);
cmdp->fl1id |= htons(iq_params->fl1id);
cmdp->type_to_iqandstindex |= htonl(
FW_IQ_CMD_IQANDST(iq_params->iqandst) |
FW_IQ_CMD_IQANUS(iq_params->iqanus) |
FW_IQ_CMD_IQANUD(iq_params->iqanud) |
FW_IQ_CMD_IQANDSTINDEX(iq_params->iqandstindex));
cmdp->iqdroprss_to_iqesize |= htons(
FW_IQ_CMD_IQPCIECH(iq_params->iqpciech) |
FW_IQ_CMD_IQDCAEN(iq_params->iqdcaen) |
FW_IQ_CMD_IQDCACPU(iq_params->iqdcacpu) |
FW_IQ_CMD_IQINTCNTTHRESH(iq_params->iqintcntthresh) |
FW_IQ_CMD_IQCPRIO(iq_params->iqcprio) |
FW_IQ_CMD_IQESIZE(iq_params->iqesize));
cmdp->iqsize |= htons(iq_params->iqsize);
cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr);
if (iq_params->type == 0) {
cmdp->iqns_to_fl0congen |= htonl(
FW_IQ_CMD_IQFLINTIQHSEN(iq_params->iqflintiqhsen)|
FW_IQ_CMD_IQFLINTCONGEN(iq_params->iqflintcongen));
}
if (iq_params->fl0size && iq_params->fl0addr &&
(iq_params->fl0id != 0xFFFF)) {
cmdp->iqns_to_fl0congen |= htonl(
FW_IQ_CMD_FL0HOSTFCMODE(iq_params->fl0hostfcmode)|
FW_IQ_CMD_FL0CPRIO(iq_params->fl0cprio) |
FW_IQ_CMD_FL0PADEN(iq_params->fl0paden) |
FW_IQ_CMD_FL0PACKEN(iq_params->fl0packen));
cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
FW_IQ_CMD_FL0DCAEN(iq_params->fl0dcaen) |
FW_IQ_CMD_FL0DCACPU(iq_params->fl0dcacpu) |
FW_IQ_CMD_FL0FBMIN(iq_params->fl0fbmin) |
FW_IQ_CMD_FL0FBMAX(iq_params->fl0fbmax) |
FW_IQ_CMD_FL0CIDXFTHRESH(iq_params->fl0cidxfthresh));
cmdp->fl0size |= htons(iq_params->fl0size);
cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr);
}
} /* csio_mb_iq_write */
/*
* csio_mb_iq_alloc_write - Initializes the mailbox for allocating and
* writing an Ingress DMA Queue.
*
* @hw: The HW structure
* @mbp: Mailbox structure to initialize
* @priv: Private data.
* @mb_tmo: Mailbox time-out period (in ms).
* @iq_params: Ingress queue params needed for allocation & writing.
* @cbfn: The call-back function
*
*
*/
void
csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
uint32_t mb_tmo, struct csio_iq_params *iq_params,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
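/*
 * Compose ALLOC and WRITE into a single FW_IQ_CMD: the alloc helper
 * initializes the mailbox, and the write helper (called with
 * cascaded_req = true) ORs its fields on top without re-zeroing it.
 */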
csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn);
csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn);
} /* csio_mb_iq_alloc_write */
/*
* csio_mb_iq_alloc_write_rsp - Process the allocation & writing
* of ingress DMA queue mailbox's response.
*
* @hw: The HW structure.
* @mbp: Mailbox structure to initialize.
* @retval: Firmware return value.
* @iq_params: Ingress queue parameters, after allocation and write.
*
*/
void
csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
enum fw_retval *ret_val,
struct csio_iq_params *iq_params)
{
struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);
*ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
if (*ret_val == FW_SUCCESS) {
iq_params->physiqid = ntohs(rsp->physiqid);
iq_params->iqid = ntohs(rsp->iqid);
iq_params->fl0id = ntohs(rsp->fl0id);
iq_params->fl1id = ntohs(rsp->fl1id);
} else {
iq_params->physiqid = iq_params->iqid =
iq_params->fl0id = iq_params->fl1id = 0;
}
} /* csio_mb_iq_alloc_write_rsp */
/*
* csio_mb_iq_free - Initializes the mailbox for freeing a
* specified Ingress DMA Queue.
*
* @hw: The HW structure
* @mbp: Mailbox structure to initialize
* @priv: Private data
* @mb_tmo: Mailbox time-out period (in ms).
 * @iq_params: Parameters of the ingress queue that is to be freed.
* @cbfn: The call-back function
*
*
*/
void
csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
uint32_t mb_tmo, struct csio_iq_params *iq_params,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
FW_CMD_REQUEST | FW_CMD_EXEC |
FW_IQ_CMD_PFN(iq_params->pfn) |
FW_IQ_CMD_VFN(iq_params->vfn));
cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE |
FW_CMD_LEN16(sizeof(*cmdp) / 16));
cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iq_params->type));
cmdp->iqid = htons(iq_params->iqid);
cmdp->fl0id = htons(iq_params->fl0id);
cmdp->fl1id = htons(iq_params->fl1id);
} /* csio_mb_iq_free */
/*
* csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating
* an offload-egress queue.
*
* @hw: The HW structure
* @mbp: Mailbox structure to initialize
* @priv: Private data
* @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters.
* @cbfn: The call-back function
*
*
*/
static void
csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
FW_CMD_REQUEST | FW_CMD_EXEC |
FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
FW_CMD_LEN16(sizeof(*cmdp) / 16));
} /* csio_mb_eq_ofld_alloc */
/*
* csio_mb_eq_ofld_write - Initializes the mailbox for writing
 * an allocated offload-egress queue.
*
* @hw: The HW structure
* @mbp: Mailbox structure to initialize
* @priv: Private data
* @mb_tmo: Mailbox time-out period (in ms).
 * @cascaded_req: TRUE - if this request is cascaded with an EQ-alloc request.
 * @eq_ofld_params: (Offload) Egress queue parameters.
* @cbfn: The call-back function
*
*
 * NOTE: We OR the relevant bits into cmdp->XXX, instead of just assigning,
 * because this EQ write request can be cascaded with a previous
 * EQ alloc request, and we don't want to overwrite the bits set by
 * that request. This logic works even in the non-cascaded case, since the
 * cmdp structure is zeroed out by CSIO_INIT_MBP.
*/
static void
csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
uint32_t mb_tmo, bool cascaded_req,
struct csio_eq_params *eq_ofld_params,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
uint32_t eq_start_stop = (eq_ofld_params->eqstart) ?
FW_EQ_OFLD_CMD_EQSTART : FW_EQ_OFLD_CMD_EQSTOP;
/*
* If this EQ write is cascaded with EQ alloc request, do not
* re-initialize with 0's.
*
*/
if (!cascaded_req)
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
FW_CMD_REQUEST | FW_CMD_WRITE |
FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
cmdp->alloc_to_len16 |= htonl(eq_start_stop |
FW_CMD_LEN16(sizeof(*cmdp) / 16));
cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
cmdp->fetchszm_to_iqid |= htonl(
FW_EQ_OFLD_CMD_HOSTFCMODE(eq_ofld_params->hostfcmode) |
FW_EQ_OFLD_CMD_CPRIO(eq_ofld_params->cprio) |
FW_EQ_OFLD_CMD_PCIECHN(eq_ofld_params->pciechn) |
FW_EQ_OFLD_CMD_IQID(eq_ofld_params->iqid));
cmdp->dcaen_to_eqsize |= htonl(
FW_EQ_OFLD_CMD_DCAEN(eq_ofld_params->dcaen) |
FW_EQ_OFLD_CMD_DCACPU(eq_ofld_params->dcacpu) |
FW_EQ_OFLD_CMD_FBMIN(eq_ofld_params->fbmin) |
FW_EQ_OFLD_CMD_FBMAX(eq_ofld_params->fbmax) |
FW_EQ_OFLD_CMD_CIDXFTHRESHO(eq_ofld_params->cidxfthresho) |
FW_EQ_OFLD_CMD_CIDXFTHRESH(eq_ofld_params->cidxfthresh) |
FW_EQ_OFLD_CMD_EQSIZE(eq_ofld_params->eqsize));
cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);
} /* csio_mb_eq_ofld_write */
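/*
 * Note on the cascaded usage (a descriptive sketch, not additional driver
 * logic): when csio_mb_eq_ofld_alloc_write() below cascades the two
 * requests, the alloc helper assigns the header fields first and the write
 * helper then ORs in its own bits, e.g.:
 *
 *	alloc:	cmdp->alloc_to_len16  = htonl(FW_EQ_OFLD_CMD_ALLOC | ...);
 *	write:	cmdp->alloc_to_len16 |= htonl(eq_start_stop | ...);
 *
 * so the final command carries both the ALLOC flag and the start/stop bits.
 */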
/*
 * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocating and
 * writing an (offload) Egress DMA Queue.
*
* @hw: The HW structure
* @mbp: Mailbox structure to initialize
* @priv: Private data.
* @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters.
* @cbfn: The call-back function
*
*
*/
void
csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp,
void *priv, uint32_t mb_tmo,
struct csio_eq_params *eq_ofld_params,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn);
csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true,
eq_ofld_params, cbfn);
} /* csio_mb_eq_ofld_alloc_write */
/*
 * csio_mb_eq_ofld_alloc_write_rsp - Processes the response of the egress
 * DMA queue allocate & write mailbox.
 *
 * @hw: The HW structure.
 * @mbp: Completed mailbox.
 * @ret_val: Firmware return value.
 * @eq_ofld_params: (Offload) Egress queue parameters.
*
*/
void
csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
struct csio_mb *mbp, enum fw_retval *ret_val,
struct csio_eq_params *eq_ofld_params)
{
struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);
*ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
if (*ret_val == FW_SUCCESS) {
eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_GET(
ntohl(rsp->eqid_pkd));
eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_GET(
ntohl(rsp->physeqid_pkd));
} else
eq_ofld_params->eqid = 0;
} /* csio_mb_eq_ofld_alloc_write_rsp */
/*
* csio_mb_eq_ofld_free - Initializes the mailbox for freeing a
 * specified Egress DMA Queue.
*
* @hw: The HW structure
* @mbp: Mailbox structure to initialize
* @priv: Private data area.
* @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters of the queue to be freed.
* @cbfn: The call-back function
*
*
*/
void
csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
FW_CMD_REQUEST | FW_CMD_EXEC |
FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE |
FW_CMD_LEN16(sizeof(*cmdp) / 16));
cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
} /* csio_mb_eq_ofld_free */
/*
* csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link
* condition.
*
 * @ln: The Lnode structure
 * @mbp: Mailbox structure to initialize
 * @mb_tmo: Mailbox time-out period (in ms).
 * @port_id: Port id.
 * @sub_opcode: Link command sub-opcode.
 * @cos: Class of service.
 * @link_status: Link status (up/down).
 * @fcfi: FCF index.
 * @cbfn: The call-back function.
*
*
*/
void
csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode,
uint8_t cos, bool link_status, uint32_t fcfi,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_fcoe_link_cmd *cmdp =
(struct fw_fcoe_link_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
cmdp->op_to_portid = htonl((
FW_CMD_OP(FW_FCOE_LINK_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE |
FW_FCOE_LINK_CMD_PORTID(port_id)));
cmdp->sub_opcode_fcfi = htonl(
FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) |
FW_FCOE_LINK_CMD_FCFI(fcfi));
cmdp->lstatus = link_status;
cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
} /* csio_write_fcoe_link_cond_init_mb */
/*
* csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE
 * resource information (FW_FCOE_RES_INFO_CMD).
*
* @hw: The HW structure
* @mbp: Mailbox structure to initialize
* @mb_tmo: Mailbox time-out period (in ms).
* @cbfn: The call-back function
*
*
*/
void
csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
uint32_t mb_tmo,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_fcoe_res_info_cmd *cmdp =
(struct fw_fcoe_res_info_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
cmdp->op_to_read = htonl((FW_CMD_OP(FW_FCOE_RES_INFO_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ));
cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
} /* csio_fcoe_read_res_info_init_mb */
/*
* csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP
* in the firmware (FW_FCOE_VNP_CMD).
*
* @ln: The Lnode structure.
* @mbp: Mailbox structure to initialize.
* @mb_tmo: Mailbox time-out period (in ms).
* @fcfi: FCF Index.
 * @vnpi: VNP index.
 * @iqid: Ingress queue id.
* @vnport_wwnn: vnport WWNN
* @vnport_wwpn: vnport WWPN
* @cbfn: The call-back function.
*
*
*/
void
csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid,
uint8_t vnport_wwnn[8], uint8_t vnport_wwpn[8],
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_fcoe_vnp_cmd *cmdp =
(struct fw_fcoe_vnp_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
cmdp->op_to_fcfi = htonl((FW_CMD_OP(FW_FCOE_VNP_CMD) |
FW_CMD_REQUEST |
FW_CMD_EXEC |
FW_FCOE_VNP_CMD_FCFI(fcfi)));
cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC |
FW_CMD_LEN16(sizeof(*cmdp) / 16));
cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
cmdp->iqid = htons(iqid);
if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn))
cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN);
if (vnport_wwnn)
memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8);
if (vnport_wwpn)
memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8);
} /* csio_fcoe_vnp_alloc_init_mb */
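/*
 * Usage note (derived from the checks above, not additional driver logic):
 * passing all-zero vnport_wwnn and vnport_wwpn sets FW_FCOE_VNP_CMD_GEN_WWN,
 * i.e. the firmware is asked to generate the WWNs for the VN_Port; non-zero
 * names are copied into the command and used as-is.
 */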
/*
* csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd.
* @ln: The Lnode structure.
* @mbp: Mailbox structure to initialize.
* @mb_tmo: Mailbox time-out period (in ms).
* @fcfi: FCF Index.
* @vnpi: vnpi
* @cbfn: The call-back handler.
*/
void
csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_fcoe_vnp_cmd *cmdp =
(struct fw_fcoe_vnp_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ |
FW_FCOE_VNP_CMD_FCFI(fcfi));
cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
}
/*
* csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an
 * allocated VNP in the firmware (FW_FCOE_VNP_CMD).
*
* @ln: The Lnode structure.
* @mbp: Mailbox structure to initialize.
* @mb_tmo: Mailbox time-out period (in ms).
* @fcfi: FCF flow id
* @vnpi: VNP flow id
* @cbfn: The call-back function.
* Return: None
*/
void
csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_fcoe_vnp_cmd *cmdp =
(struct fw_fcoe_vnp_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
FW_CMD_REQUEST |
FW_CMD_EXEC |
FW_FCOE_VNP_CMD_FCFI(fcfi));
cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE |
FW_CMD_LEN16(sizeof(*cmdp) / 16));
cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
}
/*
* csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the
* FCF records.
*
* @ln: The Lnode structure
* @mbp: Mailbox structure to initialize
* @mb_tmo: Mailbox time-out period (in ms).
 * @portid: Port id.
 * @fcfi: FCF index.
* @cbfn: The call-back function
*
*
*/
void
csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
uint32_t mb_tmo, uint32_t portid, uint32_t fcfi,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_fcoe_fcf_cmd *cmdp =
(struct fw_fcoe_fcf_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_FCF_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ |
FW_FCOE_FCF_CMD_FCFI(fcfi));
cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
} /* csio_fcoe_read_fcf_init_mb */
void
csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
uint32_t mb_tmo,
struct fw_fcoe_port_cmd_params *portparams,
void (*cbfn)(struct csio_hw *,
struct csio_mb *))
{
struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
mbp->mb_size = 64;
cmdp->op_to_flowid = htonl(FW_CMD_OP(FW_FCOE_STATS_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
cmdp->free_to_len16 = htonl(FW_CMD_LEN16(CSIO_MAX_MB_SIZE/16));
cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) |
FW_FCOE_STATS_CMD_PORT(portparams->portid);
cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_IX(portparams->idx) |
FW_FCOE_STATS_CMD_PORT_VALID;
} /* csio_fcoe_read_portparams_init_mb */
void
csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp,
				enum fw_retval *retval,
				struct fw_fcoe_port_cmd_params *portparams,
				struct fw_fcoe_port_stats *portstats)
{
struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
struct fw_fcoe_port_stats stats;
uint8_t *src;
uint8_t *dst;
*retval = FW_CMD_RETVAL_GET(ntohl(rsp->free_to_len16));
memset(&stats, 0, sizeof(struct fw_fcoe_port_stats));
if (*retval == FW_SUCCESS) {
dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8);
src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8);
memcpy(dst, src, (portparams->nstats * 8));
if (portparams->idx == 1) {
/* Get the first 6 flits from the Mailbox */
portstats->tx_bcast_bytes =
be64_to_cpu(stats.tx_bcast_bytes);
portstats->tx_bcast_frames =
be64_to_cpu(stats.tx_bcast_frames);
portstats->tx_mcast_bytes =
be64_to_cpu(stats.tx_mcast_bytes);
portstats->tx_mcast_frames =
be64_to_cpu(stats.tx_mcast_frames);
portstats->tx_ucast_bytes =
be64_to_cpu(stats.tx_ucast_bytes);
portstats->tx_ucast_frames =
be64_to_cpu(stats.tx_ucast_frames);
}
if (portparams->idx == 7) {
/* Get the second 6 flits from the Mailbox */
portstats->tx_drop_frames =
be64_to_cpu(stats.tx_drop_frames);
portstats->tx_offload_bytes =
be64_to_cpu(stats.tx_offload_bytes);
portstats->tx_offload_frames =
be64_to_cpu(stats.tx_offload_frames);
#if 0
portstats->rx_pf_bytes =
be64_to_cpu(stats.rx_pf_bytes);
portstats->rx_pf_frames =
be64_to_cpu(stats.rx_pf_frames);
#endif
portstats->rx_bcast_bytes =
be64_to_cpu(stats.rx_bcast_bytes);
portstats->rx_bcast_frames =
be64_to_cpu(stats.rx_bcast_frames);
portstats->rx_mcast_bytes =
be64_to_cpu(stats.rx_mcast_bytes);
}
if (portparams->idx == 13) {
/* Get the last 4 flits from the Mailbox */
portstats->rx_mcast_frames =
be64_to_cpu(stats.rx_mcast_frames);
portstats->rx_ucast_bytes =
be64_to_cpu(stats.rx_ucast_bytes);
portstats->rx_ucast_frames =
be64_to_cpu(stats.rx_ucast_frames);
portstats->rx_err_frames =
be64_to_cpu(stats.rx_err_frames);
}
}
}
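/*
 * Illustrative only: a single FW_FCOE_STATS_CMD returns at most
 * CSIO_NUM_STATS_PER_MB (6) flits, and the response handling above keys off
 * idx values 1, 7 and 13, so a caller is expected to collect the port stats
 * in three passes. A minimal sketch, with 'hw', 'mbp', 'portid' and
 * 'portstats' assumed to be set up by the caller:
 *
 *	struct fw_fcoe_port_cmd_params portparams = { .portid = portid };
 *	enum fw_retval retval;
 *	uint32_t idx;
 *
 *	for (idx = 1; idx <= 13; idx += 6) {
 *		portparams.idx = (uint8_t)idx;
 *		portparams.nstats = (idx == 13) ? 4 : 6;
 *		csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
 *						  &portparams, NULL);
 *		if (csio_mb_issue(hw, mbp))
 *			break;
 *		csio_mb_process_portparams_rsp(hw, mbp, &retval,
 *					       &portparams, portstats);
 *	}
 */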
/* Entry points/APIs for MB module */
/*
* csio_mb_intr_enable - Enable Interrupts from mailboxes.
* @hw: The HW structure
*
* Enables CIM interrupt bit in appropriate INT_ENABLE registers.
*/
void
csio_mb_intr_enable(struct csio_hw *hw)
{
csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
}
/*
* csio_mb_intr_disable - Disable Interrupts from mailboxes.
* @hw: The HW structure
*
 * Disables the mailbox-message-ready interrupt bit in the CIM
 * HostInterruptEnable register.
*/
void
csio_mb_intr_disable(struct csio_hw *hw)
{
csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
}
static void
csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd)
{
struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd;
if ((FW_DEBUG_CMD_TYPE_GET(ntohl(dbg->op_type))) == 1) {
csio_info(hw, "FW print message:\n");
csio_info(hw, "\tdebug->dprtstridx = %d\n",
ntohs(dbg->u.prt.dprtstridx));
csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n",
ntohl(dbg->u.prt.dprtstrparam0));
csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n",
ntohl(dbg->u.prt.dprtstrparam1));
csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n",
ntohl(dbg->u.prt.dprtstrparam2));
csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n",
ntohl(dbg->u.prt.dprtstrparam3));
} else {
/* This is a FW assertion */
csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
dbg->u.assert.filename_0_7,
ntohl(dbg->u.assert.line),
ntohl(dbg->u.assert.x),
ntohl(dbg->u.assert.y));
}
}
static void
csio_mb_debug_cmd_handler(struct csio_hw *hw)
{
int i;
__be64 cmd[CSIO_MB_MAX_REGS];
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
int size = sizeof(struct fw_debug_cmd);
/* Copy mailbox data */
for (i = 0; i < size; i += 8)
cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i));
csio_mb_dump_fw_dbg(hw, cmd);
/* Notify FW of mailbox by setting owner as UP */
csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW),
ctl_reg);
csio_rd_reg32(hw, ctl_reg);
wmb();
}
/*
* csio_mb_issue - generic routine for issuing Mailbox commands.
* @hw: The HW structure
* @mbp: Mailbox command to issue
*
* Caller should hold hw lock across this call.
*/
int
csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
{
uint32_t owner, ctl;
int i;
uint32_t ii;
__be64 *cmd = mbp->mb;
__be64 hdr;
struct csio_mbm *mbm = &hw->mbm;
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
int size = mbp->mb_size;
int rv = -EINVAL;
struct fw_cmd_hdr *fw_hdr;
/* Determine mode */
if (mbp->mb_cbfn == NULL) {
/* Need to issue/get results in the same context */
if (mbp->tmo < CSIO_MB_POLL_FREQ) {
csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo);
goto error_out;
}
} else if (!csio_is_host_intr_enabled(hw) ||
!csio_is_hw_intr_enabled(hw)) {
csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
*((uint8_t *)mbp->mb));
goto error_out;
}
if (mbm->mcurrent != NULL) {
/* Queue mbox cmd, if another mbox cmd is active */
if (mbp->mb_cbfn == NULL) {
rv = -EBUSY;
csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n",
hw->pfn, *((uint8_t *)mbp->mb));
goto error_out;
} else {
list_add_tail(&mbp->list, &mbm->req_q);
CSIO_INC_STATS(mbm, n_activeq);
return 0;
}
}
/* Now get ownership of mailbox */
owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
if (!csio_mb_is_host_owner(owner)) {
for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
/*
* Mailbox unavailable. In immediate mode, fail the command.
* In other modes, enqueue the request.
*/
if (!csio_mb_is_host_owner(owner)) {
if (mbp->mb_cbfn == NULL) {
rv = owner ? -EBUSY : -ETIMEDOUT;
csio_dbg(hw,
"Couldnt own Mailbox %x op:0x%x "
"owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb), owner);
goto error_out;
} else {
if (mbm->mcurrent == NULL) {
csio_err(hw,
"Couldnt own Mailbox %x "
"op:0x%x owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb),
owner);
csio_err(hw,
"No outstanding driver"
" mailbox as well\n");
goto error_out;
}
}
}
}
/* Mailbox is available, copy mailbox data into it */
for (i = 0; i < size; i += 8) {
csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i);
cmd++;
}
CSIO_DUMP_MB(hw, hw->pfn, data_reg);
/* Start completion timers in non-immediate modes and notify FW */
if (mbp->mb_cbfn != NULL) {
mbm->mcurrent = mbp;
mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
csio_wr_reg32(hw, MBMSGVALID | MBINTREQ |
MBOWNER(CSIO_MBOWNER_FW), ctl_reg);
} else
csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW),
ctl_reg);
/* Flush posted writes */
csio_rd_reg32(hw, ctl_reg);
wmb();
CSIO_INC_STATS(mbm, n_req);
if (mbp->mb_cbfn)
return 0;
/* Poll for completion in immediate mode */
cmd = mbp->mb;
for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) {
mdelay(CSIO_MB_POLL_FREQ);
/* Check for response */
ctl = csio_rd_reg32(hw, ctl_reg);
if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
if (!(ctl & MBMSGVALID)) {
csio_wr_reg32(hw, 0, ctl_reg);
continue;
}
CSIO_DUMP_MB(hw, hw->pfn, data_reg);
hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
fw_hdr = (struct fw_cmd_hdr *)&hdr;
switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
case FW_DEBUG_CMD:
csio_mb_debug_cmd_handler(hw);
continue;
}
/* Copy response */
for (i = 0; i < size; i += 8)
*cmd++ = cpu_to_be64(csio_rd_reg64
(hw, data_reg + i));
csio_wr_reg32(hw, 0, ctl_reg);
if (FW_CMD_RETVAL_GET(*(mbp->mb)))
CSIO_INC_STATS(mbm, n_err);
CSIO_INC_STATS(mbm, n_rsp);
return 0;
}
}
CSIO_INC_STATS(mbm, n_tmo);
csio_err(hw, "Mailbox %x op:0x%x timed out!\n",
hw->pfn, *((uint8_t *)cmd));
return -ETIMEDOUT;
error_out:
CSIO_INC_STATS(mbm, n_err);
return rv;
}
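/*
 * Illustrative only: csio_mb_issue() runs in one of two modes, chosen by
 * the callback passed to the mailbox init helpers. A minimal sketch,
 * assuming 'hw', 'mbp', 'priv' and 'iq_params' are set up by the caller
 * and the hw lock is held:
 *
 *	Polled (immediate) mode - NULL callback; the response is copied
 *	back into mbp->mb before csio_mb_issue() returns:
 *
 *		csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
 *				&iq_params, NULL);
 *		rv = csio_mb_issue(hw, mbp);
 *
 *	Interrupt mode - a callback is supplied; csio_mb_issue() returns
 *	immediately (or queues the request if another mailbox is active)
 *	and the callback is invoked later from csio_mb_completions():
 *
 *		csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
 *				&iq_params, iq_free_cbfn);
 *		rv = csio_mb_issue(hw, mbp);
 *
 * 'iq_free_cbfn' is a hypothetical callback name used only for this sketch.
 */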
/*
* csio_mb_completions - Completion handler for Mailbox commands
* @hw: The HW structure
* @cbfn_q: Completion queue.
*
*/
void
csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q)
{
struct csio_mb *mbp;
struct csio_mbm *mbm = &hw->mbm;
enum fw_retval rv;
while (!list_empty(cbfn_q)) {
mbp = list_first_entry(cbfn_q, struct csio_mb, list);
list_del_init(&mbp->list);
rv = csio_mb_fw_retval(mbp);
if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR))
CSIO_INC_STATS(mbm, n_err);
else if (rv != FW_HOSTERROR)
CSIO_INC_STATS(mbm, n_rsp);
if (mbp->mb_cbfn)
mbp->mb_cbfn(hw, mbp);
}
}
static void
csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id)
{
static char *mod_str[] = {
NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
};
struct csio_pport *port = &hw->pport[port_id];
if (port->mod_type == FW_PORT_MOD_TYPE_NONE)
csio_info(hw, "Port:%d - port module unplugged\n", port_id);
else if (port->mod_type < ARRAY_SIZE(mod_str))
csio_info(hw, "Port:%d - %s port module inserted\n", port_id,
mod_str[port->mod_type]);
else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
csio_info(hw,
"Port:%d - unsupported optical port module "
"inserted\n", port_id);
else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
csio_info(hw,
"Port:%d - unknown port module inserted, forcing "
"TWINAX\n", port_id);
else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR)
csio_info(hw, "Port:%d - transceiver module error\n", port_id);
else
csio_info(hw, "Port:%d - unknown module type %d inserted\n",
port_id, port->mod_type);
}
int
csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
{
uint8_t opcode = *(uint8_t *)cmd;
struct fw_port_cmd *pcmd;
uint8_t port_id;
uint32_t link_status;
uint16_t action;
uint8_t mod_type;
if (opcode == FW_PORT_CMD) {
pcmd = (struct fw_port_cmd *)cmd;
port_id = FW_PORT_CMD_PORTID_GET(
ntohl(pcmd->op_to_portid));
action = FW_PORT_CMD_ACTION_GET(
ntohl(pcmd->action_to_len16));
if (action != FW_PORT_ACTION_GET_PORT_INFO) {
csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
action);
return -EINVAL;
}
link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
mod_type = FW_PORT_CMD_MODTYPE_GET(link_status);
hw->pport[port_id].link_status =
FW_PORT_CMD_LSTATUS_GET(link_status);
hw->pport[port_id].link_speed =
FW_PORT_CMD_LSPEED_GET(link_status);
csio_info(hw, "Port:%x - LINK %s\n", port_id,
FW_PORT_CMD_LSTATUS_GET(link_status) ? "UP" : "DOWN");
if (mod_type != hw->pport[port_id].mod_type) {
hw->pport[port_id].mod_type = mod_type;
csio_mb_portmod_changed(hw, port_id);
}
} else if (opcode == FW_DEBUG_CMD) {
csio_mb_dump_fw_dbg(hw, cmd);
} else {
csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode);
return -EINVAL;
}
return 0;
}
/*
* csio_mb_isr_handler - Handle mailboxes related interrupts.
* @hw: The HW structure
*
* Called from the ISR to handle Mailbox related interrupts.
* HW Lock should be held across this call.
*/
int
csio_mb_isr_handler(struct csio_hw *hw)
{
struct csio_mbm *mbm = &hw->mbm;
struct csio_mb *mbp = mbm->mcurrent;
__be64 *cmd;
uint32_t ctl, cim_cause, pl_cause;
int i;
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
int size;
__be64 hdr;
struct fw_cmd_hdr *fw_hdr;
pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE));
cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) {
CSIO_INC_STATS(hw, n_mbint_unexp);
return -EINVAL;
}
/*
	 * The cause registers below HAVE to be cleared in the SAME
	 * order as below: the low-level cause register followed by
	 * the upper-level cause register. In other words, clear the
	 * CIM cause first, then the PL cause.
*/
csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE));
ctl = csio_rd_reg32(hw, ctl_reg);
if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
CSIO_DUMP_MB(hw, hw->pfn, data_reg);
if (!(ctl & MBMSGVALID)) {
csio_warn(hw,
"Stray mailbox interrupt recvd,"
" mailbox data not valid\n");
csio_wr_reg32(hw, 0, ctl_reg);
/* Flush */
csio_rd_reg32(hw, ctl_reg);
return -EINVAL;
}
hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
fw_hdr = (struct fw_cmd_hdr *)&hdr;
switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
case FW_DEBUG_CMD:
csio_mb_debug_cmd_handler(hw);
return -EINVAL;
#if 0
case FW_ERROR_CMD:
case FW_INITIALIZE_CMD: /* When we are not master */
#endif
}
CSIO_ASSERT(mbp != NULL);
cmd = mbp->mb;
size = mbp->mb_size;
/* Get response */
for (i = 0; i < size; i += 8)
*cmd++ = cpu_to_be64(csio_rd_reg64
(hw, data_reg + i));
csio_wr_reg32(hw, 0, ctl_reg);
/* Flush */
csio_rd_reg32(hw, ctl_reg);
mbm->mcurrent = NULL;
/* Add completion to tail of cbfn queue */
list_add_tail(&mbp->list, &mbm->cbfn_q);
CSIO_INC_STATS(mbm, n_cbfnq);
/*
* Enqueue event to EventQ. Events processing happens
* in Event worker thread context
*/
if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp)))
CSIO_INC_STATS(hw, n_evt_drop);
return 0;
} else {
/*
* We can get here if mailbox MSIX vector is shared,
* or in INTx case. Or a stray interrupt.
*/
csio_dbg(hw, "Host not owner, no mailbox interrupt\n");
CSIO_INC_STATS(hw, n_int_stray);
return -EINVAL;
}
}
/*
* csio_mb_tmo_handler - Timeout handler
* @hw: The HW structure
*
*/
struct csio_mb *
csio_mb_tmo_handler(struct csio_hw *hw)
{
struct csio_mbm *mbm = &hw->mbm;
struct csio_mb *mbp = mbm->mcurrent;
struct fw_cmd_hdr *fw_hdr;
/*
	 * There could be a race between the completion handler and the timer,
	 * and the completion handler won that race (mcurrent is already NULL).
*/
if (mbp == NULL) {
CSIO_DB_ASSERT(0);
return NULL;
}
fw_hdr = (struct fw_cmd_hdr *)(mbp->mb);
csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
FW_CMD_OP_GET(ntohl(fw_hdr->hi)));
mbm->mcurrent = NULL;
CSIO_INC_STATS(mbm, n_tmo);
fw_hdr->lo = htonl(FW_CMD_RETVAL(FW_ETIMEDOUT));
return mbp;
}
/*
* csio_mb_cancel_all - Cancel all waiting commands.
* @hw: The HW structure
* @cbfn_q: The callback queue.
*
* Caller should hold hw lock across this call.
*/
void
csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
{
struct csio_mb *mbp;
struct csio_mbm *mbm = &hw->mbm;
struct fw_cmd_hdr *hdr;
struct list_head *tmp;
if (mbm->mcurrent) {
mbp = mbm->mcurrent;
/* Stop mailbox completion timer */
del_timer_sync(&mbm->timer);
/* Add completion to tail of cbfn queue */
list_add_tail(&mbp->list, cbfn_q);
mbm->mcurrent = NULL;
}
if (!list_empty(&mbm->req_q)) {
list_splice_tail_init(&mbm->req_q, cbfn_q);
mbm->stats.n_activeq = 0;
}
if (!list_empty(&mbm->cbfn_q)) {
list_splice_tail_init(&mbm->cbfn_q, cbfn_q);
mbm->stats.n_cbfnq = 0;
}
if (list_empty(cbfn_q))
return;
list_for_each(tmp, cbfn_q) {
mbp = (struct csio_mb *)tmp;
hdr = (struct fw_cmd_hdr *)(mbp->mb);
csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n",
hw->pfn, FW_CMD_OP_GET(ntohl(hdr->hi)));
CSIO_INC_STATS(mbm, n_cancel);
hdr->lo = htonl(FW_CMD_RETVAL(FW_HOSTERROR));
}
}
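/*
 * Note (descriptive only): cancelled mailboxes have FW_HOSTERROR stamped
 * into their command header above, so when csio_mb_completions() later
 * walks the cbfn_q it can tell a cancelled command from a genuine firmware
 * response and skips the n_rsp/n_err accounting for it.
 */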
/*
* csio_mbm_init - Initialize Mailbox module
* @mbm: Mailbox module
* @hw: The HW structure
 * @timer_fn: Timer handler invoked on mailbox timeouts.
*
* Initialize timer and the request/response queues.
*/
int
csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
void (*timer_fn)(uintptr_t))
{
struct timer_list *timer = &mbm->timer;
init_timer(timer);
timer->function = timer_fn;
timer->data = (unsigned long)hw;
INIT_LIST_HEAD(&mbm->req_q);
INIT_LIST_HEAD(&mbm->cbfn_q);
csio_set_mb_intr_idx(mbm, -1);
return 0;
}
/*
* csio_mbm_exit - Uninitialize mailbox module
* @mbm: Mailbox module
*
* Stop timer.
*/
void
csio_mbm_exit(struct csio_mbm *mbm)
{
del_timer_sync(&mbm->timer);
CSIO_DB_ASSERT(mbm->mcurrent == NULL);
CSIO_DB_ASSERT(list_empty(&mbm->req_q));
CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q));
}
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_MB_H__
#define __CSIO_MB_H__
#include <linux/timer.h>
#include <linux/completion.h>
#include "t4fw_api.h"
#include "t4fw_api_stor.h"
#include "csio_defs.h"
#define CSIO_STATS_OFFSET (2)
#define CSIO_NUM_STATS_PER_MB (6)
struct fw_fcoe_port_cmd_params {
uint8_t portid;
uint8_t idx;
uint8_t nstats;
};
#define CSIO_DUMP_MB(__hw, __num, __mb) \
csio_dbg(__hw, "\t%llx %llx %llx %llx %llx %llx %llx %llx\n", \
(unsigned long long)csio_rd_reg64(__hw, __mb), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 8), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 16), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 24), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 32), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 40), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 48), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 56))
#define CSIO_MB_MAX_REGS 8
#define CSIO_MAX_MB_SIZE 64
#define CSIO_MB_POLL_FREQ 5 /* 5 ms */
#define CSIO_MB_DEFAULT_TMO FW_CMD_MAX_TIMEOUT
/* Device master in HELLO command */
enum csio_dev_master { CSIO_MASTER_CANT, CSIO_MASTER_MAY, CSIO_MASTER_MUST };
enum csio_mb_owner { CSIO_MBOWNER_NONE, CSIO_MBOWNER_FW, CSIO_MBOWNER_PL };
enum csio_dev_state {
CSIO_DEV_STATE_UNINIT,
CSIO_DEV_STATE_INIT,
CSIO_DEV_STATE_ERR
};
#define FW_PARAM_DEV(param) \
(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
FW_PARAMS_PARAM_Y(0) | \
FW_PARAMS_PARAM_Z(0))
enum {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
#define CSIO_INIT_MBP(__mbp, __cp, __tmo, __priv, __fn, __clear) \
do { \
if (__clear) \
memset((__cp), 0, \
CSIO_MB_MAX_REGS * sizeof(__be64)); \
INIT_LIST_HEAD(&(__mbp)->list); \
(__mbp)->tmo = (__tmo); \
(__mbp)->priv = (void *)(__priv); \
(__mbp)->mb_cbfn = (__fn); \
(__mbp)->mb_size = sizeof(*(__cp)); \
} while (0)
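/*
 * Illustrative only: every mailbox helper binds its FW command payload to
 * the driver mailbox through this macro. A minimal sketch, with 'hw',
 * 'mbp' and 'my_cbfn' (a hypothetical callback) assumed to be provided by
 * the caller:
 *
 *	struct fw_ldst_cmd *cmdp = (struct fw_ldst_cmd *)(mbp->mb);
 *
 *	CSIO_INIT_MBP(mbp, cmdp, CSIO_MB_DEFAULT_TMO, hw, my_cbfn, 1);
 *
 * The last argument (1) zeroes the CSIO_MB_MAX_REGS * 8 byte payload before
 * use; passing 0 leaves any existing payload contents untouched.
 */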
struct csio_mbm_stats {
uint32_t n_req; /* number of mbox req */
uint32_t n_rsp; /* number of mbox rsp */
uint32_t n_activeq; /* number of mbox req active Q */
uint32_t n_cbfnq; /* number of mbox req cbfn Q */
uint32_t n_tmo; /* number of mbox timeout */
uint32_t n_cancel; /* number of mbox cancel */
uint32_t n_err; /* number of mbox error */
};
/* Driver version of Mailbox */
struct csio_mb {
struct list_head list; /* for req/resp */
/* queue in driver */
__be64 mb[CSIO_MB_MAX_REGS]; /* MB in HW format */
int mb_size; /* Size of this
* mailbox.
*/
uint32_t tmo; /* Timeout */
struct completion cmplobj; /* MB Completion
* object
*/
void (*mb_cbfn) (struct csio_hw *, struct csio_mb *);
/* Callback fn */
void *priv; /* Owner private ptr */
};
struct csio_mbm {
uint32_t a_mbox; /* Async mbox num */
uint32_t intr_idx; /* Interrupt index */
struct timer_list timer; /* Mbox timer */
struct list_head req_q; /* Mbox request queue */
struct list_head cbfn_q; /* Mbox completion q */
struct csio_mb *mcurrent; /* Current mailbox */
uint32_t req_q_cnt; /* Outstanding mbox
* cmds
*/
struct csio_mbm_stats stats; /* Statistics */
};
#define csio_set_mb_intr_idx(_m, _i) ((_m)->intr_idx = (_i))
#define csio_get_mb_intr_idx(_m) ((_m)->intr_idx)
struct csio_iq_params;
struct csio_eq_params;
enum fw_retval csio_mb_fw_retval(struct csio_mb *);
/* MB helpers */
void csio_mb_hello(struct csio_hw *, struct csio_mb *, uint32_t,
uint32_t, uint32_t, enum csio_dev_master,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_process_hello_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, enum csio_dev_state *,
uint8_t *);
void csio_mb_bye(struct csio_hw *, struct csio_mb *, uint32_t,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_reset(struct csio_hw *, struct csio_mb *, uint32_t, int, int,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_params(struct csio_hw *, struct csio_mb *, uint32_t, unsigned int,
unsigned int, unsigned int, const u32 *, u32 *, bool,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_process_read_params_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, unsigned int , u32 *);
void csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
int reg);
void csio_mb_caps_config(struct csio_hw *, struct csio_mb *, uint32_t,
bool, bool, bool, bool,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_rss_glb_config(struct csio_hw *, struct csio_mb *,
uint32_t, uint8_t, unsigned int,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_pfvf(struct csio_hw *, struct csio_mb *, uint32_t,
unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int,
unsigned int, void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_port(struct csio_hw *, struct csio_mb *, uint32_t,
uint8_t, bool, uint32_t, uint16_t,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_process_read_port_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, uint16_t *);
void csio_mb_initialize(struct csio_hw *, struct csio_mb *, uint32_t,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_iq_alloc_write(struct csio_hw *, struct csio_mb *, void *,
uint32_t, struct csio_iq_params *,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_iq_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, struct csio_iq_params *);
void csio_mb_iq_free(struct csio_hw *, struct csio_mb *, void *,
uint32_t, struct csio_iq_params *,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_eq_ofld_alloc_write(struct csio_hw *, struct csio_mb *, void *,
uint32_t, struct csio_eq_params *,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, struct csio_eq_params *);
void csio_mb_eq_ofld_free(struct csio_hw *, struct csio_mb *, void *,
uint32_t , struct csio_eq_params *,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_read_res_info_init_mb(struct csio_hw *, struct csio_mb *,
uint32_t,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_write_fcoe_link_cond_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t, uint8_t, uint32_t, uint8_t, bool, uint32_t,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t, uint32_t , uint32_t , uint16_t,
uint8_t [8], uint8_t [8],
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_vnp_read_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t, uint32_t , uint32_t ,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_vnp_free_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t , uint32_t, uint32_t ,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_read_fcf_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t, uint32_t, uint32_t,
void (*cbfn) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_read_portparams_init_mb(struct csio_hw *hw,
struct csio_mb *mbp, uint32_t mb_tmo,
struct fw_fcoe_port_cmd_params *portparams,
void (*cbfn)(struct csio_hw *, struct csio_mb *));
void csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp,
enum fw_retval *retval,
struct fw_fcoe_port_cmd_params *portparams,
struct fw_fcoe_port_stats *portstats);
/* MB module functions */
int csio_mbm_init(struct csio_mbm *, struct csio_hw *,
void (*)(uintptr_t));
void csio_mbm_exit(struct csio_mbm *);
void csio_mb_intr_enable(struct csio_hw *);
void csio_mb_intr_disable(struct csio_hw *);
int csio_mb_issue(struct csio_hw *, struct csio_mb *);
void csio_mb_completions(struct csio_hw *, struct list_head *);
int csio_mb_fwevt_handler(struct csio_hw *, __be64 *);
int csio_mb_isr_handler(struct csio_hw *);
struct csio_mb *csio_mb_tmo_handler(struct csio_hw *);
void csio_mb_cancel_all(struct csio_hw *, struct list_head *);
#endif /* ifndef __CSIO_MB_H__ */
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fs.h>
#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
static void csio_rnode_exit(struct csio_rnode *);
/* State machine forward declarations */
static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);
/* FW event to rnode SM event mapping */
static enum csio_rn_ev fwevt_to_rnevt[] = {
CSIO_RNFE_NONE, /* None */
CSIO_RNFE_LOGGED_IN, /* PLOGI_ACC_RCVD */
CSIO_RNFE_NONE, /* PLOGI_RJT_RCVD */
CSIO_RNFE_PLOGI_RECV, /* PLOGI_RCVD */
CSIO_RNFE_LOGO_RECV, /* PLOGO_RCVD */
CSIO_RNFE_PRLI_DONE, /* PRLI_ACC_RCVD */
CSIO_RNFE_NONE, /* PRLI_RJT_RCVD */
CSIO_RNFE_PRLI_RECV, /* PRLI_RCVD */
CSIO_RNFE_PRLO_RECV, /* PRLO_RCVD */
CSIO_RNFE_NONE, /* NPORT_ID_CHGD */
CSIO_RNFE_LOGO_RECV, /* FLOGO_RCVD */
CSIO_RNFE_NONE, /* CLR_VIRT_LNK_RCVD */
CSIO_RNFE_LOGGED_IN, /* FLOGI_ACC_RCVD */
CSIO_RNFE_NONE, /* FLOGI_RJT_RCVD */
CSIO_RNFE_LOGGED_IN, /* FDISC_ACC_RCVD */
CSIO_RNFE_NONE, /* FDISC_RJT_RCVD */
CSIO_RNFE_NONE, /* FLOGI_TMO_MAX_RETRY */
CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_ACC */
CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_RJT */
CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
CSIO_RNFE_NONE, /* PRLI_TMO */
CSIO_RNFE_NONE, /* ADISC_TMO */
CSIO_RNFE_NAME_MISSING, /* RSCN_DEV_LOST */
CSIO_RNFE_NONE, /* SCR_ACC_RCVD */
CSIO_RNFE_NONE, /* ADISC_RJT_RCVD */
CSIO_RNFE_NONE, /* LOGO_SNT */
CSIO_RNFE_LOGO_RECV, /* PROTO_ERR_IMPL_LOGO */
};
#define CSIO_FWE_TO_RNFE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \
CSIO_RNFE_NONE : \
fwevt_to_rnevt[_evt])
int
csio_is_rnode_ready(struct csio_rnode *rn)
{
return csio_match_state(rn, csio_rns_ready);
}
static int
csio_is_rnode_uninit(struct csio_rnode *rn)
{
return csio_match_state(rn, csio_rns_uninit);
}
static int
csio_is_rnode_wka(uint8_t rport_type)
{
if ((rport_type == FLOGI_VFPORT) ||
(rport_type == FDISC_VFPORT) ||
(rport_type == NS_VNPORT) ||
(rport_type == FDMI_VNPORT))
return 1;
return 0;
}
/*
* csio_rn_lookup - Finds the rnode with the given flowid
* @ln - lnode
* @flowid - flowid.
*
 * Does the rnode lookup on the given lnode and flowid. If no matching entry
 * is found, NULL is returned.
*/
static struct csio_rnode *
csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)
{
struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
struct list_head *tmp;
struct csio_rnode *rn;
list_for_each(tmp, &rnhead->sm.sm_list) {
rn = (struct csio_rnode *) tmp;
if (rn->flowid == flowid)
return rn;
}
return NULL;
}
/*
* csio_rn_lookup_wwpn - Finds the rnode with the given wwpn
* @ln: lnode
* @wwpn: wwpn
*
* Does the rnode lookup on the given lnode and wwpn. If no matching entry
 * is found, NULL is returned.
*/
static struct csio_rnode *
csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)
{
struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
struct list_head *tmp;
struct csio_rnode *rn;
list_for_each(tmp, &rnhead->sm.sm_list) {
rn = (struct csio_rnode *) tmp;
if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))
return rn;
}
return NULL;
}
/**
* csio_rnode_lookup_portid - Finds the rnode with the given portid
* @ln: lnode
* @portid: port id
*
 * Looks up the rnode list for a given portid. If no matching entry
 * is found, NULL is returned.
*/
struct csio_rnode *
csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)
{
struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
struct list_head *tmp;
struct csio_rnode *rn;
list_for_each(tmp, &rnhead->sm.sm_list) {
rn = (struct csio_rnode *) tmp;
if (rn->nport_id == portid)
return rn;
}
return NULL;
}
static int
csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,
uint32_t *vnp_flowid)
{
struct csio_rnode *rnhead;
struct list_head *tmp, *tmp1;
struct csio_rnode *rn;
struct csio_lnode *ln_tmp;
struct csio_hw *hw = csio_lnode_to_hw(ln);
list_for_each(tmp1, &hw->sln_head) {
ln_tmp = (struct csio_lnode *) tmp1;
if (ln_tmp == ln)
continue;
rnhead = (struct csio_rnode *)&ln_tmp->rnhead;
list_for_each(tmp, &rnhead->sm.sm_list) {
rn = (struct csio_rnode *) tmp;
if (csio_is_rnode_ready(rn)) {
if (rn->flowid == rdev_flowid) {
*vnp_flowid = csio_ln_flowid(ln_tmp);
return 1;
}
}
}
}
return 0;
}
static struct csio_rnode *
csio_alloc_rnode(struct csio_lnode *ln)
{
struct csio_hw *hw = csio_lnode_to_hw(ln);
struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);
if (!rn)
goto err;
memset(rn, 0, sizeof(struct csio_rnode));
if (csio_rnode_init(rn, ln))
goto err_free;
CSIO_INC_STATS(ln, n_rnode_alloc);
return rn;
err_free:
mempool_free(rn, hw->rnode_mempool);
err:
CSIO_INC_STATS(ln, n_rnode_nomem);
return NULL;
}
static void
csio_free_rnode(struct csio_rnode *rn)
{
struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));
csio_rnode_exit(rn);
CSIO_INC_STATS(rn->lnp, n_rnode_free);
mempool_free(rn, hw->rnode_mempool);
}
/*
* csio_get_rnode - Gets rnode with the given flowid
* @ln - lnode
* @flowid - flow id.
*
 * Does the rnode lookup on the given lnode and flowid. If no matching
 * rnode is found, a new rnode with the given flowid is allocated and returned.
*/
static struct csio_rnode *
csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)
{
struct csio_rnode *rn;
rn = csio_rn_lookup(ln, flowid);
if (!rn) {
rn = csio_alloc_rnode(ln);
if (!rn)
return NULL;
rn->flowid = flowid;
}
return rn;
}
/*
* csio_put_rnode - Frees the given rnode
 * @ln - lnode
 * @rn - rnode to be freed.
 *
 * Returns the rnode to the rnode mempool. The rnode is expected to be
 * in the uninit state.
*/
void
csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)
{
CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);
csio_free_rnode(rn);
}
/*
* csio_confirm_rnode - confirms rnode based on wwpn.
* @ln: lnode
* @rdev_flowid: remote device flowid
* @rdevp: remote device params
 * This routine searches the rnode list for an entry with the same wwpn as
 * the new rnode. If there is a match, the matched rnode is returned;
 * otherwise a new rnode is allocated and returned.
 * Returns the rnode, or NULL on failure.
*/
struct csio_rnode *
csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
struct fcoe_rdev_entry *rdevp)
{
uint8_t rport_type;
struct csio_rnode *rn, *match_rn;
uint32_t vnp_flowid;
uint32_t *port_id;
port_id = (uint32_t *)&rdevp->r_id[0];
rport_type =
FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
	/* Drop rdev event for the fabric controller port */
if (rport_type == FAB_CTLR_VNPORT) {
csio_ln_dbg(ln,
"Unhandled rport_type:%d recv in rdev evt "
"ssni:x%x\n", rport_type, rdev_flowid);
return NULL;
}
/* Lookup on flowid */
rn = csio_rn_lookup(ln, rdev_flowid);
if (!rn) {
/* Drop events with duplicate flowid */
if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {
csio_ln_warn(ln,
"ssni:%x already active on vnpi:%x",
rdev_flowid, vnp_flowid);
return NULL;
}
/* Lookup on wwpn for NPORTs */
rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
if (!rn)
goto alloc_rnode;
} else {
/* Lookup well-known ports with nport id */
if (csio_is_rnode_wka(rport_type)) {
match_rn = csio_rnode_lookup_portid(ln,
((ntohl(*port_id) >> 8) & CSIO_DID_MASK));
if (match_rn == NULL) {
csio_rn_flowid(rn) = CSIO_INVALID_IDX;
goto alloc_rnode;
}
/*
* Now compare the wwpn to confirm that
* same port relogged in. If so update the matched rn.
* Else, go ahead and alloc a new rnode.
*/
if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
if (csio_is_rnode_ready(rn)) {
csio_ln_warn(ln,
"rnode is already"
"active ssni:x%x\n",
rdev_flowid);
CSIO_ASSERT(0);
}
csio_rn_flowid(rn) = CSIO_INVALID_IDX;
rn = match_rn;
/* Update rn */
goto found_rnode;
}
csio_rn_flowid(rn) = CSIO_INVALID_IDX;
goto alloc_rnode;
}
/* wwpn match */
if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))
goto found_rnode;
		/* Search for an rnode that has the same wwpn */
match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
if (match_rn != NULL) {
csio_ln_dbg(ln,
"ssni:x%x changed for rport name(wwpn):%llx "
"did:x%x\n", rdev_flowid,
wwn_to_u64(rdevp->wwpn),
match_rn->nport_id);
csio_rn_flowid(rn) = CSIO_INVALID_IDX;
rn = match_rn;
} else {
csio_ln_dbg(ln,
"rnode wwpn mismatch found ssni:x%x "
"name(wwpn):%llx\n",
rdev_flowid,
wwn_to_u64(csio_rn_wwpn(rn)));
if (csio_is_rnode_ready(rn)) {
csio_ln_warn(ln,
"rnode is already active "
"wwpn:%llx ssni:x%x\n",
wwn_to_u64(csio_rn_wwpn(rn)),
rdev_flowid);
CSIO_ASSERT(0);
}
csio_rn_flowid(rn) = CSIO_INVALID_IDX;
goto alloc_rnode;
}
}
found_rnode:
csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",
rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
/* Update flowid */
csio_rn_flowid(rn) = rdev_flowid;
/* update rdev entry */
rn->rdev_entry = rdevp;
CSIO_INC_STATS(ln, n_rnode_match);
return rn;
alloc_rnode:
rn = csio_get_rnode(ln, rdev_flowid);
if (!rn)
return NULL;
csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",
rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
/* update rdev entry */
rn->rdev_entry = rdevp;
return rn;
}
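/*
 * Summary of the confirmation logic above (descriptive comment only):
 *
 *   1. Look up by rdev_flowid. If no rnode exists with that flowid,
 *      reject flowids already active on other lnodes, then try a wwpn
 *      match; allocate a fresh rnode if neither matches.
 *   2. If a flowid match exists but the rport is a well-known address
 *      (FLOGI/FDISC/NS/FDMI), re-confirm it by nport id and wwpn.
 *   3. Otherwise confirm by wwpn; a wwpn that moved to a new flowid
 *      re-uses the matched rnode, while a mismatch allocates a new one.
 *
 * In all cases the returned rnode has its flowid and rdev_entry updated.
 */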
/*
* csio_rn_verify_rparams - verify rparams.
* @ln: lnode
* @rn: rnode
* @rdevp: remote device params
* returns success if rparams are verified.
*/
static int
csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,
struct fcoe_rdev_entry *rdevp)
{
uint8_t null[8];
uint8_t rport_type;
uint8_t fc_class;
uint32_t *did;
did = (uint32_t *) &rdevp->r_id[0];
rport_type =
FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
switch (rport_type) {
case FLOGI_VFPORT:
rn->role = CSIO_RNFR_FABRIC;
if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {
csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
csio_rn_flowid(rn));
return -EINVAL;
}
/* NPIV support */
if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))
ln->flags |= CSIO_LNF_NPIVSUPP;
break;
case NS_VNPORT:
rn->role = CSIO_RNFR_NS;
if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {
csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
csio_rn_flowid(rn));
return -EINVAL;
}
break;
case REG_FC4_VNPORT:
case REG_VNPORT:
rn->role = CSIO_RNFR_NPORT;
if (rdevp->event_cause == PRLI_ACC_RCVD ||
rdevp->event_cause == PRLI_RCVD) {
if (FW_RDEV_WR_TASK_RETRY_ID_GET(
rdevp->enh_disc_to_tgt))
rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;
if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))
rn->fcp_flags |= FCP_SPPF_RETRY;
if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))
rn->fcp_flags |= FCP_SPPF_CONF_COMPL;
if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))
rn->role |= CSIO_RNFR_TARGET;
if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))
rn->role |= CSIO_RNFR_INITIATOR;
}
break;
case FDMI_VNPORT:
case FAB_CTLR_VNPORT:
rn->role = 0;
break;
default:
csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",
csio_rn_flowid(rn), rport_type);
return -EINVAL;
}
/* validate wwpn/wwnn for Name server/remote port */
if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {
memset(null, 0, 8);
if (!memcmp(rdevp->wwnn, null, 8)) {
csio_ln_err(ln,
"ssni:x%x invalid wwnn received from"
" rport did:x%x\n",
csio_rn_flowid(rn),
(ntohl(*did) & CSIO_DID_MASK));
return -EINVAL;
}
if (!memcmp(rdevp->wwpn, null, 8)) {
csio_ln_err(ln,
"ssni:x%x invalid wwpn received from"
" rport did:x%x\n",
csio_rn_flowid(rn),
(ntohl(*did) & CSIO_DID_MASK));
return -EINVAL;
}
}
/* Copy wwnn, wwpn and nport id */
rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;
memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);
memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);
rn->rn_sparm.csp.sp_bb_data = ntohs(rdevp->rcv_fr_sz);
fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);
rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);
return 0;
}
static void
__csio_reg_rnode(struct csio_rnode *rn)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
struct csio_hw *hw = csio_lnode_to_hw(ln);
spin_unlock_irq(&hw->lock);
csio_reg_rnode(rn);
spin_lock_irq(&hw->lock);
if (rn->role & CSIO_RNFR_TARGET)
ln->n_scsi_tgts++;
if (rn->nport_id == FC_FID_MGMT_SERV)
csio_ln_fdmi_start(ln, (void *) rn);
}
static void
__csio_unreg_rnode(struct csio_rnode *rn)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
struct csio_hw *hw = csio_lnode_to_hw(ln);
LIST_HEAD(tmp_q);
int cmpl = 0;
if (!list_empty(&rn->host_cmpl_q)) {
csio_dbg(hw, "Returning completion queue I/Os\n");
list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);
cmpl = 1;
}
if (rn->role & CSIO_RNFR_TARGET) {
ln->n_scsi_tgts--;
ln->last_scan_ntgts--;
}
spin_unlock_irq(&hw->lock);
csio_unreg_rnode(rn);
spin_lock_irq(&hw->lock);
/* Cleanup I/Os that were waiting for rnode to unregister */
if (cmpl)
csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);
}
/*****************************************************************************/
/* START: Rnode SM */
/*****************************************************************************/
/*
 * csio_rns_uninit - SM handler for the rnode 'uninit' state.
* @rn - rnode
* @evt - SM event.
*
*/
static void
csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
int ret = 0;
CSIO_INC_STATS(rn, n_evt_sm[evt]);
switch (evt) {
case CSIO_RNFE_LOGGED_IN:
case CSIO_RNFE_PLOGI_RECV:
ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
if (!ret) {
csio_set_state(&rn->sm, csio_rns_ready);
__csio_reg_rnode(rn);
} else {
CSIO_INC_STATS(rn, n_err_inval);
}
break;
case CSIO_RNFE_LOGO_RECV:
csio_ln_dbg(ln,
"ssni:x%x Ignoring event %d recv "
"in rn state[uninit]\n", csio_rn_flowid(rn), evt);
CSIO_INC_STATS(rn, n_evt_drop);
break;
default:
csio_ln_dbg(ln,
"ssni:x%x unexp event %d recv "
"in rn state[uninit]\n", csio_rn_flowid(rn), evt);
CSIO_INC_STATS(rn, n_evt_unexp);
break;
}
}
/*
 * csio_rns_ready - SM handler for the rnode 'ready' state.
* @rn - rnode
* @evt - SM event.
*
*/
static void
csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
int ret = 0;
CSIO_INC_STATS(rn, n_evt_sm[evt]);
switch (evt) {
case CSIO_RNFE_LOGGED_IN:
case CSIO_RNFE_PLOGI_RECV:
csio_ln_dbg(ln,
"ssni:x%x Ignoring event %d recv from did:x%x "
"in rn state[ready]\n", csio_rn_flowid(rn), evt,
rn->nport_id);
CSIO_INC_STATS(rn, n_evt_drop);
break;
case CSIO_RNFE_PRLI_DONE:
case CSIO_RNFE_PRLI_RECV:
ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
if (!ret)
__csio_reg_rnode(rn);
else
CSIO_INC_STATS(rn, n_err_inval);
break;
case CSIO_RNFE_DOWN:
csio_set_state(&rn->sm, csio_rns_offline);
__csio_unreg_rnode(rn);
		/* FW is expected to internally abort outstanding SCSI WRs
		 * and return all SCSI WRs to the host with status "ABORTED".
*/
break;
case CSIO_RNFE_LOGO_RECV:
csio_set_state(&rn->sm, csio_rns_offline);
__csio_unreg_rnode(rn);
		/* FW is expected to internally abort outstanding SCSI WRs
		 * and return all SCSI WRs to the host with status "ABORTED".
*/
break;
case CSIO_RNFE_CLOSE:
/*
		 * Each rnode receives a CLOSE event when the driver is removed
		 * or the device is reset.
		 * Note: All outstanding IOs on the remote port need to be
		 * returned to the upper layer with an appropriate error before
		 * sending the CLOSE event.
*/
csio_set_state(&rn->sm, csio_rns_uninit);
__csio_unreg_rnode(rn);
break;
case CSIO_RNFE_NAME_MISSING:
csio_set_state(&rn->sm, csio_rns_disappeared);
__csio_unreg_rnode(rn);
/*
		 * FW is expected to internally abort outstanding SCSI WRs
		 * and return all SCSI WRs to the host with status "ABORTED".
*/
break;
default:
csio_ln_dbg(ln,
"ssni:x%x unexp event %d recv from did:x%x "
"in rn state[uninit]\n", csio_rn_flowid(rn), evt,
rn->nport_id);
CSIO_INC_STATS(rn, n_evt_unexp);
break;
}
}
/*
 * csio_rns_offline - SM handler for the rnode 'offline' state.
* @rn - rnode
* @evt - SM event.
*
*/
static void
csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
int ret = 0;
CSIO_INC_STATS(rn, n_evt_sm[evt]);
switch (evt) {
case CSIO_RNFE_LOGGED_IN:
case CSIO_RNFE_PLOGI_RECV:
ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
if (!ret) {
csio_set_state(&rn->sm, csio_rns_ready);
__csio_reg_rnode(rn);
} else {
CSIO_INC_STATS(rn, n_err_inval);
csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
}
break;
case CSIO_RNFE_DOWN:
csio_ln_dbg(ln,
"ssni:x%x Ignoring event %d recv from did:x%x "
"in rn state[offline]\n", csio_rn_flowid(rn), evt,
rn->nport_id);
CSIO_INC_STATS(rn, n_evt_drop);
break;
case CSIO_RNFE_CLOSE:
		/* Each rnode receives a CLOSE event when the driver is removed
		 * or the device is reset.
		 * Note: All outstanding IOs on the remote port need to be
		 * returned to the upper layer with an appropriate error before
		 * sending the CLOSE event.
*/
csio_set_state(&rn->sm, csio_rns_uninit);
break;
case CSIO_RNFE_NAME_MISSING:
csio_set_state(&rn->sm, csio_rns_disappeared);
break;
default:
csio_ln_dbg(ln,
"ssni:x%x unexp event %d recv from did:x%x "
"in rn state[offline]\n", csio_rn_flowid(rn), evt,
rn->nport_id);
CSIO_INC_STATS(rn, n_evt_unexp);
break;
}
}
/*
 * csio_rns_disappeared - SM handler for the rnode 'disappeared' state.
* @rn - rnode
* @evt - SM event.
*
*/
static void
csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
int ret = 0;
CSIO_INC_STATS(rn, n_evt_sm[evt]);
switch (evt) {
case CSIO_RNFE_LOGGED_IN:
case CSIO_RNFE_PLOGI_RECV:
ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
if (!ret) {
csio_set_state(&rn->sm, csio_rns_ready);
__csio_reg_rnode(rn);
} else {
CSIO_INC_STATS(rn, n_err_inval);
csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
}
break;
case CSIO_RNFE_CLOSE:
		/* Each rnode receives a CLOSE event when the driver is removed
		 * or the device is reset.
		 * Note: All outstanding IOs on the remote port need to be
		 * returned to the upper layer with an appropriate error before
		 * sending the CLOSE event.
*/
csio_set_state(&rn->sm, csio_rns_uninit);
break;
case CSIO_RNFE_DOWN:
case CSIO_RNFE_NAME_MISSING:
csio_ln_dbg(ln,
"ssni:x%x Ignoring event %d recv from did x%x"
"in rn state[disappeared]\n", csio_rn_flowid(rn),
evt, rn->nport_id);
break;
default:
csio_ln_dbg(ln,
"ssni:x%x unexp event %d recv from did x%x"
"in rn state[disappeared]\n", csio_rn_flowid(rn),
evt, rn->nport_id);
CSIO_INC_STATS(rn, n_evt_unexp);
break;
}
}
/*****************************************************************************/
/* END: Rnode SM */
/*****************************************************************************/
/*
* csio_rnode_devloss_handler - Device loss event handler
* @rn: rnode
*
* Post event to close rnode SM and free rnode.
*/
void
csio_rnode_devloss_handler(struct csio_rnode *rn)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
/* ignore if same rnode came back as online */
if (csio_is_rnode_ready(rn))
return;
csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
/* Free rn if in uninit state */
if (csio_is_rnode_uninit(rn))
csio_put_rnode(ln, rn);
}
/**
* csio_rnode_fwevt_handler - Event handler for firmware rnode events.
* @rn: rnode
* @fwevt: firmware event
*
*/
void
csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
enum csio_rn_ev evt;
evt = CSIO_FWE_TO_RNFE(fwevt);
if (!evt) {
csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",
csio_rn_flowid(rn), fwevt);
CSIO_INC_STATS(rn, n_evt_unexp);
return;
}
CSIO_INC_STATS(rn, n_evt_fw[fwevt]);
/* Track previous & current events for debugging */
rn->prev_evt = rn->cur_evt;
rn->cur_evt = fwevt;
/* Post event to rnode SM */
csio_post_event(&rn->sm, evt);
/* Free rn if in uninit state */
if (csio_is_rnode_uninit(rn))
csio_put_rnode(ln, rn);
}
/*
* csio_rnode_init - Initialize rnode.
* @rn: RNode
* @ln: Associated lnode
*
* Caller is responsible for holding the lock. The lock is required
* to be held for inserting the rnode in ln->rnhead list.
*/
static int
csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)
{
csio_rnode_to_lnode(rn) = ln;
csio_init_state(&rn->sm, csio_rns_uninit);
INIT_LIST_HEAD(&rn->host_cmpl_q);
csio_rn_flowid(rn) = CSIO_INVALID_IDX;
/* Add rnode to list of lnodes->rnhead */
list_add_tail(&rn->sm.sm_list, &ln->rnhead);
return 0;
}
static void
csio_rnode_exit(struct csio_rnode *rn)
{
list_del_init(&rn->sm.sm_list);
CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));
}
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_RNODE_H__
#define __CSIO_RNODE_H__
#include "csio_defs.h"
/* State machine events */
enum csio_rn_ev {
CSIO_RNFE_NONE = (uint32_t)0, /* None */
CSIO_RNFE_LOGGED_IN, /* [N/F]Port login
* complete.
*/
CSIO_RNFE_PRLI_DONE, /* PRLI completed */
CSIO_RNFE_PLOGI_RECV, /* Received PLOGI */
CSIO_RNFE_PRLI_RECV, /* Received PRLI */
CSIO_RNFE_LOGO_RECV, /* Received LOGO */
CSIO_RNFE_PRLO_RECV, /* Received PRLO */
CSIO_RNFE_DOWN, /* Rnode is down */
CSIO_RNFE_CLOSE, /* Close rnode */
CSIO_RNFE_NAME_MISSING, /* Rnode name missing
* in name server.
*/
CSIO_RNFE_MAX_EVENT,
};
/* rnode stats */
struct csio_rnode_stats {
uint32_t n_err; /* error */
uint32_t n_err_inval; /* invalid parameter */
uint32_t n_err_nomem; /* error nomem */
uint32_t n_evt_unexp; /* unexpected event */
uint32_t n_evt_drop; /* dropped event */
uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
uint32_t n_lun_rst; /* Number of resets of
* LUNs under this
* target.
*/
uint32_t n_lun_rst_fail; /* Number of LUN reset
* failures.
*/
uint32_t n_tgt_rst; /* Number of target resets */
uint32_t n_tgt_rst_fail; /* Number of target reset
* failures.
*/
};
/* Defines for rnode role */
#define CSIO_RNFR_INITIATOR 0x1
#define CSIO_RNFR_TARGET 0x2
#define CSIO_RNFR_FABRIC 0x4
#define CSIO_RNFR_NS 0x8
#define CSIO_RNFR_NPORT 0x10
struct csio_rnode {
struct csio_sm sm; /* State machine -
* should be the
* 1st member
*/
struct csio_lnode *lnp; /* Pointer to owning
* Lnode */
uint32_t flowid; /* Firmware ID */
struct list_head host_cmpl_q; /* SCSI I/Os pending
* completion to the
* mid-layer.
*/
/* FC identifiers for remote node */
uint32_t nport_id;
uint16_t fcp_flags; /* FCP Flags */
uint8_t cur_evt; /* Current event */
uint8_t prev_evt; /* Previous event */
uint32_t role; /* Fabric/Target/
* Initiator/NS
*/
struct fcoe_rdev_entry *rdev_entry; /* Rdev entry */
struct csio_service_parms rn_sparm;
/* FC transport attributes */
struct fc_rport *rport; /* FC transport rport */
uint32_t supp_classes; /* Supported FC classes */
uint32_t maxframe_size; /* Max Frame size */
uint32_t scsi_id; /* Transport given SCSI id */
struct csio_rnode_stats stats; /* Common rnode stats */
};
#define csio_rn_flowid(rn) ((rn)->flowid)
#define csio_rn_wwpn(rn) ((rn)->rn_sparm.wwpn)
#define csio_rn_wwnn(rn) ((rn)->rn_sparm.wwnn)
#define csio_rnode_to_lnode(rn) ((rn)->lnp)
int csio_is_rnode_ready(struct csio_rnode *rn);
void csio_rnode_state_to_str(struct csio_rnode *rn, int8_t *str);
struct csio_rnode *csio_rnode_lookup_portid(struct csio_lnode *, uint32_t);
struct csio_rnode *csio_confirm_rnode(struct csio_lnode *,
uint32_t, struct fcoe_rdev_entry *);
void csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt);
void csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn);
void csio_reg_rnode(struct csio_rnode *);
void csio_unreg_rnode(struct csio_rnode *);
void csio_rnode_devloss_handler(struct csio_rnode *);
#endif /* ifndef __CSIO_RNODE_H__ */
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <asm/page.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_scsi.h"
#include "csio_init.h"
int csio_scsi_eqsize = 65536;
int csio_scsi_iqlen = 128;
int csio_scsi_ioreqs = 2048;
uint32_t csio_max_scan_tmo;
uint32_t csio_delta_scan_tmo = 5;
int csio_lun_qdepth = 32;
static int csio_ddp_descs = 128;
static int csio_do_abrt_cls(struct csio_hw *,
struct csio_ioreq *, bool);
static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);
/*
* csio_scsi_match_io - Match an ioreq with the given SCSI level data.
* @ioreq: The I/O request
* @sld: Level information
*
* Should be called with lock held.
*
*/
static bool
csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
{
struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);
switch (sld->level) {
case CSIO_LEV_LUN:
if (scmnd == NULL)
return false;
return ((ioreq->lnode == sld->lnode) &&
(ioreq->rnode == sld->rnode) &&
((uint64_t)scmnd->device->lun == sld->oslun));
case CSIO_LEV_RNODE:
return ((ioreq->lnode == sld->lnode) &&
(ioreq->rnode == sld->rnode));
case CSIO_LEV_LNODE:
return (ioreq->lnode == sld->lnode);
case CSIO_LEV_ALL:
return true;
default:
return false;
}
}
/*
* csio_scsi_gather_active_ios - Gather active I/Os based on level
* @scm: SCSI module
* @sld: Level information
* @dest: The queue where these I/Os have to be gathered.
*
* Should be called with lock held.
*/
static void
csio_scsi_gather_active_ios(struct csio_scsim *scm,
struct csio_scsi_level_data *sld,
struct list_head *dest)
{
struct list_head *tmp, *next;
if (list_empty(&scm->active_q))
return;
/* Just splice the entire active_q into dest */
if (sld->level == CSIO_LEV_ALL) {
list_splice_tail_init(&scm->active_q, dest);
return;
}
list_for_each_safe(tmp, next, &scm->active_q) {
if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {
list_del_init(tmp);
list_add_tail(tmp, dest);
}
}
}
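/*
* csio_scsi_itnexus_loss_error - Check for an I-T nexus loss status.
* @error: FW WR status code.
*
* Returns 1 if the FW status indicates that the I-T nexus to the remote
* device was lost (link down, rdev not ready/lost/logged out/implicitly
* logged out), 0 otherwise.
*/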
static inline bool
csio_scsi_itnexus_loss_error(uint16_t error)
{
switch (error) {
case FW_ERR_LINK_DOWN:
case FW_RDEV_NOT_READY:
case FW_ERR_RDEV_LOST:
case FW_ERR_RDEV_LOGO:
case FW_ERR_RDEV_IMPL_LOGO:
return 1;
}
return 0;
}
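/*
* csio_scsi_tag - Map the midlayer queue tag to an FCP task attribute.
* @scmnd: SCSI command.
* @tag: Returned task attribute.
* @hq: Value to return for a head-of-queue tag.
* @oq: Value to return for an ordered tag.
* @sq: Value to return for a simple tag.
*
* The caller passes the FCP_PTA_* encodings (see csio_scsi_fcp_cmnd());
* if the midlayer did not tag the command, 0 is returned in @tag.
*/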
static inline void
csio_scsi_tag(struct scsi_cmnd *scmnd, uint8_t *tag, uint8_t hq,
uint8_t oq, uint8_t sq)
{
char stag[2];
if (scsi_populate_tag_msg(scmnd, stag)) {
switch (stag[0]) {
case HEAD_OF_QUEUE_TAG:
*tag = hq;
break;
case ORDERED_QUEUE_TAG:
*tag = oq;
break;
default:
*tag = sq;
break;
}
} else
*tag = 0;
}
/*
* csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload.
* @req: IO req structure.
* @addr: DMA location to place the payload.
*
* This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.
*/
static inline void
csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
{
struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;
struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
/* Check for Task Management */
if (likely(scmnd->SCp.Message == 0)) {
int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
fcp_cmnd->fc_tm_flags = 0;
fcp_cmnd->fc_cmdref = 0;
fcp_cmnd->fc_pri_ta = 0;
memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
csio_scsi_tag(scmnd, &fcp_cmnd->fc_pri_ta,
FCP_PTA_HEADQ, FCP_PTA_ORDERED, FCP_PTA_SIMPLE);
fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));
if (req->nsge)
if (req->datadir == DMA_TO_DEVICE)
fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
else
fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
else
fcp_cmnd->fc_flags = 0;
} else {
memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
}
}
/*
* csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
* @req: IO req structure.
* @addr: DMA location to place the payload.
* @size: Size of WR (including FW WR + immed data + rsp SG entry)
*
* Wrapper for populating fw_scsi_cmd_wr.
*/
static inline void
csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
{
struct csio_hw *hw = req->lnode->hwp;
struct csio_rnode *rn = req->rnode;
struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
struct csio_dma_buf *dma_buf;
uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_CMD_WR) |
FW_SCSI_CMD_WR_IMMDLEN(imm));
wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
FW_WR_LEN16(
DIV_ROUND_UP(size, 16)));
wr->cookie = (uintptr_t) req;
wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
wr->tmo_val = (uint8_t) req->tmo;
wr->r3 = 0;
memset(&wr->r5, 0, 8);
/* Get RSP DMA buffer */
dma_buf = &req->dma_buf;
/* Prepare RSP SGL */
wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
wr->r6 = 0;
wr->u.fcoe.ctl_pri = 0;
wr->u.fcoe.cp_en_class = 0;
wr->u.fcoe.r4_lo[0] = 0;
wr->u.fcoe.r4_lo[1] = 0;
/* Frame a FCP command */
csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
sizeof(struct fw_scsi_cmd_wr)));
}
#define CSIO_SCSI_CMD_WR_SZ(_imm) \
(sizeof(struct fw_scsi_cmd_wr) + /* WR size */ \
ALIGN((_imm), 16)) /* Immed data */
#define CSIO_SCSI_CMD_WR_SZ_16(_imm) \
(ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))
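/*
* Sizing example (illustrative only): assuming proto_cmd_len is the
* 32-byte FCP_CMND length set up elsewhere in the driver,
* CSIO_SCSI_CMD_WR_SZ(32) = sizeof(struct fw_scsi_cmd_wr) + 32, and
* CSIO_SCSI_CMD_WR_SZ_16() rounds that up to the next multiple of 16,
* matching the 16-byte units used for FW_WR_LEN16().
*/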
/*
* csio_scsi_cmd - Create a SCSI CMD WR.
* @req: IO req structure.
*
* Gets a WR slot in the egress queue and initializes it with the SCSI CMD WR.
*
*/
static inline void
csio_scsi_cmd(struct csio_ioreq *req)
{
struct csio_wr_pair wrp;
struct csio_hw *hw = req->lnode->hwp;
struct csio_scsim *scsim = csio_hw_to_scsim(hw);
uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);
req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
if (unlikely(req->drv_status != 0))
return;
if (wrp.size1 >= size) {
/* Initialize WR in one shot */
csio_scsi_init_cmd_wr(req, wrp.addr1, size);
} else {
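/*
* wrp describes two discontiguous segments (addr1/size1 and
* addr2) -- presumably a wrap at the end of the egress queue --
* so build the WR in a scratch buffer and split-copy it below.
*/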
uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
/*
* Make a temporary copy of the WR and write back
* the copy into the WR pair.
*/
csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
memcpy(wrp.addr1, tmpwr, wrp.size1);
memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
}
}
/*
* csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL
* @hw: HW module
* @req: IO request
* @sgl: ULP TX SGL pointer.
*
*/
static inline void
csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
struct ulptx_sgl *sgl)
{
struct ulptx_sge_pair *sge_pair = NULL;
struct scatterlist *sgel;
uint32_t i = 0;
uint32_t xfer_len;
struct list_head *tmp;
struct csio_dma_buf *dma_buf;
struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_MORE |
ULPTX_NSGE(req->nsge));
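/*
* DSGL layout: the first SGE occupies addr0/len0 in the ulptx_sgl
* header itself; every subsequent SGE is packed two per
* ulptx_sge_pair that follows it, which is why the loops below
* alternate on ((i - 1) & 0x1).
*/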
/* Now add the data SGLs */
if (likely(!req->dcopy)) {
scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
if (i == 0) {
sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
continue;
}
if ((i - 1) & 0x1) {
sge_pair->addr[1] = cpu_to_be64(
sg_dma_address(sgel));
sge_pair->len[1] = cpu_to_be32(
sg_dma_len(sgel));
sge_pair++;
} else {
sge_pair->addr[0] = cpu_to_be64(
sg_dma_address(sgel));
sge_pair->len[0] = cpu_to_be32(
sg_dma_len(sgel));
}
}
} else {
/* Program sg elements with driver's DDP buffer */
xfer_len = scsi_bufflen(scmnd);
list_for_each(tmp, &req->gen_list) {
dma_buf = (struct csio_dma_buf *)tmp;
if (i == 0) {
sgl->addr0 = cpu_to_be64(dma_buf->paddr);
sgl->len0 = cpu_to_be32(
min(xfer_len, dma_buf->len));
sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
} else if ((i - 1) & 0x1) {
sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
sge_pair->len[1] = cpu_to_be32(
min(xfer_len, dma_buf->len));
sge_pair++;
} else {
sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
sge_pair->len[0] = cpu_to_be32(
min(xfer_len, dma_buf->len));
}
xfer_len -= min(xfer_len, dma_buf->len);
i++;
}
}
}
/*
* csio_scsi_init_read_wr - Initialize the READ SCSI WR.
* @req: IO req structure.
* @wrp: DMA location to place the payload.
* @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
*
* Wrapper for populating fw_scsi_read_wr.
*/
static inline void
csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
struct csio_hw *hw = req->lnode->hwp;
struct csio_rnode *rn = req->rnode;
struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
struct ulptx_sgl *sgl;
struct csio_dma_buf *dma_buf;
uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_READ_WR) |
FW_SCSI_READ_WR_IMMDLEN(imm));
wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
wr->cookie = (uintptr_t)req;
wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
wr->tmo_val = (uint8_t)(req->tmo);
wr->use_xfer_cnt = 1;
wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
/* Get RSP DMA buffer */
dma_buf = &req->dma_buf;
/* Prepare RSP SGL */
wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
wr->r4 = 0;
wr->u.fcoe.ctl_pri = 0;
wr->u.fcoe.cp_en_class = 0;
wr->u.fcoe.r3_lo[0] = 0;
wr->u.fcoe.r3_lo[1] = 0;
csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
sizeof(struct fw_scsi_read_wr)));
/* Move WR pointer past command and immediate data */
sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));
/* Fill in the DSGL */
csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}
/*
* csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
* @req: IO req structure.
* @wrp: DMA location to place the payload.
* @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
*
* Wrapper for populating fw_scsi_write_wr.
*/
static inline void
csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
struct csio_hw *hw = req->lnode->hwp;
struct csio_rnode *rn = req->rnode;
struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
struct ulptx_sgl *sgl;
struct csio_dma_buf *dma_buf;
uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_WRITE_WR) |
FW_SCSI_WRITE_WR_IMMDLEN(imm));
wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
wr->cookie = (uintptr_t)req;
wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
wr->tmo_val = (uint8_t)(req->tmo);
wr->use_xfer_cnt = 1;
wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
/* Get RSP DMA buffer */
dma_buf = &req->dma_buf;
/* Prepare RSP SGL */
wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
wr->r4 = 0;
wr->u.fcoe.ctl_pri = 0;
wr->u.fcoe.cp_en_class = 0;
wr->u.fcoe.r3_lo[0] = 0;
wr->u.fcoe.r3_lo[1] = 0;
csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
sizeof(struct fw_scsi_write_wr)));
/* Move WR pointer past command and immediate data */
sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));
/* Fill in the DSGL */
csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}
/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm) \
do { \
(sz) = sizeof(struct fw_scsi_##oper##_wr) + /* WR size */ \
ALIGN((imm), 16) + /* Immed data */ \
sizeof(struct ulptx_sgl); /* ulptx_sgl */ \
\
if (unlikely((req)->nsge > 1)) \
(sz) += (sizeof(struct ulptx_sge_pair) * \
(ALIGN(((req)->nsge - 1), 2) / 2)); \
/* Data SGE */ \
} while (0)
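/*
* Sizing example (illustrative only): for a READ with req->nsge = 3 and
* a 32-byte immediate FCP_CMND, this yields sizeof(struct
* fw_scsi_read_wr) + 32 + sizeof(struct ulptx_sgl) + 1 *
* sizeof(struct ulptx_sge_pair): the first SGE lives in the ulptx_sgl
* itself and the remaining two share one sge_pair. With nsge = 1 no
* sge_pair is added at all.
*/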
/*
* csio_scsi_read - Create a SCSI READ WR.
* @req: IO req structure.
*
* Gets a WR slot in the egress queue and initializes it with
* SCSI READ WR.
*
*/
static inline void
csio_scsi_read(struct csio_ioreq *req)
{
struct csio_wr_pair wrp;
uint32_t size;
struct csio_hw *hw = req->lnode->hwp;
struct csio_scsim *scsim = csio_hw_to_scsim(hw);
CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
size = ALIGN(size, 16);
req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
if (likely(req->drv_status == 0)) {
if (likely(wrp.size1 >= size)) {
/* Initialize WR in one shot */
csio_scsi_init_read_wr(req, wrp.addr1, size);
} else {
uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
/*
* Make a temporary copy of the WR and write back
* the copy into the WR pair.
*/
csio_scsi_init_read_wr(req, (void *)tmpwr, size);
memcpy(wrp.addr1, tmpwr, wrp.size1);
memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
}
}
}
/*
* csio_scsi_write - Create a SCSI WRITE WR.
* @req: IO req structure.
*
* Gets a WR slot in the egress queue and initializes it with
* SCSI WRITE WR.
*
*/
static inline void
csio_scsi_write(struct csio_ioreq *req)
{
struct csio_wr_pair wrp;
uint32_t size;
struct csio_hw *hw = req->lnode->hwp;
struct csio_scsim *scsim = csio_hw_to_scsim(hw);
CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
size = ALIGN(size, 16);
req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
if (likely(req->drv_status == 0)) {
if (likely(wrp.size1 >= size)) {
/* Initialize WR in one shot */
csio_scsi_init_write_wr(req, wrp.addr1, size);
} else {
uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
/*
* Make a temporary copy of the WR and write back
* the copy into the WR pair.
*/
csio_scsi_init_write_wr(req, (void *)tmpwr, size);
memcpy(wrp.addr1, tmpwr, wrp.size1);
memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
}
}
}
/*
* csio_setup_ddp - Setup DDP buffers for Read request.
* @req: IO req structure.
*
* Checks whether the SGLs/data buffers are virtually contiguous, as
* required for DDP. If contiguous, the driver posts the SGLs in the WR;
* otherwise it posts internal DDP buffers for the request.
*/
static inline void
csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
{
#ifdef __CSIO_DEBUG__
struct csio_hw *hw = req->lnode->hwp;
#endif
struct scatterlist *sgel = NULL;
struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
uint64_t sg_addr = 0;
uint32_t ddp_pagesz = 4096;
uint32_t buf_off;
struct csio_dma_buf *dma_buf = NULL;
uint32_t alloc_len = 0;
uint32_t xfer_len = 0;
uint32_t sg_len = 0;
uint32_t i;
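/*
* DDP placement rule enforced below: every buffer except the first
* must start on a page boundary, and every buffer except the last
* must end on one. E.g. a 3-element SGL whose middle element ends
* mid-page forces the "unaligned" fallback to driver DDP buffers.
*/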
scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
sg_addr = sg_dma_address(sgel);
sg_len = sg_dma_len(sgel);
buf_off = sg_addr & (ddp_pagesz - 1);
/* Except the 1st buffer, all buffer addresses have to be page aligned */
if (i != 0 && buf_off) {
csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
sg_addr, sg_len);
goto unaligned;
}
/* Except the last buffer, all buffers must end on a page boundary */
if ((i != (req->nsge - 1)) &&
((buf_off + sg_len) & (ddp_pagesz - 1))) {
csio_dbg(hw,
"SGL addr not ending on page boundary"
"(%llx:%d)\n", sg_addr, sg_len);
goto unaligned;
}
}
/* SGL's are virtually contiguous. HW will DDP to SGLs */
req->dcopy = 0;
csio_scsi_read(req);
return;
unaligned:
CSIO_INC_STATS(scsim, n_unaligned);
/*
* For unaligned SGLs, the driver allocates internal DDP buffers.
* Once the command completes, data is copied from the DDP buffers
* back to the SGLs.
*/
req->dcopy = 1;
/* Use gen_list to store the DDP buffers */
INIT_LIST_HEAD(&req->gen_list);
xfer_len = scsi_bufflen(scmnd);
i = 0;
/* Allocate ddp buffers for this request */
while (alloc_len < xfer_len) {
dma_buf = csio_get_scsi_ddp(scsim);
if (dma_buf == NULL || i > scsim->max_sge) {
req->drv_status = -EBUSY;
break;
}
alloc_len += dma_buf->len;
/* Added to IO req */
list_add_tail(&dma_buf->list, &req->gen_list);
i++;
}
if (!req->drv_status) {
/* set number of ddp bufs used */
req->nsge = i;
csio_scsi_read(req);
return;
}
/* release dma descs */
if (i > 0)
csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
}
/*
* csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
* @req: IO req structure.
* @addr: DMA location to place the payload.
* @size: Size of WR
* @abort: abort OR close
*
* Wrapper for populating fw_scsi_cmd_wr.
*/
static inline void
csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
bool abort)
{
struct csio_hw *hw = req->lnode->hwp;
struct csio_rnode *rn = req->rnode;
struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;
wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_ABRT_CLS_WR));
wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
FW_WR_LEN16(
DIV_ROUND_UP(size, 16)));
wr->cookie = (uintptr_t) req;
wr->iqid = (uint16_t)cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
wr->tmo_val = (uint8_t) req->tmo;
/* 0 for CHK_ALL_IO tells FW to look up t_cookie */
wr->sub_opcode_to_chk_all_io =
(FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
wr->r3[0] = 0;
wr->r3[1] = 0;
wr->r3[2] = 0;
wr->r3[3] = 0;
/* Since we re-use the same ioreq for abort as well */
wr->t_cookie = (uintptr_t) req;
}
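/*
* csio_scsi_abrt_cls - Create an ABORT/CLOSE WR for a pending I/O.
* @req: The I/O request being aborted or closed (re-used for the WR).
* @abort: SCSI_ABORT or SCSI_CLOSE.
*
* Gets a WR slot in the egress queue and initializes it with an
* ABORT/CLOSE WR, using the same wrap-around handling as the SCSI
* CMD/READ/WRITE WRs above.
*/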
static inline void
csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
{
struct csio_wr_pair wrp;
struct csio_hw *hw = req->lnode->hwp;
uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);
req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
if (req->drv_status != 0)
return;
if (wrp.size1 >= size) {
/* Initialize WR in one shot */
csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
} else {
uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
/*
* Make a temporary copy of the WR and write back
* the copy into the WR pair.
*/
csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
memcpy(wrp.addr1, tmpwr, wrp.size1);
memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
}
}
/*****************************************************************************/
/* START: SCSI SM */
/*****************************************************************************/
static void
csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
struct csio_hw *hw = req->lnode->hwp;
struct csio_scsim *scsim = csio_hw_to_scsim(hw);
switch (evt) {
case CSIO_SCSIE_START_IO:
if (req->nsge) {
if (req->datadir == DMA_TO_DEVICE) {
req->dcopy = 0;
csio_scsi_write(req);
} else
csio_setup_ddp(scsim, req);
} else {
csio_scsi_cmd(req);
}
if (likely(req->drv_status == 0)) {
/* change state and enqueue on active_q */
csio_set_state(&req->sm, csio_scsis_io_active);
list_add_tail(&req->sm.sm_list, &scsim->active_q);
csio_wr_issue(hw, req->eq_idx, false);
CSIO_INC_STATS(scsim, n_active);
return;
}
break;
case CSIO_SCSIE_START_TM:
csio_scsi_cmd(req);
if (req->drv_status == 0) {
/*
* NOTE: We collect the affected I/Os prior to issuing
* LUN reset, and not after it. This is to prevent
* aborting I/Os that get issued after the LUN reset,
* but prior to LUN reset completion (in the event that
* the host stack has not blocked I/Os to a LUN that is
* being reset.
*/
csio_set_state(&req->sm, csio_scsis_tm_active);
list_add_tail(&req->sm.sm_list, &scsim->active_q);
csio_wr_issue(hw, req->eq_idx, false);
CSIO_INC_STATS(scsim, n_tm_active);
}
return;
case CSIO_SCSIE_ABORT:
case CSIO_SCSIE_CLOSE:
/*
* NOTE:
* We could get here due to:
* - a window in the cleanup path of the SCSI module
* (csio_scsi_abort_io()). Please see the NOTE in that function.
* - a window between the time we tried to issue an abort/close
* of a request to FW and the time the FW completed the request
* itself.
* Print a message for now, and return INVAL either way.
*/
req->drv_status = -EINVAL;
csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
break;
default:
csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
CSIO_DB_ASSERT(0);
}
}
static void
csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
struct csio_hw *hw = req->lnode->hwp;
struct csio_scsim *scm = csio_hw_to_scsim(hw);
struct csio_rnode *rn;
switch (evt) {
case CSIO_SCSIE_COMPLETED:
CSIO_DEC_STATS(scm, n_active);
list_del_init(&req->sm.sm_list);
csio_set_state(&req->sm, csio_scsis_uninit);
/*
* In MSIX mode, with multiple queues, the SCSI completions
* could reach us sooner than the FW events sent to indicate
* I-T nexus loss (link down, remote device logo etc.). We
* don't want to be returning such I/Os to the upper layer
* immediately, since we wouldn't have reported the I-T nexus
* loss itself. This forces us to serialize such completions
* with the reporting of the I-T nexus loss. Therefore, we
* internally queue up such completions in the rnode.
* The reporting of I-T nexus loss to the upper layer is then
* followed by the returning of I/Os in this internal queue.
* Having another state along with another queue helps us take
* actions for events such as ABORT received while we are
* in this rnode queue.
*/
if (unlikely(req->wr_status != FW_SUCCESS)) {
rn = req->rnode;
/*
* FW says remote device is lost, but rnode
* doesn't reflect it.
*/
if (csio_scsi_itnexus_loss_error(req->wr_status) &&
csio_is_rnode_ready(rn)) {
csio_set_state(&req->sm,
csio_scsis_shost_cmpl_await);
list_add_tail(&req->sm.sm_list,
&rn->host_cmpl_q);
}
}
break;
case CSIO_SCSIE_ABORT:
csio_scsi_abrt_cls(req, SCSI_ABORT);
if (req->drv_status == 0) {
csio_wr_issue(hw, req->eq_idx, false);
csio_set_state(&req->sm, csio_scsis_aborting);
}
break;
case CSIO_SCSIE_CLOSE:
csio_scsi_abrt_cls(req, SCSI_CLOSE);
if (req->drv_status == 0) {
csio_wr_issue(hw, req->eq_idx, false);
csio_set_state(&req->sm, csio_scsis_closing);
}
break;
case CSIO_SCSIE_DRVCLEANUP:
req->wr_status = FW_HOSTERROR;
CSIO_DEC_STATS(scm, n_active);
csio_set_state(&req->sm, csio_scsis_uninit);
break;
default:
csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
CSIO_DB_ASSERT(0);
}
}
static void
csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
struct csio_hw *hw = req->lnode->hwp;
struct csio_scsim *scm = csio_hw_to_scsim(hw);
switch (evt) {
case CSIO_SCSIE_COMPLETED:
CSIO_DEC_STATS(scm, n_tm_active);
list_del_init(&req->sm.sm_list);
csio_set_state(&req->sm, csio_scsis_uninit);
break;
case CSIO_SCSIE_ABORT:
csio_scsi_abrt_cls(req, SCSI_ABORT);
if (req->drv_status == 0) {
csio_wr_issue(hw, req->eq_idx, false);
csio_set_state(&req->sm, csio_scsis_aborting);
}
break;
case CSIO_SCSIE_CLOSE:
csio_scsi_abrt_cls(req, SCSI_CLOSE);
if (req->drv_status == 0) {
csio_wr_issue(hw, req->eq_idx, false);
csio_set_state(&req->sm, csio_scsis_closing);
}
break;
case CSIO_SCSIE_DRVCLEANUP:
req->wr_status = FW_HOSTERROR;
CSIO_DEC_STATS(scm, n_tm_active);
csio_set_state(&req->sm, csio_scsis_uninit);
break;
default:
csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
CSIO_DB_ASSERT(0);
}
}
static void
csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
struct csio_hw *hw = req->lnode->hwp;
struct csio_scsim *scm = csio_hw_to_scsim(hw);
switch (evt) {
case CSIO_SCSIE_COMPLETED:
csio_dbg(hw,
"ioreq %p recvd cmpltd (wr_status:%d) "
"in aborting st\n", req, req->wr_status);
/*
* Use -ECANCELED to explicitly tell the ABORTED event that
* the original I/O was returned to the driver by FW.
* We don't really care if the I/O was returned with success by
* FW (because the ABORT and completion of the I/O crossed each
* other), or any other return value. Once we are in aborting
* state, the success or failure of the I/O is unimportant to
* us.
*/
req->drv_status = -ECANCELED;
break;
case CSIO_SCSIE_ABORT:
CSIO_INC_STATS(scm, n_abrt_dups);
break;
case CSIO_SCSIE_ABORTED:
csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
req, req->wr_status, req->drv_status);
/*
* Check if original I/O WR completed before the Abort
* completion.
*/
if (req->drv_status != -ECANCELED) {
csio_warn(hw,
"Abort completed before original I/O,"
" req:%p\n", req);
CSIO_DB_ASSERT(0);
}
/*
* There are the following possible scenarios:
* 1. The abort completed successfully, FW returned FW_SUCCESS.
* 2. The completion of an I/O and the receipt of
* abort for that I/O by the FW crossed each other.
* The FW returned FW_EINVAL. The original I/O would have
* returned with FW_SUCCESS or any other SCSI error.
* 3. The FW couldn't send the abort out on the wire, as there
* was an I-T nexus loss (link down, remote device logged
* out etc.). FW sent back an appropriate I-T nexus loss status
* for the abort.
* 4. FW sent an abort, but the abort timed out (remote device
* didn't respond). FW replied back with
* FW_SCSI_ABORT_TIMEDOUT.
* 5. FW couldn't genuinely abort the request for some reason,
* and sent us an error.
*
* The first 3 scenarios are treated as successful abort
* operations by the host, while the last 2 are failed attempts
* to abort. Manipulate the return value of the request
* appropriately, so that the host can convey these results
* back to the upper layer.
*/
if ((req->wr_status == FW_SUCCESS) ||
(req->wr_status == FW_EINVAL) ||
csio_scsi_itnexus_loss_error(req->wr_status))
req->wr_status = FW_SCSI_ABORT_REQUESTED;
CSIO_DEC_STATS(scm, n_active);
list_del_init(&req->sm.sm_list);
csio_set_state(&req->sm, csio_scsis_uninit);
break;
case CSIO_SCSIE_DRVCLEANUP:
req->wr_status = FW_HOSTERROR;
CSIO_DEC_STATS(scm, n_active);
csio_set_state(&req->sm, csio_scsis_uninit);
break;
case CSIO_SCSIE_CLOSE:
/*
* We can receive this event from the module
* cleanup paths, if the FW forgot to reply to the ABORT WR
* and left this ioreq in this state. For now, just ignore
* the event. The CLOSE event is sent to this state, as
* the LINK may have already gone down.
*/
break;
default:
csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
CSIO_DB_ASSERT(0);
}
}
static void
csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
struct csio_hw *hw = req->lnode->hwp;
struct csio_scsim *scm = csio_hw_to_scsim(hw);
switch (evt) {
case CSIO_SCSIE_COMPLETED:
csio_dbg(hw,
"ioreq %p recvd cmpltd (wr_status:%d) "
"in closing st\n", req, req->wr_status);
/*
* Use -ECANCELED to explicitly tell the CLOSED event that
* the original I/O was returned to the driver by FW.
* We don't really care if the I/O was returned with success by
* FW (because the CLOSE and completion of the I/O crossed each
* other), or any other return value. Once we are in the closing
* state, the success or failure of the I/O is unimportant to
* us.
*/
req->drv_status = -ECANCELED;
break;
case CSIO_SCSIE_CLOSED:
/*
* Check if original I/O WR completed before the Close
* completion.
*/
if (req->drv_status != -ECANCELED) {
csio_fatal(hw,
"Close completed before original I/O,"
" req:%p\n", req);
CSIO_DB_ASSERT(0);
}
/*
* Either close succeeded, or we issued close to FW at the
* same time the FW completed it to us. Either way, the I/O
* is closed.
*/
CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
(req->wr_status == FW_EINVAL));
req->wr_status = FW_SCSI_CLOSE_REQUESTED;
CSIO_DEC_STATS(scm, n_active);
list_del_init(&req->sm.sm_list);
csio_set_state(&req->sm, csio_scsis_uninit);
break;
case CSIO_SCSIE_CLOSE:
break;
case CSIO_SCSIE_DRVCLEANUP:
req->wr_status = FW_HOSTERROR;
CSIO_DEC_STATS(scm, n_active);
csio_set_state(&req->sm, csio_scsis_uninit);
break;
default:
csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
CSIO_DB_ASSERT(0);
}
}
static void
csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
switch (evt) {
case CSIO_SCSIE_ABORT:
case CSIO_SCSIE_CLOSE:
/*
* Just succeed the abort request, and hope that
* the remote device unregister path will clean up
* this I/O to the upper layer within a sane
* amount of time.
*/
/*
* A close can come in during a LINK DOWN. The FW would have
* returned the I/O back to us, but not the remote device lost
* FW event. In this interval, if the I/O times out at the upper
* layer, a close can come in. Take the same action as abort:
* return success, and hope that the remote device unregister
* path will clean up this I/O. If the FW still doesn't send
* the msg, the close times out, and the upper layer resorts
* to the next level of error recovery.
*/
req->drv_status = 0;
break;
case CSIO_SCSIE_DRVCLEANUP:
csio_set_state(&req->sm, csio_scsis_uninit);
break;
default:
csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
evt, req);
CSIO_DB_ASSERT(0);
}
}
/*
* csio_scsi_cmpl_handler - WR completion handler for SCSI.
* @hw: HW module.
* @wr: The completed WR from the ingress queue.
* @len: Length of the WR.
* @flb: Freelist buffer array.
* @priv: Private object
* @scsiwr: Pointer to SCSI WR.
*
* This is the WR completion handler called per completion from the
* ISR. It is called with lock held. It walks past the RSS and CPL message
* header where the actual WR is present.
* It then gets the status, WR handle (ioreq pointer) and the len of
* the WR, based on WR opcode. Only on a non-good status is the entire
* WR copied into the WR cache (ioreq->fw_wr).
* The ioreq corresponding to the WR is returned to the caller.
* NOTE: The SCSI queue doesn't allocate a freelist today, hence
* no freelist buffer is expected.
*/
struct csio_ioreq *
csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr)
{
struct csio_ioreq *ioreq = NULL;
struct cpl_fw6_msg *cpl;
uint8_t *tempwr;
uint8_t status;
struct csio_scsim *scm = csio_hw_to_scsim(hw);
/* skip RSS header */
cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));
if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
cpl->opcode);
CSIO_INC_STATS(scm, n_inval_cplop);
return NULL;
}
tempwr = (uint8_t *)(cpl->data);
status = csio_wr_status(tempwr);
*scsiwr = tempwr;
if (likely((*tempwr == FW_SCSI_READ_WR) ||
(*tempwr == FW_SCSI_WRITE_WR) ||
(*tempwr == FW_SCSI_CMD_WR))) {
ioreq = (struct csio_ioreq *)((uintptr_t)
(((struct fw_scsi_read_wr *)tempwr)->cookie));
CSIO_DB_ASSERT(virt_addr_valid(ioreq));
ioreq->wr_status = status;
return ioreq;
}
if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
ioreq = (struct csio_ioreq *)((uintptr_t)
(((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
CSIO_DB_ASSERT(virt_addr_valid(ioreq));
ioreq->wr_status = status;
return ioreq;
}
csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
CSIO_INC_STATS(scm, n_inval_scsiop);
return NULL;
}
/*
* csio_scsi_cleanup_io_q - Cleanup the given queue.
* @scm: SCSI module.
* @q: Queue to be cleaned up.
*
* Called with lock held. Has to exit with lock held.
*/
void
csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
{
struct csio_hw *hw = scm->hw;
struct csio_ioreq *ioreq;
struct list_head *tmp, *next;
struct scsi_cmnd *scmnd;
/* Call back the completion routines of the active_q */
list_for_each_safe(tmp, next, q) {
ioreq = (struct csio_ioreq *)tmp;
csio_scsi_drvcleanup(ioreq);
list_del_init(&ioreq->sm.sm_list);
scmnd = csio_scsi_cmnd(ioreq);
spin_unlock_irq(&hw->lock);
/*
* Upper layers may have cleared this command, hence this
* check to avoid accessing stale references.
*/
if (scmnd != NULL)
ioreq->io_cbfn(hw, ioreq);
spin_lock_irq(&scm->freelist_lock);
csio_put_scsi_ioreq(scm, ioreq);
spin_unlock_irq(&scm->freelist_lock);
spin_lock_irq(&hw->lock);
}
}
#define CSIO_SCSI_ABORT_Q_POLL_MS 2000
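/*
* csio_abrt_cls - Abort or close a single gathered I/O.
* @ioreq: The I/O request.
* @scmnd: The SCSI command the request was gathered with.
*
* Re-checks that @ioreq still belongs to @scmnd (the I/O may have
* completed in the meantime), then issues an abort if the lnode is
* ready, or a close otherwise.
*/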
static void
csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)
{
struct csio_lnode *ln = ioreq->lnode;
struct csio_hw *hw = ln->hwp;
int ready = 0;
struct csio_scsim *scsim = csio_hw_to_scsim(hw);
int rv;
if (csio_scsi_cmnd(ioreq) != scmnd) {
CSIO_INC_STATS(scsim, n_abrt_race_comp);
return;
}
ready = csio_is_lnode_ready(ln);
rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
if (rv != 0) {
if (ready)
CSIO_INC_STATS(scsim, n_abrt_busy_error);
else
CSIO_INC_STATS(scsim, n_cls_busy_error);
}
}
/*
* csio_scsi_abort_io_q - Abort all I/Os on given queue
* @scm: SCSI module.
* @q: Queue to abort.
* @tmo: Timeout in ms
*
* Attempt to abort all I/Os on the given queue, and wait for a max
* of tmo milliseconds for them to complete. Returns success
* if all I/Os are aborted. Else returns -ETIMEDOUT.
* Should be entered with lock held. Exits with lock held.
* NOTE:
* Lock has to be held across the loop that aborts I/Os, since dropping the lock
* in between can cause the list to be corrupted. As a result, the caller
* of this function has to ensure that the number of I/Os to be aborted
* is small enough not to cause lock-held-for-too-long issues.
*/
static int
csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
{
struct csio_hw *hw = scm->hw;
struct list_head *tmp, *next;
int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
struct scsi_cmnd *scmnd;
if (list_empty(q))
return 0;
csio_dbg(hw, "Aborting SCSI I/Os\n");
/* Now abort/close I/Os in the queue passed */
list_for_each_safe(tmp, next, q) {
scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);
csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);
}
/* Wait till all active I/Os are completed/aborted/closed */
while (!list_empty(q) && count--) {
spin_unlock_irq(&hw->lock);
msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
spin_lock_irq(&hw->lock);
}
/* all aborts completed */
if (list_empty(q))
return 0;
return -ETIMEDOUT;
}
/*
* csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module.
* @scm: SCSI module.
* @abort: abort required.
* Called with lock held, should exit with lock held.
* Can sleep when waiting for I/Os to complete.
*/
int
csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
{
struct csio_hw *hw = scm->hw;
int rv = 0;
int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
/* No I/Os pending */
if (list_empty(&scm->active_q))
return 0;
/* Wait until all active I/Os are completed */
while (!list_empty(&scm->active_q) && count--) {
spin_unlock_irq(&hw->lock);
msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
spin_lock_irq(&hw->lock);
}
/* all I/Os completed */
if (list_empty(&scm->active_q))
return 0;
/* Else abort */
if (abort) {
rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
if (rv == 0)
return rv;
csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
}
csio_scsi_cleanup_io_q(scm, &scm->active_q);
CSIO_DB_ASSERT(list_empty(&scm->active_q));
return rv;
}
/*
* csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode.
* @scm: SCSI module.
* @lnode: lnode
*
* Called with lock held, should exit with lock held.
* Can sleep (with dropped lock) when waiting for I/Os to complete.
*/
int
csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)
{
struct csio_hw *hw = scm->hw;
struct csio_scsi_level_data sld;
int rv;
int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);
sld.level = CSIO_LEV_LNODE;
sld.lnode = ln;
INIT_LIST_HEAD(&ln->cmpl_q);
csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);
/* No I/Os pending on this lnode */
if (list_empty(&ln->cmpl_q))
return 0;
/* Wait until all active I/Os on this lnode are completed */
while (!list_empty(&ln->cmpl_q) && count--) {
spin_unlock_irq(&hw->lock);
msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
spin_lock_irq(&hw->lock);
}
/* all I/Os completed */
if (list_empty(&ln->cmpl_q))
return 0;
csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);
/* I/Os are pending, abort them */
rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
if (rv != 0) {
csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
}
CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));
return rv;
}
static ssize_t
csio_show_hw_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
struct csio_hw *hw = csio_lnode_to_hw(ln);
if (csio_is_hw_ready(hw))
return snprintf(buf, PAGE_SIZE, "ready\n");
else
return snprintf(buf, PAGE_SIZE, "not ready\n");
}
/* Device reset */
static ssize_t
csio_device_reset(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
struct csio_hw *hw = csio_lnode_to_hw(ln);
if (*buf != '1')
return -EINVAL;
/* Delete NPIV lnodes */
csio_lnodes_exit(hw, 1);
/* Block upper IOs */
csio_lnodes_block_request(hw);
spin_lock_irq(&hw->lock);
csio_hw_reset(hw);
spin_unlock_irq(&hw->lock);
/* Unblock upper IOs */
csio_lnodes_unblock_request(hw);
return count;
}
/* disable port */
static ssize_t
csio_disable_port(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
struct csio_hw *hw = csio_lnode_to_hw(ln);
bool disable;
if (*buf == '1' || *buf == '0')
disable = (*buf == '1') ? true : false;
else
return -EINVAL;
/* Block upper IOs */
csio_lnodes_block_by_port(hw, ln->portid);
spin_lock_irq(&hw->lock);
csio_disable_lnodes(hw, ln->portid, disable);
spin_unlock_irq(&hw->lock);
/* Unblock upper IOs */
csio_lnodes_unblock_by_port(hw, ln->portid);
return count;
}
/* Show debug level */
static ssize_t
csio_show_dbg_level(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
}
/* Store debug level */
static ssize_t
csio_store_dbg_level(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
struct csio_hw *hw = csio_lnode_to_hw(ln);
uint32_t dbg_level = 0;
if (!isdigit(buf[0]))
return -EINVAL;
if (sscanf(buf, "%i", &dbg_level) != 1)
return -EINVAL;
ln->params.log_level = dbg_level;
hw->params.log_level = dbg_level;
return count;
}
static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
static DEVICE_ATTR(device_reset, S_IRUGO | S_IWUSR, NULL, csio_device_reset);
static DEVICE_ATTR(disable_port, S_IRUGO | S_IWUSR, NULL, csio_disable_port);
static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
csio_store_dbg_level);
static struct device_attribute *csio_fcoe_lport_attrs[] = {
&dev_attr_hw_state,
&dev_attr_device_reset,
&dev_attr_disable_port,
&dev_attr_dbg_level,
NULL,
};
static ssize_t
csio_show_num_reg_rnodes(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
}
static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
static struct device_attribute *csio_fcoe_vport_attrs[] = {
&dev_attr_num_reg_rnodes,
&dev_attr_dbg_level,
NULL,
};
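/*
* csio_scsi_copy_to_sgl - Copy DDP data back into the command's SGL.
* @hw: HW module.
* @req: I/O request whose gen_list holds the driver DDP buffers.
*
* Used only for dcopy (unaligned DDP) requests: walks the internal DDP
* buffers and the command's scatterlist in lockstep, copying
* page-bounded chunks via kmap_atomic(). Returns DID_OK, or DID_ERROR
* if any bytes could not be copied.
*/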
static inline uint32_t
csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
{
struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
struct scatterlist *sg;
uint32_t bytes_left;
uint32_t bytes_copy;
uint32_t buf_off = 0;
uint32_t start_off = 0;
uint32_t sg_off = 0;
void *sg_addr;
void *buf_addr;
struct csio_dma_buf *dma_buf;
bytes_left = scsi_bufflen(scmnd);
sg = scsi_sglist(scmnd);
dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);
/* Copy data from driver buffer to SGs of SCSI CMD */
while (bytes_left > 0 && sg && dma_buf) {
if (buf_off >= dma_buf->len) {
buf_off = 0;
dma_buf = (struct csio_dma_buf *)
csio_list_next(dma_buf);
continue;
}
if (start_off >= sg->length) {
start_off -= sg->length;
sg = sg_next(sg);
continue;
}
buf_addr = dma_buf->vaddr + buf_off;
sg_off = sg->offset + start_off;
bytes_copy = min((dma_buf->len - buf_off),
sg->length - start_off);
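/*
* Further clamp the copy so it never crosses a page boundary:
* kmap_atomic() below maps only a single page of the SG element.
*/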
bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
bytes_copy);
sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
if (!sg_addr) {
csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
sg, req);
break;
}
csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",
sg_addr, sg_off, buf_addr, bytes_copy);
memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
kunmap_atomic(sg_addr);
start_off += bytes_copy;
buf_off += bytes_copy;
bytes_left -= bytes_copy;
}
if (bytes_left > 0)
return DID_ERROR;
else
return DID_OK;
}
/*
* csio_scsi_err_handler - SCSI error handler.
* @hw: HW module.
* @req: IO request.
*
*/
static inline void
csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
{
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
struct csio_scsim *scm = csio_hw_to_scsim(hw);
struct fcp_resp_with_ext *fcp_resp;
struct fcp_resp_rsp_info *rsp_info;
struct csio_dma_buf *dma_buf;
uint8_t flags, scsi_status = 0;
uint32_t host_status = DID_OK;
uint32_t rsp_len = 0, sns_len = 0;
struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
switch (req->wr_status) {
case FW_HOSTERROR:
if (unlikely(!csio_is_hw_ready(hw)))
return;
host_status = DID_ERROR;
CSIO_INC_STATS(scm, n_hosterror);
break;
case FW_SCSI_RSP_ERR:
dma_buf = &req->dma_buf;
fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
flags = fcp_resp->resp.fr_flags;
scsi_status = fcp_resp->resp.fr_status;
if (flags & FCP_RSP_LEN_VAL) {
rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||
(rsp_info->rsp_code != FCP_TMF_CMPL)) {
host_status = DID_ERROR;
goto out;
}
}
if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
if (sns_len > SCSI_SENSE_BUFFERSIZE)
sns_len = SCSI_SENSE_BUFFERSIZE;
memcpy(cmnd->sense_buffer,
&rsp_info->_fr_resvd[0] + rsp_len, sns_len);
CSIO_INC_STATS(scm, n_autosense);
}
scsi_set_resid(cmnd, 0);
/* Under run */
if (flags & FCP_RESID_UNDER) {
scsi_set_resid(cmnd,
be32_to_cpu(fcp_resp->ext.fr_resid));
if (!(flags & FCP_SNS_LEN_VAL) &&
(scsi_status == SAM_STAT_GOOD) &&
((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
< cmnd->underflow))
host_status = DID_ERROR;
} else if (flags & FCP_RESID_OVER)
host_status = DID_ERROR;
CSIO_INC_STATS(scm, n_rsperror);
break;
case FW_SCSI_OVER_FLOW_ERR:
csio_warn(hw,
"Over-flow error,cmnd:0x%x expected len:0x%x"
" resid:0x%x\n", cmnd->cmnd[0],
scsi_bufflen(cmnd), scsi_get_resid(cmnd));
host_status = DID_ERROR;
CSIO_INC_STATS(scm, n_ovflerror);
break;
case FW_SCSI_UNDER_FLOW_ERR:
csio_warn(hw,
"Under-flow error,cmnd:0x%x expected"
" len:0x%x resid:0x%x lun:0x%x ssn:0x%x\n",
cmnd->cmnd[0], scsi_bufflen(cmnd),
scsi_get_resid(cmnd), cmnd->device->lun,
rn->flowid);
host_status = DID_ERROR;
CSIO_INC_STATS(scm, n_unflerror);
break;
case FW_SCSI_ABORT_REQUESTED:
case FW_SCSI_ABORTED:
case FW_SCSI_CLOSE_REQUESTED:
csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
cmnd->cmnd[0],
(req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
"closed" : "aborted");
/*
* csio_eh_abort_handler checks this value to
* succeed or fail the abort request.
*/
host_status = DID_REQUEUE;
if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
CSIO_INC_STATS(scm, n_closed);
else
CSIO_INC_STATS(scm, n_aborted);
break;
case FW_SCSI_ABORT_TIMEDOUT:
/* FW timed out the abort itself */
csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",
req, cmnd, req->wr_status);
host_status = DID_ERROR;
CSIO_INC_STATS(scm, n_abrt_timedout);
break;
case FW_RDEV_NOT_READY:
/*
* In firmware, an RDEV can get into this state
* temporarily, before moving into the disappeared/lost
* state. So, the driver should complete the request equivalent
* to device-disappeared!
*/
CSIO_INC_STATS(scm, n_rdev_nr_error);
host_status = DID_ERROR;
break;
case FW_ERR_RDEV_LOST:
CSIO_INC_STATS(scm, n_rdev_lost_error);
host_status = DID_ERROR;
break;
case FW_ERR_RDEV_LOGO:
CSIO_INC_STATS(scm, n_rdev_logo_error);
host_status = DID_ERROR;
break;
case FW_ERR_RDEV_IMPL_LOGO:
host_status = DID_ERROR;
break;
case FW_ERR_LINK_DOWN:
CSIO_INC_STATS(scm, n_link_down_error);
host_status = DID_ERROR;
break;
case FW_FCOE_NO_XCHG:
CSIO_INC_STATS(scm, n_no_xchg_error);
host_status = DID_ERROR;
break;
default:
csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n",
req->wr_status, req, cmnd);
CSIO_DB_ASSERT(0);
CSIO_INC_STATS(scm, n_unknown_error);
host_status = DID_ERROR;
break;
}
out:
if (req->nsge > 0)
scsi_dma_unmap(cmnd);
cmnd->result = (((host_status) << 16) | scsi_status);
cmnd->scsi_done(cmnd);
/* Wake up waiting threads */
csio_scsi_cmnd(req) = NULL;
complete_all(&req->cmplobj);
}
/*
* csio_scsi_cbfn - SCSI callback function.
* @hw: HW module.
* @req: IO request.
*
*/
static void
csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
{
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
uint8_t scsi_status = SAM_STAT_GOOD;
uint32_t host_status = DID_OK;
if (likely(req->wr_status == FW_SUCCESS)) {
if (req->nsge > 0) {
scsi_dma_unmap(cmnd);
if (req->dcopy)
host_status = csio_scsi_copy_to_sgl(hw, req);
}
cmnd->result = (((host_status) << 16) | scsi_status);
cmnd->scsi_done(cmnd);
csio_scsi_cmnd(req) = NULL;
CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
} else {
/* Error handling */
csio_scsi_err_handler(hw, req);
}
}
/**
* csio_queuecommand - Entry point to kickstart an I/O request.
* @host: The scsi_host pointer.
* @cmnd: The I/O request from ML.
*
* This routine does the following:
* - Checks for HW and Rnode module readiness.
* - Gets a free ioreq structure (which is already initialized
* to uninit during its allocation).
* - Maps SG elements.
* - Initializes ioreq members.
* - Kicks off the SCSI state machine for this IO.
* - Returns busy status on error.
*/
static int
csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
{
struct csio_lnode *ln = shost_priv(host);
struct csio_hw *hw = csio_lnode_to_hw(ln);
struct csio_scsim *scsim = csio_hw_to_scsim(hw);
struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
struct csio_ioreq *ioreq = NULL;
unsigned long flags;
int nsge = 0;
int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
int retval;
int cpu;
struct csio_scsi_qset *sqset;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
if (!blk_rq_cpu_valid(cmnd->request))
cpu = smp_processor_id();
else
cpu = cmnd->request->cpu;
sqset = &hw->sqset[ln->portid][cpu];
nr = fc_remote_port_chkready(rport);
if (nr) {
cmnd->result = nr;
CSIO_INC_STATS(scsim, n_rn_nr_error);
goto err_done;
}
if (unlikely(!csio_is_hw_ready(hw))) {
cmnd->result = (DID_REQUEUE << 16);
CSIO_INC_STATS(scsim, n_hw_nr_error);
goto err_done;
}
/* Get req->nsge, if there are SG elements to be mapped */
nsge = scsi_dma_map(cmnd);
if (unlikely(nsge < 0)) {
CSIO_INC_STATS(scsim, n_dmamap_error);
goto err;
}
/* Do we support so many mappings? */
if (unlikely(nsge > scsim->max_sge)) {
csio_warn(hw,
"More SGEs than can be supported."
" SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
CSIO_INC_STATS(scsim, n_unsupp_sge_error);
goto err_dma_unmap;
}
/* Get a free ioreq structure - SM is already set to uninit */
ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
if (!ioreq) {
csio_err(hw, "Out of I/O request elements. Active #:%d\n",
scsim->stats.n_active);
CSIO_INC_STATS(scsim, n_no_req_error);
goto err_dma_unmap;
}
ioreq->nsge = nsge;
ioreq->lnode = ln;
ioreq->rnode = rn;
ioreq->iq_idx = sqset->iq_idx;
ioreq->eq_idx = sqset->eq_idx;
ioreq->wr_status = 0;
ioreq->drv_status = 0;
csio_scsi_cmnd(ioreq) = (void *)cmnd;
ioreq->tmo = 0;
ioreq->datadir = cmnd->sc_data_direction;
if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
CSIO_INC_STATS(ln, n_output_requests);
ln->stats.n_output_bytes += scsi_bufflen(cmnd);
} else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
CSIO_INC_STATS(ln, n_input_requests);
ln->stats.n_input_bytes += scsi_bufflen(cmnd);
} else
CSIO_INC_STATS(ln, n_control_requests);
/* Set cbfn */
ioreq->io_cbfn = csio_scsi_cbfn;
/* Needed during abort */
cmnd->host_scribble = (unsigned char *)ioreq;
cmnd->SCp.Message = 0;
/* Kick off SCSI IO SM on the ioreq */
spin_lock_irqsave(&hw->lock, flags);
retval = csio_scsi_start_io(ioreq);
spin_unlock_irqrestore(&hw->lock, flags);
if (retval != 0) {
csio_err(hw, "ioreq: %p couldnt be started, status:%d\n",
ioreq, retval);
CSIO_INC_STATS(scsim, n_busy_error);
goto err_put_req;
}
return 0;
err_put_req:
csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
err_dma_unmap:
if (nsge > 0)
scsi_dma_unmap(cmnd);
err:
return rv;
err_done:
cmnd->scsi_done(cmnd);
return 0;
}
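/*
* csio_do_abrt_cls - Issue an abort or close for an active ioreq.
* @hw: HW module.
* @ioreq: The I/O request to abort/close.
* @abort: SCSI_ABORT or SCSI_CLOSE.
*
* Called with the HW lock held (from the EH abort handler and, via
* csio_abrt_cls(), from the abort-queue path). Sets the abort timeout
* and posts the abort/close through csio_scsi_abort()/csio_scsi_close().
*/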
static int
csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort)
{
int rv;
int cpu = smp_processor_id();
struct csio_lnode *ln = ioreq->lnode;
struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];
ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
/*
* Use current processor queue for posting the abort/close, but retain
* the ingress queue ID of the original I/O being aborted/closed - we
* need the abort/close completion to be received on the same queue
* as the original I/O.
*/
ioreq->eq_idx = sqset->eq_idx;
if (abort == SCSI_ABORT)
rv = csio_scsi_abort(ioreq);
else
rv = csio_scsi_close(ioreq);
return rv;
}
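/*
* csio_eh_abort_handler - SCSI midlayer abort entry point.
* @cmnd: The command to abort.
*
* Issues an abort (or a close, if the lnode is not ready) for the ioreq
* associated with @cmnd and waits up to CSIO_SCSI_ABRT_TMO_MS for the
* FW to respond. Returns SUCCESS if the command had already completed
* or was successfully aborted (DID_REQUEUE), FAILED otherwise.
*/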
static int
csio_eh_abort_handler(struct scsi_cmnd *cmnd)
{
struct csio_ioreq *ioreq;
struct csio_lnode *ln = shost_priv(cmnd->device->host);
struct csio_hw *hw = csio_lnode_to_hw(ln);
struct csio_scsim *scsim = csio_hw_to_scsim(hw);
int ready = 0, ret;
unsigned long tmo = 0;
int rv;
struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
ret = fc_block_scsi_eh(cmnd);
if (ret)
return ret;
ioreq = (struct csio_ioreq *)cmnd->host_scribble;
if (!ioreq)
return SUCCESS;
if (!rn)
return FAILED;
csio_dbg(hw,
"Request to abort ioreq:%p cmd:%p cdb:%08llx"
" ssni:0x%x lun:%d iq:0x%x\n",
ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));
if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) {
CSIO_INC_STATS(scsim, n_abrt_race_comp);
return SUCCESS;
}
ready = csio_is_lnode_ready(ln);
tmo = CSIO_SCSI_ABRT_TMO_MS;
spin_lock_irq(&hw->lock);
rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
spin_unlock_irq(&hw->lock);
if (rv != 0) {
if (rv == -EINVAL) {
/* Return success, if abort/close request issued on
* already completed IO
*/
return SUCCESS;
}
if (ready)
CSIO_INC_STATS(scsim, n_abrt_busy_error);
else
CSIO_INC_STATS(scsim, n_cls_busy_error);
goto inval_scmnd;
}
/* Wait for completion */
init_completion(&ioreq->cmplobj);
wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
/* FW didn't respond to the abort within our timeout */
if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
CSIO_INC_STATS(scsim, n_abrt_timedout);
inval_scmnd:
if (ioreq->nsge > 0)
scsi_dma_unmap(cmnd);
spin_lock_irq(&hw->lock);
csio_scsi_cmnd(ioreq) = NULL;
spin_unlock_irq(&hw->lock);
cmnd->result = (DID_ERROR << 16);
cmnd->scsi_done(cmnd);
return FAILED;
}
/* FW successfully aborted the request */
if (host_byte(cmnd->result) == DID_REQUEUE) {
csio_info(hw,
"Aborted SCSI command to (%d:%d) serial#:0x%lx\n",
cmnd->device->id, cmnd->device->lun,
cmnd->serial_number);
return SUCCESS;
} else {
csio_info(hw,
"Failed to abort SCSI command, (%d:%d) serial#:0x%lx\n",
cmnd->device->id, cmnd->device->lun,
cmnd->serial_number);
return FAILED;
}
}
/*
* csio_tm_cbfn - TM callback function.
* @hw: HW module.
* @req: IO request.
*
* Cache the result in 'cmnd', since ioreq will be freed soon
* after we return from here, and the waiting thread shouldn't trust
* the ioreq contents.
*/
static void
csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
{
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
struct csio_dma_buf *dma_buf;
uint8_t flags = 0;
struct fcp_resp_with_ext *fcp_resp;
struct fcp_resp_rsp_info *rsp_info;
csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",
req, req->wr_status);
/* Cache FW return status */
cmnd->SCp.Status = req->wr_status;
/* Special handling based on FCP response */
/*
* FW returns us this error if flags were set. FCP4 says
* FCP_RSP_LEN_VAL in flags shall be set for TM completions.
* So if a target were to set this bit, we expect that the
* rsp_code is set to FCP_TMF_CMPL for a successful TM
* completion. Any other rsp_code means TM operation failed.
* If a target were to just ignore setting flags, we treat
* the TM operation as success, and FW returns FW_SUCCESS.
*/
if (req->wr_status == FW_SCSI_RSP_ERR) {
dma_buf = &req->dma_buf;
fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
flags = fcp_resp->resp.fr_flags;
/* Modify return status if flags indicate success */
if (flags & FCP_RSP_LEN_VAL)
if (rsp_info->rsp_code == FCP_TMF_CMPL)
cmnd->SCp.Status = FW_SUCCESS;
csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
}
/* Wake up the TM handler thread */
csio_scsi_cmnd(req) = NULL;
}
static int
csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
{
struct csio_lnode *ln = shost_priv(cmnd->device->host);
struct csio_hw *hw = csio_lnode_to_hw(ln);
struct csio_scsim *scsim = csio_hw_to_scsim(hw);
struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
struct csio_ioreq *ioreq = NULL;
struct csio_scsi_qset *sqset;
unsigned long flags;
int retval;
int count, ret;
LIST_HEAD(local_q);
struct csio_scsi_level_data sld;
if (!rn)
goto fail;
csio_dbg(hw, "Request to reset LUN:%d (ssni:0x%x tgtid:%d)\n",
cmnd->device->lun, rn->flowid, rn->scsi_id);
if (!csio_is_lnode_ready(ln)) {
csio_err(hw,
"LUN reset cannot be issued on non-ready"
" local node vnpi:0x%x (LUN:%d)\n",
ln->vnp_flowid, cmnd->device->lun);
goto fail;
}
/* Lnode is ready, now wait on rport node readiness */
ret = fc_block_scsi_eh(cmnd);
if (ret)
return ret;
/*
* If we have blocked in the previous call, at this point, either the
* remote node has come back online, or device loss timer has fired
* and the remote node is destroyed. Allow the LUN reset only for
* the former case, since LUN reset is a TMF I/O on the wire, and we
* need a valid session to issue it.
*/
if (fc_remote_port_chkready(rn->rport)) {
csio_err(hw,
"LUN reset cannot be issued on non-ready"
" remote node ssni:0x%x (LUN:%d)\n",
rn->flowid, cmnd->device->lun);
goto fail;
}
/* Get a free ioreq structure - SM is already set to uninit */
ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
if (!ioreq) {
csio_err(hw, "Out of IO request elements. Active # :%d\n",
scsim->stats.n_active);
goto fail;
}
sqset = &hw->sqset[ln->portid][smp_processor_id()];
ioreq->nsge = 0;
ioreq->lnode = ln;
ioreq->rnode = rn;
ioreq->iq_idx = sqset->iq_idx;
ioreq->eq_idx = sqset->eq_idx;
csio_scsi_cmnd(ioreq) = cmnd;
cmnd->host_scribble = (unsigned char *)ioreq;
cmnd->SCp.Status = 0;
cmnd->SCp.Message = FCP_TMF_LUN_RESET;
ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;
/*
* FW times the LUN reset for ioreq->tmo, so we have to wait a little
* longer (10s for now) than that to allow FW to return the timed
* out command.
*/
count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);
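/*
 * Illustrative arithmetic (a sketch using the defaults from csio_scsi.h):
 * CSIO_SCSI_LUNRST_TMO_MS = 60000 gives ioreq->tmo = 60s, and with
 * CSIO_SCSI_TM_POLL_MS = 2000 the poll count above becomes
 * DIV_ROUND_UP((60 + 10) * 1000, 2000) = 35 polls of 2s each,
 * i.e. a maximum wait of 70 seconds for the TM completion.
 */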
/* Set cbfn */
ioreq->io_cbfn = csio_tm_cbfn;
/* Save off the ioreq info for later use */
sld.level = CSIO_LEV_LUN;
sld.lnode = ioreq->lnode;
sld.rnode = ioreq->rnode;
sld.oslun = (uint64_t)cmnd->device->lun;
spin_lock_irqsave(&hw->lock, flags);
/* Kick off TM SM on the ioreq */
retval = csio_scsi_start_tm(ioreq);
spin_unlock_irqrestore(&hw->lock, flags);
if (retval != 0) {
csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
ioreq, retval);
goto fail_ret_ioreq;
}
csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
count * (CSIO_SCSI_TM_POLL_MS / 1000));
/* Wait for completion */
while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
&& count--)
msleep(CSIO_SCSI_TM_POLL_MS);
/* LUN reset timed-out */
if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
csio_err(hw, "LUN reset (%d:%d) timed out\n",
cmnd->device->id, cmnd->device->lun);
spin_lock_irq(&hw->lock);
csio_scsi_drvcleanup(ioreq);
list_del_init(&ioreq->sm.sm_list);
spin_unlock_irq(&hw->lock);
goto fail_ret_ioreq;
}
/* LUN reset returned, check cached status */
if (cmnd->SCp.Status != FW_SUCCESS) {
csio_err(hw, "LUN reset failed (%d:%d), status: %d\n",
cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
goto fail;
}
/* LUN reset succeeded, Start aborting affected I/Os */
/*
* Since the host guarantees that there will not be any more
* I/Os to that LUN until the LUN reset completes, we gather
* the pending I/Os after the LUN reset.
*/
spin_lock_irq(&hw->lock);
csio_scsi_gather_active_ios(scsim, &sld, &local_q);
retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
spin_unlock_irq(&hw->lock);
/* Aborts may have timed out */
if (retval != 0) {
csio_err(hw,
"Attempt to abort I/Os during LUN reset of %d"
" returned %d\n", cmnd->device->lun, retval);
/* Return I/Os back to active_q */
spin_lock_irq(&hw->lock);
list_splice_tail_init(&local_q, &scsim->active_q);
spin_unlock_irq(&hw->lock);
goto fail;
}
CSIO_INC_STATS(rn, n_lun_rst);
csio_info(hw, "LUN reset occurred (%d:%d)\n",
cmnd->device->id, cmnd->device->lun);
return SUCCESS;
fail_ret_ioreq:
csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
fail:
CSIO_INC_STATS(rn, n_lun_rst_fail);
return FAILED;
}
static int
csio_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));
return 0;
}
static int
csio_slave_configure(struct scsi_device *sdev)
{
if (sdev->tagged_supported)
scsi_activate_tcq(sdev, csio_lun_qdepth);
else
scsi_deactivate_tcq(sdev, csio_lun_qdepth);
return 0;
}
static void
csio_slave_destroy(struct scsi_device *sdev)
{
sdev->hostdata = NULL;
}
static int
csio_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct csio_lnode *ln = shost_priv(shost);
int rv = 1;
spin_lock_irq(shost->host_lock);
if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
goto out;
rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,
csio_delta_scan_tmo * HZ);
out:
spin_unlock_irq(shost->host_lock);
return rv;
}
struct scsi_host_template csio_fcoe_shost_template = {
.module = THIS_MODULE,
.name = CSIO_DRV_DESC,
.proc_name = KBUILD_MODNAME,
.queuecommand = csio_queuecommand,
.eh_abort_handler = csio_eh_abort_handler,
.eh_device_reset_handler = csio_eh_lun_reset_handler,
.slave_alloc = csio_slave_alloc,
.slave_configure = csio_slave_configure,
.slave_destroy = csio_slave_destroy,
.scan_finished = csio_scan_finished,
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = csio_fcoe_lport_attrs,
.max_sectors = CSIO_MAX_SECTOR_SIZE,
};
struct scsi_host_template csio_fcoe_shost_vport_template = {
.module = THIS_MODULE,
.name = CSIO_DRV_DESC,
.proc_name = KBUILD_MODNAME,
.queuecommand = csio_queuecommand,
.eh_abort_handler = csio_eh_abort_handler,
.eh_device_reset_handler = csio_eh_lun_reset_handler,
.slave_alloc = csio_slave_alloc,
.slave_configure = csio_slave_configure,
.slave_destroy = csio_slave_destroy,
.scan_finished = csio_scan_finished,
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = csio_fcoe_vport_attrs,
.max_sectors = CSIO_MAX_SECTOR_SIZE,
};
/*
* csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
* @scm: SCSI Module
* @hw: HW device.
* @buf_size: buffer size
* @num_buf : Number of buffers.
*
* This routine allocates the DMA buffers required for SCSI data xfer,
* when the SGL buffers of a SCSI Read request posted by the SCSI
* midlayer are not virtually contiguous.
*/
static int
csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
int buf_size, int num_buf)
{
int n = 0;
struct list_head *tmp;
struct csio_dma_buf *ddp_desc = NULL;
uint32_t unit_size = 0;
if (!num_buf)
return 0;
if (!buf_size)
return -EINVAL;
INIT_LIST_HEAD(&scm->ddp_freelist);
/* Align buf size to page size */
buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
/* Initialize dma descriptors */
for (n = 0; n < num_buf; n++) {
/* Set unit size to request size */
unit_size = buf_size;
ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
if (!ddp_desc) {
csio_err(hw,
"Failed to allocate ddp descriptors,"
" Num allocated = %d.\n",
scm->stats.n_free_ddp);
goto no_mem;
}
/* Allocate Dma buffers for DDP */
ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size,
&ddp_desc->paddr);
if (!ddp_desc->vaddr) {
csio_err(hw,
"SCSI response DMA buffer (ddp) allocation"
" failed!\n");
kfree(ddp_desc);
goto no_mem;
}
ddp_desc->len = unit_size;
/* Add it to the scsi ddp freelist */
list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
CSIO_INC_STATS(scm, n_free_ddp);
}
return 0;
no_mem:
/* release dma descs back to freelist and free dma memory */
list_for_each(tmp, &scm->ddp_freelist) {
ddp_desc = (struct csio_dma_buf *) tmp;
tmp = csio_list_prev(tmp);
pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
ddp_desc->paddr);
list_del_init(&ddp_desc->list);
kfree(ddp_desc);
}
scm->stats.n_free_ddp = 0;
return -ENOMEM;
}
/*
* csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs.
* @scm: SCSI Module
* @hw: HW device.
*
* This routine frees ddp buffers.
*/
static void
csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
{
struct list_head *tmp;
struct csio_dma_buf *ddp_desc;
/* release dma descs back to freelist and free dma memory */
list_for_each(tmp, &scm->ddp_freelist) {
ddp_desc = (struct csio_dma_buf *) tmp;
tmp = csio_list_prev(tmp);
pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
ddp_desc->paddr);
list_del_init(&ddp_desc->list);
kfree(ddp_desc);
}
scm->stats.n_free_ddp = 0;
}
/**
* csio_scsim_init - Initialize SCSI Module
* @scm: SCSI Module
* @hw: HW module
*
*/
int
csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
{
int i;
struct csio_ioreq *ioreq;
struct csio_dma_buf *dma_buf;
INIT_LIST_HEAD(&scm->active_q);
scm->hw = hw;
scm->proto_cmd_len = sizeof(struct fcp_cmnd);
scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
scm->max_sge = CSIO_SCSI_MAX_SGE;
spin_lock_init(&scm->freelist_lock);
/* Pre-allocate ioreqs and initialize them */
INIT_LIST_HEAD(&scm->ioreq_freelist);
for (i = 0; i < csio_scsi_ioreqs; i++) {
ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
if (!ioreq) {
csio_err(hw,
"I/O request element allocation failed, "
" Num allocated = %d.\n",
scm->stats.n_free_ioreq);
goto free_ioreq;
}
/* Allocate Dma buffers for Response Payload */
dma_buf = &ioreq->dma_buf;
dma_buf->vaddr = pci_pool_alloc(hw->scsi_pci_pool, GFP_KERNEL,
&dma_buf->paddr);
if (!dma_buf->vaddr) {
csio_err(hw,
"SCSI response DMA buffer allocation"
" failed!\n");
kfree(ioreq);
goto free_ioreq;
}
dma_buf->len = scm->proto_rsp_len;
/* Set state to uninit */
csio_init_state(&ioreq->sm, csio_scsis_uninit);
INIT_LIST_HEAD(&ioreq->gen_list);
init_completion(&ioreq->cmplobj);
list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
CSIO_INC_STATS(scm, n_free_ioreq);
}
if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
goto free_ioreq;
return 0;
free_ioreq:
/*
* Free up existing allocations, since an error
* from here means we are returning for good
*/
while (!list_empty(&scm->ioreq_freelist)) {
struct csio_sm *tmp;
tmp = list_first_entry(&scm->ioreq_freelist,
struct csio_sm, sm_list);
list_del_init(&tmp->sm_list);
ioreq = (struct csio_ioreq *)tmp;
dma_buf = &ioreq->dma_buf;
pci_pool_free(hw->scsi_pci_pool, dma_buf->vaddr,
dma_buf->paddr);
kfree(ioreq);
}
scm->stats.n_free_ioreq = 0;
return -ENOMEM;
}
/**
* csio_scsim_exit - Uninitialize SCSI Module
* @scm: SCSI Module
*
*/
void
csio_scsim_exit(struct csio_scsim *scm)
{
struct csio_ioreq *ioreq;
struct csio_dma_buf *dma_buf;
while (!list_empty(&scm->ioreq_freelist)) {
struct csio_sm *tmp;
tmp = list_first_entry(&scm->ioreq_freelist,
struct csio_sm, sm_list);
list_del_init(&tmp->sm_list);
ioreq = (struct csio_ioreq *)tmp;
dma_buf = &ioreq->dma_buf;
pci_pool_free(scm->hw->scsi_pci_pool, dma_buf->vaddr,
dma_buf->paddr);
kfree(ioreq);
}
scm->stats.n_free_ioreq = 0;
csio_scsi_free_ddp_bufs(scm, scm->hw);
}
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_SCSI_H__
#define __CSIO_SCSI_H__
#include <linux/spinlock_types.h>
#include <linux/completion.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fcp.h>
#include "csio_defs.h"
#include "csio_wr.h"
extern struct scsi_host_template csio_fcoe_shost_template;
extern struct scsi_host_template csio_fcoe_shost_vport_template;
extern int csio_scsi_eqsize;
extern int csio_scsi_iqlen;
extern int csio_scsi_ioreqs;
extern uint32_t csio_max_scan_tmo;
extern uint32_t csio_delta_scan_tmo;
extern int csio_lun_qdepth;
/*
**************************** NOTE *******************************
* How do we calculate MAX FCoE SCSI SGEs? Here is the math:
* Max Egress WR size = 512 bytes
* One SCSI egress WR has the following fixed no of bytes:
* 48 (sizeof(struct fw_scsi_write[read]_wr)) - FW WR
* + 32 (sizeof(struct fc_fcp_cmnd)) - Immediate FCP_CMD
* ------
* 80
* ------
* That leaves us with 512 - 80 = 432 bytes for the data SGE. Using
* struct ulptx_sgl header for the SGE consumes:
* - 4 bytes for cmnd_sge.
* - 12 bytes for the first SGL.
* That leaves us with 416 bytes for the remaining SGE pairs, which
* is 416 / 24 (sizeof(struct ulptx_sge_pair)) = 17 SGE pairs,
* or 34 SGEs. Adding the first SGE fetches us 35 SGEs.
*/
#define CSIO_SCSI_MAX_SGE 35
#define CSIO_SCSI_ABRT_TMO_MS 60000
#define CSIO_SCSI_LUNRST_TMO_MS 60000
#define CSIO_SCSI_TM_POLL_MS 2000 /* should be less than
* all TM timeouts.
*/
#define CSIO_SCSI_IQ_WRSZ 128
#define CSIO_SCSI_IQSIZE (csio_scsi_iqlen * CSIO_SCSI_IQ_WRSZ)
#define CSIO_MAX_SNS_LEN 128
#define CSIO_SCSI_RSP_LEN (FCP_RESP_WITH_EXT + 4 + CSIO_MAX_SNS_LEN)
/* Reference to scsi_cmnd */
#define csio_scsi_cmnd(req) ((req)->scratch1)
struct csio_scsi_stats {
uint64_t n_tot_success; /* Total number of good I/Os */
uint32_t n_rn_nr_error; /* No. of remote-node-not-
* ready errors
*/
uint32_t n_hw_nr_error; /* No. of hw-module-not-
* ready errors
*/
uint32_t n_dmamap_error; /* No. of DMA map errors */
uint32_t n_unsupp_sge_error; /* No. of too-many-SGes
* errors.
*/
uint32_t n_no_req_error; /* No. of Out-of-ioreqs error */
uint32_t n_busy_error; /* No. of -EBUSY errors */
uint32_t n_hosterror; /* No. of FW_HOSTERROR I/O */
uint32_t n_rsperror; /* No. of response errors */
uint32_t n_autosense; /* No. of auto sense replies */
uint32_t n_ovflerror; /* No. of overflow errors */
uint32_t n_unflerror; /* No. of underflow errors */
uint32_t n_rdev_nr_error;/* No. of rdev not
* ready errors
*/
uint32_t n_rdev_lost_error;/* No. of rdev lost errors */
uint32_t n_rdev_logo_error;/* No. of rdev logo errors */
uint32_t n_link_down_error;/* No. of link down errors */
uint32_t n_no_xchg_error; /* No. of no-exchange errors */
uint32_t n_unknown_error;/* No. of unhandled errors */
uint32_t n_aborted; /* No. of aborted I/Os */
uint32_t n_abrt_timedout; /* No. of abort timedouts */
uint32_t n_abrt_fail; /* No. of abort failures */
uint32_t n_abrt_dups; /* No. of duplicate aborts */
uint32_t n_abrt_race_comp; /* No. of aborts that raced
* with completions.
*/
uint32_t n_abrt_busy_error;/* No. of abort failures
* due to -EBUSY.
*/
uint32_t n_closed; /* No. of closed I/Os */
uint32_t n_cls_busy_error; /* No. of close failures
* due to -EBUSY.
*/
uint32_t n_active; /* No. of IOs in active_q */
uint32_t n_tm_active; /* No. of TMs in active_q */
uint32_t n_wcbfn; /* No. of I/Os in worker
* cbfn q
*/
uint32_t n_free_ioreq; /* No. of freelist entries */
uint32_t n_free_ddp; /* No. of DDP freelist */
uint32_t n_unaligned; /* No. of unaligned SGLs */
uint32_t n_inval_cplop; /* No. invalid CPL op's in IQ */
uint32_t n_inval_scsiop; /* No. invalid scsi op's in IQ*/
};
struct csio_scsim {
struct csio_hw *hw; /* Pointer to HW module */
uint8_t max_sge; /* Max SGE */
uint8_t proto_cmd_len; /* Proto specific SCSI
* cmd length
*/
uint16_t proto_rsp_len; /* Proto specific SCSI
* response length
*/
spinlock_t freelist_lock; /* Lock for ioreq freelist */
struct list_head active_q; /* Outstanding SCSI I/Os */
struct list_head ioreq_freelist; /* Free list of ioreq's */
struct list_head ddp_freelist; /* DDP descriptor freelist */
struct csio_scsi_stats stats; /* This module's statistics */
};
/* State machine defines */
enum csio_scsi_ev {
CSIO_SCSIE_START_IO = 1, /* Start a regular SCSI IO */
CSIO_SCSIE_START_TM, /* Start a TM IO */
CSIO_SCSIE_COMPLETED, /* IO Completed */
CSIO_SCSIE_ABORT, /* Abort IO */
CSIO_SCSIE_ABORTED, /* IO Aborted */
CSIO_SCSIE_CLOSE, /* Close exchange */
CSIO_SCSIE_CLOSED, /* Exchange closed */
CSIO_SCSIE_DRVCLEANUP, /* Driver wants to manually
* cleanup this I/O.
*/
};
enum csio_scsi_lev {
CSIO_LEV_ALL = 1,
CSIO_LEV_LNODE,
CSIO_LEV_RNODE,
CSIO_LEV_LUN,
};
struct csio_scsi_level_data {
enum csio_scsi_lev level;
struct csio_rnode *rnode;
struct csio_lnode *lnode;
uint64_t oslun;
};
static inline struct csio_ioreq *
csio_get_scsi_ioreq(struct csio_scsim *scm)
{
struct csio_sm *req;
if (likely(!list_empty(&scm->ioreq_freelist))) {
req = list_first_entry(&scm->ioreq_freelist,
struct csio_sm, sm_list);
list_del_init(&req->sm_list);
CSIO_DEC_STATS(scm, n_free_ioreq);
return (struct csio_ioreq *)req;
} else
return NULL;
}
static inline void
csio_put_scsi_ioreq(struct csio_scsim *scm, struct csio_ioreq *ioreq)
{
list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
CSIO_INC_STATS(scm, n_free_ioreq);
}
static inline void
csio_put_scsi_ioreq_list(struct csio_scsim *scm, struct list_head *reqlist,
int n)
{
list_splice_init(reqlist, &scm->ioreq_freelist);
scm->stats.n_free_ioreq += n;
}
static inline struct csio_dma_buf *
csio_get_scsi_ddp(struct csio_scsim *scm)
{
struct csio_dma_buf *ddp;
if (likely(!list_empty(&scm->ddp_freelist))) {
ddp = list_first_entry(&scm->ddp_freelist,
struct csio_dma_buf, list);
list_del_init(&ddp->list);
CSIO_DEC_STATS(scm, n_free_ddp);
return ddp;
} else
return NULL;
}
static inline void
csio_put_scsi_ddp(struct csio_scsim *scm, struct csio_dma_buf *ddp)
{
list_add_tail(&ddp->list, &scm->ddp_freelist);
CSIO_INC_STATS(scm, n_free_ddp);
}
static inline void
csio_put_scsi_ddp_list(struct csio_scsim *scm, struct list_head *reqlist,
int n)
{
list_splice_tail_init(reqlist, &scm->ddp_freelist);
scm->stats.n_free_ddp += n;
}
static inline void
csio_scsi_completed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_COMPLETED);
if (csio_list_deleted(&ioreq->sm.sm_list))
list_add_tail(&ioreq->sm.sm_list, cbfn_q);
}
static inline void
csio_scsi_aborted(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORTED);
list_add_tail(&ioreq->sm.sm_list, cbfn_q);
}
static inline void
csio_scsi_closed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSED);
list_add_tail(&ioreq->sm.sm_list, cbfn_q);
}
static inline void
csio_scsi_drvcleanup(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_DRVCLEANUP);
}
/*
* csio_scsi_start_io - Kick starts the IO SM.
* @req: io request SM.
*
* needs to be called with lock held.
*/
static inline int
csio_scsi_start_io(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_START_IO);
return ioreq->drv_status;
}
/*
* csio_scsi_start_tm - Kicks off the Task management IO SM.
* @req: io request SM.
*
* needs to be called with lock held.
*/
static inline int
csio_scsi_start_tm(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_START_TM);
return ioreq->drv_status;
}
/*
* csio_scsi_abort - Abort an IO request
* @req: io request SM.
*
* needs to be called with lock held.
*/
static inline int
csio_scsi_abort(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORT);
return ioreq->drv_status;
}
/*
* csio_scsi_close - Close an IO request
* @req: io request SM.
*
* needs to be called with lock held.
*/
static inline int
csio_scsi_close(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSE);
return ioreq->drv_status;
}
void csio_scsi_cleanup_io_q(struct csio_scsim *, struct list_head *);
int csio_scsim_cleanup_io(struct csio_scsim *, bool abort);
int csio_scsim_cleanup_io_lnode(struct csio_scsim *,
struct csio_lnode *);
struct csio_ioreq *csio_scsi_cmpl_handler(struct csio_hw *, void *, uint32_t,
struct csio_fl_dma_buf *,
void *, uint8_t **);
int csio_scsi_qconfig(struct csio_hw *);
int csio_scsim_init(struct csio_scsim *, struct csio_hw *);
void csio_scsim_exit(struct csio_scsim *);
#endif /* __CSIO_SCSI_H__ */
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <linux/cache.h>
#include "csio_hw.h"
#include "csio_wr.h"
#include "csio_mb.h"
#include "csio_defs.h"
int csio_intr_coalesce_cnt; /* value:SGE_INGRESS_RX_THRESHOLD[0] */
static int csio_sge_thresh_reg; /* SGE_INGRESS_RX_THRESHOLD[0] */
int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */
static int csio_sge_timer_reg = 1;
#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \
csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg)
static void
csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
{
sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +
reg * sizeof(uint32_t));
}
/* Free list buffer size */
static inline uint32_t
csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
{
return sge->sge_fl_buf_size[buf->paddr & 0xF];
}
/* Size of the egress queue status page */
static inline uint32_t
csio_wr_qstat_pgsz(struct csio_hw *hw)
{
return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ? 128 : 64;
}
/* Ring freelist doorbell */
static inline void
csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
{
/*
* Ring the doorbell only when we have at least CSIO_QCREDIT_SZ
* number of bytes in the freelist queue. This translates to at least
* 8 freelist buffer pointers (since each pointer is 8 bytes).
*/
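/*
 * Illustrative example (hypothetical numbers, not from the original
 * source): with inc_idx = 19, the doorbell below is rung with
 * PIDX(19 / 8) = 2 credits, i.e. 16 buffer pointers are handed to
 * hardware, and the remaining 19 & 7 = 3 pointers stay accumulated
 * in inc_idx for a later ring.
 */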
if (flq->inc_idx >= 8) {
csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
PIDX(flq->inc_idx / 8),
MYPF_REG(SGE_PF_KDOORBELL));
flq->inc_idx &= 7;
}
}
/* Write a 0 cidx increment value to enable SGE interrupts for this queue */
static void
csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
{
csio_wr_reg32(hw, CIDXINC(0) |
INGRESSQID(iqid) |
TIMERREG(X_TIMERREG_RESTART_COUNTER),
MYPF_REG(SGE_PF_GTS));
}
/*
* csio_wr_fill_fl - Populate the FL buffers of a FL queue.
* @hw: HW module.
* @flq: Freelist queue.
*
* Fill up freelist buffer entries with buffers of size specified
* in the size register.
*
*/
static int
csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
struct csio_sge *sge = &wrm->sge;
__be64 *d = (__be64 *)(flq->vstart);
struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
uint64_t paddr;
int sreg = flq->un.fl.sreg;
int n = flq->credits;
while (n--) {
buf->len = sge->sge_fl_buf_size[sreg];
buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
&buf->paddr);
if (!buf->vaddr) {
csio_err(hw, "Could only fill %d buffers!\n", n + 1);
return -ENOMEM;
}
paddr = buf->paddr | (sreg & 0xF);
*d++ = cpu_to_be64(paddr);
buf++;
}
return 0;
}
/*
* csio_wr_update_fl - Update freelist queue indices after a refill.
* @hw: HW module.
* @flq: Freelist queue.
* @n: Number of buffers added to the freelist.
*
*/
static inline void
csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)
{
flq->inc_idx += n;
flq->pidx += n;
if (unlikely(flq->pidx >= flq->credits))
flq->pidx -= (uint16_t)flq->credits;
CSIO_INC_STATS(flq, n_flq_refill);
}
/*
* csio_wr_alloc_q - Allocate a WR queue and initialize it.
* @hw: HW module
* @qsize: Size of the queue in bytes
* @wrsize: Size of WR in this queue, if fixed.
* @type: Type of queue (Ingress/Egress/Freelist)
* @owner: Module that owns this queue.
* @nflb: Number of freelist buffers for FL.
* @sreg: What is the FL buffer size register?
* @iq_int_handler: Ingress queue handler in INTx mode.
*
* This function allocates and sets up a queue for the caller
* of size qsize, aligned at the required boundary. This is subject to
* free entries being available in the queue array. If one is found,
* it is initialized with the allocated queue, marked as being used (owner),
* and a handle is returned to the caller in the form of the queue's index
* into the q_arr array.
* If user has indicated a freelist (by specifying nflb > 0), create
* another queue (with its own index into q_arr) for the freelist. Allocate
* memory for DMA buffer metadata (vaddr, len etc). Save off the freelist
* idx in the ingress queue's flq.idx. This is how a Freelist is associated
* with its owning ingress queue.
*/
int
csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
uint16_t type, void *owner, uint32_t nflb, int sreg,
iq_handler_t iq_intx_handler)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
struct csio_q *q, *flq;
int free_idx = wrm->free_qidx;
int ret_idx = free_idx;
uint32_t qsz;
int flq_idx;
if (free_idx >= wrm->num_q) {
csio_err(hw, "No more free queues.\n");
return -1;
}
switch (type) {
case CSIO_EGRESS:
qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);
break;
case CSIO_INGRESS:
switch (wrsize) {
case 16:
case 32:
case 64:
case 128:
break;
default:
csio_err(hw, "Invalid Ingress queue WR size:%d\n",
wrsize);
return -1;
}
/*
* Number of elements must be a multiple of 16
* So this includes status page size
*/
qsz = ALIGN(qsize/wrsize, 16) * wrsize;
break;
case CSIO_FREELIST:
qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw);
break;
default:
csio_err(hw, "Invalid queue type: 0x%x\n", type);
return -1;
}
q = wrm->q_arr[free_idx];
q->vstart = pci_alloc_consistent(hw->pdev, qsz, &q->pstart);
if (!q->vstart) {
csio_err(hw,
"Failed to allocate DMA memory for "
"queue at id: %d size: %d\n", free_idx, qsize);
return -1;
}
/*
* We need to zero out the contents, importantly for ingress,
* since we start with a generation bit of 1 for ingress.
*/
memset(q->vstart, 0, qsz);
q->type = type;
q->owner = owner;
q->pidx = q->cidx = q->inc_idx = 0;
q->size = qsz;
q->wr_sz = wrsize; /* If using fixed size WRs */
wrm->free_qidx++;
if (type == CSIO_INGRESS) {
/* Since queue area is set to zero */
q->un.iq.genbit = 1;
/*
* Ingress queue status page size is always the size of
* the ingress queue entry.
*/
q->credits = (qsz - q->wr_sz) / q->wr_sz;
q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
- q->wr_sz);
/* Allocate memory for FL if requested */
if (nflb > 0) {
flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),
sizeof(__be64), CSIO_FREELIST,
owner, 0, sreg, NULL);
if (flq_idx == -1) {
csio_err(hw,
"Failed to allocate FL queue"
" for IQ idx:%d\n", free_idx);
return -1;
}
/* Associate the new FL with the Ingress queue */
q->un.iq.flq_idx = flq_idx;
flq = wrm->q_arr[q->un.iq.flq_idx];
flq->un.fl.bufs = kzalloc(flq->credits *
sizeof(struct csio_dma_buf),
GFP_KERNEL);
if (!flq->un.fl.bufs) {
csio_err(hw,
"Failed to allocate FL queue bufs"
" for IQ idx:%d\n", free_idx);
return -1;
}
flq->un.fl.packen = 0;
flq->un.fl.offset = 0;
flq->un.fl.sreg = sreg;
/* Fill up the free list buffers */
if (csio_wr_fill_fl(hw, flq))
return -1;
/*
* Make sure that in a FLQ, at least 1 credit (8 FL buffers)
* remains unpopulated, otherwise HW thinks
* FLQ is empty.
*/
flq->pidx = flq->inc_idx = flq->credits - 8;
} else {
q->un.iq.flq_idx = -1;
}
/* Associate the IQ INTx handler. */
q->un.iq.iq_intx_handler = iq_intx_handler;
csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID;
} else if (type == CSIO_EGRESS) {
q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;
q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
- csio_wr_qstat_pgsz(hw));
csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID;
} else { /* Freelist */
q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);
q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
- csio_wr_qstat_pgsz(hw));
csio_q_flid(hw, ret_idx) = CSIO_MAX_QID;
}
return ret_idx;
}
/*
* csio_wr_iq_create_rsp - Response handler for IQ creation.
* @hw: The HW module.
* @mbp: Mailbox.
* @iq_idx: Ingress queue that got created.
*
* Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids.
*/
static int
csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
struct csio_iq_params iqp;
enum fw_retval retval;
uint32_t iq_id;
int flq_idx;
memset(&iqp, 0, sizeof(struct csio_iq_params));
csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);
if (retval != FW_SUCCESS) {
csio_err(hw, "IQ cmd returned 0x%x!\n", retval);
mempool_free(mbp, hw->mb_mempool);
return -EINVAL;
}
csio_q_iqid(hw, iq_idx) = iqp.iqid;
csio_q_physiqid(hw, iq_idx) = iqp.physiqid;
csio_q_pidx(hw, iq_idx) = csio_q_cidx(hw, iq_idx) = 0;
csio_q_inc_idx(hw, iq_idx) = 0;
/* Actual iq-id. */
iq_id = iqp.iqid - hw->wrm.fw_iq_start;
/* Set the iq-id to iq map table. */
if (iq_id >= CSIO_MAX_IQ) {
csio_err(hw,
"Exceeding MAX_IQ(%d) supported!"
" iqid:%d rel_iqid:%d FW iq_start:%d\n",
CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start);
mempool_free(mbp, hw->mb_mempool);
return -EINVAL;
}
csio_q_set_intr_map(hw, iq_idx, iq_id);
/*
* During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE
* ingress context of this queue. This will block interrupts to
* this queue until the next GTS write. Therefore, we do a
* 0-cidx increment GTS write for this queue just to clear the
* interrupt_sent bit. This will re-enable interrupts to this
* queue.
*/
csio_wr_sge_intr_enable(hw, iqp.physiqid);
flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
if (flq_idx != -1) {
struct csio_q *flq = hw->wrm.q_arr[flq_idx];
csio_q_flid(hw, flq_idx) = iqp.fl0id;
csio_q_cidx(hw, flq_idx) = 0;
csio_q_pidx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
/* Now update SGE about the buffers allocated during init */
csio_wr_ring_fldb(hw, flq);
}
mempool_free(mbp, hw->mb_mempool);
return 0;
}
/*
* csio_wr_iq_create - Configure an Ingress queue with FW.
* @hw: The HW module.
* @priv: Private data object.
* @iq_idx: Ingress queue index in the WR module.
* @vec: MSIX vector.
* @portid: PCIE Channel to be associated with this queue.
* @async: Is this a FW asynchronous message handling queue?
* @cbfn: Completion callback.
*
* This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox
* with alloc/write bits set.
*/
int
csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
uint32_t vec, uint8_t portid, bool async,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct csio_mb *mbp;
struct csio_iq_params iqp;
int flq_idx;
memset(&iqp, 0, sizeof(struct csio_iq_params));
csio_q_portid(hw, iq_idx) = portid;
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
if (!mbp) {
csio_err(hw, "IQ command out of memory!\n");
return -ENOMEM;
}
switch (hw->intr_mode) {
case CSIO_IM_INTX:
case CSIO_IM_MSI:
/* For interrupt forwarding queue only */
if (hw->intr_iq_idx == iq_idx)
iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
else
iqp.iqandst = X_INTERRUPTDESTINATION_IQ;
iqp.iqandstindex =
csio_q_physiqid(hw, hw->intr_iq_idx);
break;
case CSIO_IM_MSIX:
iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
iqp.iqandstindex = (uint16_t)vec;
break;
case CSIO_IM_NONE:
mempool_free(mbp, hw->mb_mempool);
return -EINVAL;
}
/* Pass in the ingress queue cmd parameters */
iqp.pfn = hw->pfn;
iqp.vfn = 0;
iqp.iq_start = 1;
iqp.viid = 0;
iqp.type = FW_IQ_TYPE_FL_INT_CAP;
iqp.iqasynch = async;
if (csio_intr_coalesce_cnt)
iqp.iqanus = X_UPDATESCHEDULING_COUNTER_OPTTIMER;
else
iqp.iqanus = X_UPDATESCHEDULING_TIMER;
iqp.iqanud = X_UPDATEDELIVERY_INTERRUPT;
iqp.iqpciech = portid;
iqp.iqintcntthresh = (uint8_t)csio_sge_thresh_reg;
switch (csio_q_wr_sz(hw, iq_idx)) {
case 16:
iqp.iqesize = 0; break;
case 32:
iqp.iqesize = 1; break;
case 64:
iqp.iqesize = 2; break;
case 128:
iqp.iqesize = 3; break;
}
iqp.iqsize = csio_q_size(hw, iq_idx) /
csio_q_wr_sz(hw, iq_idx);
iqp.iqaddr = csio_q_pstart(hw, iq_idx);
flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
if (flq_idx != -1) {
struct csio_q *flq = hw->wrm.q_arr[flq_idx];
iqp.fl0paden = 1;
iqp.fl0packen = flq->un.fl.packen ? 1 : 0;
iqp.fl0fbmin = X_FETCHBURSTMIN_64B;
iqp.fl0fbmax = X_FETCHBURSTMAX_512B;
iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
iqp.fl0addr = csio_q_pstart(hw, flq_idx);
}
csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
if (csio_mb_issue(hw, mbp)) {
csio_err(hw, "Issue of IQ cmd failed!\n");
mempool_free(mbp, hw->mb_mempool);
return -EINVAL;
}
if (cbfn != NULL)
return 0;
return csio_wr_iq_create_rsp(hw, mbp, iq_idx);
}
/*
* csio_wr_eq_cfg_rsp - Response handler for EQ creation.
* @hw: The HW module.
* @mbp: Mailbox.
* @eq_idx: Egress queue that got created.
*
* Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids.
*/
static int
csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
struct csio_eq_params eqp;
enum fw_retval retval;
memset(&eqp, 0, sizeof(struct csio_eq_params));
csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp);
if (retval != FW_SUCCESS) {
csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval);
mempool_free(mbp, hw->mb_mempool);
return -EINVAL;
}
csio_q_eqid(hw, eq_idx) = (uint16_t)eqp.eqid;
csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid;
csio_q_pidx(hw, eq_idx) = csio_q_cidx(hw, eq_idx) = 0;
csio_q_inc_idx(hw, eq_idx) = 0;
mempool_free(mbp, hw->mb_mempool);
return 0;
}
/*
* csio_wr_eq_create - Configure an Egress queue with FW.
* @hw: HW module.
* @priv: Private data.
* @eq_idx: Egress queue index in the WR module.
* @iq_idx: Associated ingress queue index.
* @cbfn: Completion callback.
*
* This API configures an offload egress queue with FW by issuing a
* FW_EQ_OFLD_CMD (with alloc + write) mailbox.
*/
int
csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx,
int iq_idx, uint8_t portid,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct csio_mb *mbp;
struct csio_eq_params eqp;
memset(&eqp, 0, sizeof(struct csio_eq_params));
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
if (!mbp) {
csio_err(hw, "EQ command out of memory!\n");
return -ENOMEM;
}
eqp.pfn = hw->pfn;
eqp.vfn = 0;
eqp.eqstart = 1;
eqp.hostfcmode = X_HOSTFCMODE_STATUS_PAGE;
eqp.iqid = csio_q_iqid(hw, iq_idx);
eqp.fbmin = X_FETCHBURSTMIN_64B;
eqp.fbmax = X_FETCHBURSTMAX_512B;
eqp.cidxfthresh = 0;
eqp.pciechn = portid;
eqp.eqsize = csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ;
eqp.eqaddr = csio_q_pstart(hw, eq_idx);
csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
&eqp, cbfn);
if (csio_mb_issue(hw, mbp)) {
csio_err(hw, "Issue of EQ OFLD cmd failed!\n");
mempool_free(mbp, hw->mb_mempool);
return -EINVAL;
}
if (cbfn != NULL)
return 0;
return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx);
}
/*
* csio_wr_iq_destroy_rsp - Response handler for IQ removal.
* @hw: The HW module.
* @mbp: Mailbox.
* @iq_idx: Ingress queue that was freed.
*
* Handle FW_IQ_CMD (free) mailbox completion.
*/
static int
csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
enum fw_retval retval = csio_mb_fw_retval(mbp);
int rv = 0;
if (retval != FW_SUCCESS)
rv = -EINVAL;
mempool_free(mbp, hw->mb_mempool);
return rv;
}
/*
* csio_wr_iq_destroy - Free an ingress queue.
* @hw: The HW module.
* @priv: Private data object.
* @iq_idx: Ingress queue index to destroy
* @cbfn: Completion callback.
*
* This API frees an ingress queue by issuing the FW_IQ_CMD
* with the free bit set.
*/
static int
csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx,
void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
int rv = 0;
struct csio_mb *mbp;
struct csio_iq_params iqp;
int flq_idx;
memset(&iqp, 0, sizeof(struct csio_iq_params));
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
if (!mbp)
return -ENOMEM;
iqp.pfn = hw->pfn;
iqp.vfn = 0;
iqp.iqid = csio_q_iqid(hw, iq_idx);
iqp.type = FW_IQ_TYPE_FL_INT_CAP;
flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
if (flq_idx != -1)
iqp.fl0id = csio_q_flid(hw, flq_idx);
else
iqp.fl0id = 0xFFFF;
iqp.fl1id = 0xFFFF;
csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
rv = csio_mb_issue(hw, mbp);
if (rv != 0) {
mempool_free(mbp, hw->mb_mempool);
return rv;
}
if (cbfn != NULL)
return 0;
return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx);
}
/*
* csio_wr_eq_destroy_rsp - Response handler for OFLD EQ removal.
* @hw: The HW module.
* @mbp: Mailbox.
* @eq_idx: Egress queue that was freed.
*
* Handle FW_EQ_OFLD_CMD (free) mailbox completion.
*/
static int
csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
enum fw_retval retval = csio_mb_fw_retval(mbp);
int rv = 0;
if (retval != FW_SUCCESS)
rv = -EINVAL;
mempool_free(mbp, hw->mb_mempool);
return rv;
}
/*
* csio_wr_eq_destroy - Free an Egress queue.
* @hw: The HW module.
* @priv: Private data object.
* @eq_idx: Egress queue index to destroy
* @cbfn: Completion callback.
*
* This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD
* with the free bit set.
*/
static int
csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
int rv = 0;
struct csio_mb *mbp;
struct csio_eq_params eqp;
memset(&eqp, 0, sizeof(struct csio_eq_params));
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
if (!mbp)
return -ENOMEM;
eqp.pfn = hw->pfn;
eqp.vfn = 0;
eqp.eqid = csio_q_eqid(hw, eq_idx);
csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn);
rv = csio_mb_issue(hw, mbp);
if (rv != 0) {
mempool_free(mbp, hw->mb_mempool);
return rv;
}
if (cbfn != NULL)
return 0;
return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx);
}
/*
* csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page
* @hw: HW module
* @qidx: Egress queue index
*
* Cleanup the Egress queue status page.
*/
static void
csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)
{
struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
memset(stp, 0, sizeof(*stp));
}
/*
* csio_wr_cleanup_iq_ftr - Cleanup Footer entries in IQ
* @hw: HW module
* @qidx: Ingress queue index
*
* Cleanup the footer entries in the given ingress queue, and
* set the internal copy of the genbit to 1.
*/
static void
csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
struct csio_q *q = wrm->q_arr[qidx];
void *wr;
struct csio_iqwr_footer *ftr;
uint32_t i = 0;
/* set to 1 since we are just about to zero out the genbit */
q->un.iq.genbit = 1;
for (i = 0; i < q->credits; i++) {
/* Get the WR */
wr = (void *)((uintptr_t)q->vstart +
(i * q->wr_sz));
/* Get the footer */
ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
(q->wr_sz - sizeof(*ftr)));
/* Zero out footer */
memset(ftr, 0, sizeof(*ftr));
}
}
int
csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
{
int i, flq_idx;
struct csio_q *q;
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
int rv;
for (i = 0; i < wrm->free_qidx; i++) {
q = wrm->q_arr[i];
switch (q->type) {
case CSIO_EGRESS:
if (csio_q_eqid(hw, i) != CSIO_MAX_QID) {
csio_wr_cleanup_eq_stpg(hw, i);
if (!cmd) {
csio_q_eqid(hw, i) = CSIO_MAX_QID;
continue;
}
rv = csio_wr_eq_destroy(hw, NULL, i, NULL);
if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
cmd = false;
csio_q_eqid(hw, i) = CSIO_MAX_QID;
}
case CSIO_INGRESS:
if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
csio_wr_cleanup_iq_ftr(hw, i);
if (!cmd) {
csio_q_iqid(hw, i) = CSIO_MAX_QID;
flq_idx = csio_q_iq_flq_idx(hw, i);
if (flq_idx != -1)
csio_q_flid(hw, flq_idx) =
CSIO_MAX_QID;
continue;
}
rv = csio_wr_iq_destroy(hw, NULL, i, NULL);
if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
cmd = false;
csio_q_iqid(hw, i) = CSIO_MAX_QID;
flq_idx = csio_q_iq_flq_idx(hw, i);
if (flq_idx != -1)
csio_q_flid(hw, flq_idx) = CSIO_MAX_QID;
}
default:
break;
}
}
hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED;
return 0;
}
/*
* csio_wr_get - Get requested size of WR entry/entries from queue.
* @hw: HW module.
* @qidx: Index of queue.
* @size: Cumulative size of Work request(s).
* @wrp: Work request pair.
*
* If requested credits are available, return the start address of the
* work request in the work request pair. Set pidx accordingly and
* return.
*
* NOTE about WR pair:
* ==================
* A WR can start towards the end of a queue, and then continue at the
* beginning, since the queue is considered to be circular. This will
* require a pair of address/size to be passed back to the caller -
* hence Work request pair format.
*/
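/*
 * Illustrative example (hypothetical numbers, not from the original
 * source; assumes a credit size of 64 bytes, i.e. 8 freelist pointers
 * of 8 bytes each as noted for the FL doorbell): if a 192-byte WR is
 * requested when only 128 bytes remain before q->vwrap, csio_wr_get()
 * fills addr1/size1 = <address at pidx>/128 and
 * addr2/size2 = q->vstart/64, and pidx is moved to credit 1 at the
 * top of the queue.
 */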
int
csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,
struct csio_wr_pair *wrp)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
struct csio_q *q = wrm->q_arr[qidx];
void *cwr = (void *)((uintptr_t)(q->vstart) +
(q->pidx * CSIO_QCREDIT_SZ));
struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
uint16_t cidx = q->cidx = ntohs(stp->cidx);
uint16_t pidx = q->pidx;
uint32_t req_sz = ALIGN(size, CSIO_QCREDIT_SZ);
int req_credits = req_sz / CSIO_QCREDIT_SZ;
int credits;
CSIO_DB_ASSERT(q->owner != NULL);
CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
CSIO_DB_ASSERT(cidx <= q->credits);
/* Calculate credits */
if (pidx > cidx) {
credits = q->credits - (pidx - cidx) - 1;
} else if (cidx > pidx) {
credits = cidx - pidx - 1;
} else {
/* cidx == pidx, empty queue */
credits = q->credits;
CSIO_INC_STATS(q, n_qempty);
}
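/*
 * Illustrative example (hypothetical numbers, not from the original
 * source): with q->credits = 64, pidx = 10 and cidx = 5, the available
 * credits work out to 64 - (10 - 5) - 1 = 58; a request needing more
 * than 58 credits is failed with -EBUSY below.
 */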
/*
* Check if we have enough credits.
* credits = 1 implies queue is full.
*/
if (!credits || (req_credits > credits)) {
CSIO_INC_STATS(q, n_qfull);
return -EBUSY;
}
/*
* If we are here, we have enough credits to satisfy the
* request. Check if we are near the end of q, and if WR spills over.
* If it does, use the first addr/size to cover the queue until
* the end. Fit the remainder portion of the request at the top
* of queue and return it in the second addr/len. Set pidx
* accordingly.
*/
if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
wrp->addr1 = cwr;
wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
wrp->addr2 = q->vstart;
wrp->size2 = req_sz - wrp->size1;
q->pidx = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
CSIO_QCREDIT_SZ);
CSIO_INC_STATS(q, n_qwrap);
CSIO_INC_STATS(q, n_eq_wr_split);
} else {
wrp->addr1 = cwr;
wrp->size1 = req_sz;
wrp->addr2 = NULL;
wrp->size2 = 0;
q->pidx += (uint16_t)req_credits;
/* We are at the end of the queue; roll back pidx to the top of the queue */
if (unlikely(q->pidx == q->credits)) {
q->pidx = 0;
CSIO_INC_STATS(q, n_qwrap);
}
}
q->inc_idx = (uint16_t)req_credits;
CSIO_INC_STATS(q, n_tot_reqs);
return 0;
}
/*
* csio_wr_copy_to_wrp - Copies given data into WR.
* @data_buf - Data buffer
* @wrp - Work request pair.
* @wr_off - Work request offset.
* @data_len - Data length.
*
* Copies the given data in Work Request. Work request pair(wrp) specifies
* address information of Work request.
* Returns: none
*/
void
csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
uint32_t wr_off, uint32_t data_len)
{
uint32_t nbytes;
/* Number of bytes available in buffer addr1 of the WRP */
nbytes = ((wrp->size1 - wr_off) >= data_len) ?
data_len : (wrp->size1 - wr_off);
memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
data_len -= nbytes;
/* Write the remaining data from the beginning of the circular buffer */
if (data_len) {
CSIO_DB_ASSERT(data_len <= wrp->size2);
CSIO_DB_ASSERT(wrp->addr2 != NULL);
memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len);
}
}
/*
* csio_wr_issue - Notify chip of Work request.
* @hw: HW module.
* @qidx: Index of queue.
* @prio: 0: Low priority, 1: High priority
*
* Rings the SGE Doorbell by writing the current producer index of the passed
* in queue into the register.
*
*/
int
csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
struct csio_q *q = wrm->q_arr[qidx];
CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
wmb();
/* Ring SGE Doorbell writing q->pidx into it */
csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
PIDX(q->inc_idx), MYPF_REG(SGE_PF_KDOORBELL));
q->inc_idx = 0;
return 0;
}
static inline uint32_t
csio_wr_avail_qcredits(struct csio_q *q)
{
if (q->pidx > q->cidx)
return q->pidx - q->cidx;
else if (q->cidx > q->pidx)
return q->credits - (q->cidx - q->pidx);
else
return 0; /* cidx == pidx, empty queue */
}
/*
* csio_wr_inval_flq_buf - Invalidate a free list buffer entry.
* @hw: HW module.
* @flq: The freelist queue.
*
* Invalidate the driver's version of a freelist buffer entry,
* without freeing the associated DMA memory. The entry
* to be invalidated is picked up from the current Free list
* queue cidx.
*
*/
static inline void
csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq)
{
flq->cidx++;
if (flq->cidx == flq->credits) {
flq->cidx = 0;
CSIO_INC_STATS(flq, n_qwrap);
}
}
/*
* csio_wr_process_fl - Process a freelist completion.
* @hw: HW module.
* @q: The ingress queue attached to the Freelist.
* @wr: The freelist completion WR in the ingress queue.
* @len_to_qid: The lower 32-bits of the first flit of the RSP footer
* @iq_handler: Caller's handler for this completion.
* @priv: Private pointer of caller
*
*/
static inline void
csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
void *wr, uint32_t len_to_qid,
void (*iq_handler)(struct csio_hw *, void *,
uint32_t, struct csio_fl_dma_buf *,
void *),
void *priv)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
struct csio_sge *sge = &wrm->sge;
struct csio_fl_dma_buf flb;
struct csio_dma_buf *buf, *fbuf;
uint32_t bufsz, len, lastlen = 0;
int n;
struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];
CSIO_DB_ASSERT(flq != NULL);
len = len_to_qid;
if (len & IQWRF_NEWBUF) {
if (flq->un.fl.offset > 0) {
csio_wr_inval_flq_buf(hw, flq);
flq->un.fl.offset = 0;
}
len = IQWRF_LEN_GET(len);
}
CSIO_DB_ASSERT(len != 0);
flb.totlen = len;
/* Consume all freelist buffers used for len bytes */
for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) {
buf = &flq->un.fl.bufs[flq->cidx];
bufsz = csio_wr_fl_bufsz(sge, buf);
fbuf->paddr = buf->paddr;
fbuf->vaddr = buf->vaddr;
flb.offset = flq->un.fl.offset;
lastlen = min(bufsz, len);
fbuf->len = lastlen;
len -= lastlen;
if (!len)
break;
csio_wr_inval_flq_buf(hw, flq);
}
flb.defer_free = flq->un.fl.packen ? 0 : 1;
iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
&flb, priv);
if (flq->un.fl.packen)
flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
else
csio_wr_inval_flq_buf(hw, flq);
}
/*
* csio_is_new_iqwr - Is this a new ingress queue entry?
* @q: Ingress queue.
* @ftr: Ingress queue WR SGE footer.
*
* The entry is new if our generation bit matches the corresponding
* bit in the footer of the current WR.
*/
static inline bool
csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
{
return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
}
/*
* csio_wr_process_iq - Process elements in Ingress queue.
* @hw: HW pointer
* @q: Ingress queue to process
* @iq_handler: Handler for this queue
* @priv: Caller's private pointer
*
* This routine walks through every entry of the ingress queue, calling
* the provided iq_handler with the entry, until the generation bit
* flips.
*/
int
csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
void (*iq_handler)(struct csio_hw *, void *,
uint32_t, struct csio_fl_dma_buf *,
void *),
void *priv)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
struct csio_iqwr_footer *ftr;
uint32_t wr_type, fw_qid, qid;
struct csio_q *q_completed;
struct csio_q *flq = csio_iq_has_fl(q) ?
wrm->q_arr[q->un.iq.flq_idx] : NULL;
int rv = 0;
/* Get the footer */
ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
(q->wr_sz - sizeof(*ftr)));
/*
* When q wrapped around last time, driver should have inverted
* ic.genbit as well.
*/
while (csio_is_new_iqwr(q, ftr)) {
CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
(uintptr_t)q->vwrap);
rmb();
wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);
switch (wr_type) {
case X_RSPD_TYPE_CPL:
/* Subtract footer from WR len */
iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
break;
case X_RSPD_TYPE_FLBUF:
csio_wr_process_fl(hw, q, wr,
ntohl(ftr->pldbuflen_qid),
iq_handler, priv);
break;
case X_RSPD_TYPE_INTR:
fw_qid = ntohl(ftr->pldbuflen_qid);
qid = fw_qid - wrm->fw_iq_start;
q_completed = hw->wrm.intr_map[qid];
if (unlikely(qid ==
csio_q_physiqid(hw, hw->intr_iq_idx))) {
/*
* We are already in the Forward Interrupt
* Queue Service! Do not service it
* again!
*
*/
} else {
CSIO_DB_ASSERT(q_completed);
CSIO_DB_ASSERT(
q_completed->un.iq.iq_intx_handler);
/* Call the queue handler. */
q_completed->un.iq.iq_intx_handler(hw, NULL,
0, NULL, (void *)q_completed);
}
break;
default:
csio_warn(hw, "Unknown resp type 0x%x received\n",
wr_type);
CSIO_INC_STATS(q, n_rsp_unknown);
break;
}
/*
* Ingress *always* has fixed size WR entries. Therefore,
* there should always be complete WRs towards the end of
* queue.
*/
if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {
/* Roll over to start of queue */
q->cidx = 0;
wr = q->vstart;
/* Toggle genbit */
q->un.iq.genbit ^= 0x1;
CSIO_INC_STATS(q, n_qwrap);
} else {
q->cidx++;
wr = (void *)((uintptr_t)(q->vstart) +
(q->cidx * q->wr_sz));
}
ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
(q->wr_sz - sizeof(*ftr)));
q->inc_idx++;
} /* while (csio_is_new_iqwr(q, ftr)) */
/*
* We need to re-arm SGE interrupts in case we got a stray interrupt,
* especially in msix mode. With INTx, this may be a common occurrence.
*/
if (unlikely(!q->inc_idx)) {
CSIO_INC_STATS(q, n_stray_comp);
rv = -EINVAL;
goto restart;
}
/* Replenish free list buffers if pending falls below low water mark */
if (flq) {
uint32_t avail = csio_wr_avail_qcredits(flq);
if (avail <= 16) {
/* Make sure that in the FLQ, at least 1 credit (8 FL buffers)
* remains unpopulated, otherwise HW thinks the
* FLQ is empty.
*/
csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail);
csio_wr_ring_fldb(hw, flq);
}
}
restart:
/* Now inform SGE about our incremental index value */
csio_wr_reg32(hw, CIDXINC(q->inc_idx) |
INGRESSQID(q->un.iq.physiqid) |
TIMERREG(csio_sge_timer_reg),
MYPF_REG(SGE_PF_GTS));
q->stats.n_tot_rsps += q->inc_idx;
q->inc_idx = 0;
return rv;
}
int
csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,
void (*iq_handler)(struct csio_hw *, void *,
uint32_t, struct csio_fl_dma_buf *,
void *),
void *priv)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
struct csio_q *iq = wrm->q_arr[qidx];
return csio_wr_process_iq(hw, iq, iq_handler, priv);
}
static int
csio_closest_timer(struct csio_sge *s, int time)
{
int i, delta, match = 0, min_delta = INT_MAX;
for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
delta = time - s->timer_val[i];
if (delta < 0)
delta = -delta;
if (delta < min_delta) {
min_delta = delta;
match = i;
}
}
return match;
}
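/*
 * Illustrative example (hypothetical timer values, not from the
 * original source): if s->timer_val held {5, 10, 20, 50, 100, 200}
 * microseconds and the requested coalesce time were 12us,
 * csio_closest_timer() would return index 1 (10us), since
 * |12 - 10| = 2 is the smallest delta.
 */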
static int
csio_closest_thresh(struct csio_sge *s, int cnt)
{
int i, delta, match = 0, min_delta = INT_MAX;
for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
delta = cnt - s->counter_val[i];
if (delta < 0)
delta = -delta;
if (delta < min_delta) {
min_delta = delta;
match = i;
}
}
return match;
}
static void
csio_wr_fixup_host_params(struct csio_hw *hw)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
struct csio_sge *sge = &wrm->sge;
uint32_t clsz = L1_CACHE_BYTES;
uint32_t s_hps = PAGE_SHIFT - 10;
uint32_t ingpad = 0;
uint32_t stat_len = clsz > 64 ? 128 : 64;
csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) |
HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) |
HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) |
HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps),
SGE_HOST_PAGE_SIZE);
sge->csio_fl_align = clsz < 32 ? 32 : clsz;
ingpad = ilog2(sge->csio_fl_align) - 5;
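/*
 * Illustrative arithmetic (a sketch, assuming 64-byte cache lines):
 * clsz = 64 gives csio_fl_align = 64 and ingpad = ilog2(64) - 5 = 1,
 * which presumably corresponds to the 64B ingress padding boundary
 * encoding (cf. the INGPADBOUNDARY decoding in csio_wr_get_sge()).
 */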
csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK |
EGRSTATUSPAGESIZE(1),
INGPADBOUNDARY(ingpad) |
EGRSTATUSPAGESIZE(stat_len != 64));
/* FL BUFFER SIZE#0 is Page size, i.e. already aligned to the cache line */
csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
csio_wr_reg32(hw,
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
SGE_FL_BUFFER_SIZE2);
csio_wr_reg32(hw,
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
SGE_FL_BUFFER_SIZE3);
csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
/* default value of rx_dma_offset of the NIC driver */
csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
}
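/*
 * Illustrative standalone sketch (not part of the driver): the encodings used
 * by csio_wr_fixup_host_params() above, worked through for an assumed 4KB page
 * and 64-byte cache line. As the code shows, HOSTPAGESIZEPF* is written as
 * PAGE_SHIFT - 10, INGPADBOUNDARY as log2(alignment) - 5 (32B encodes as 0),
 * and the FL buffer sizes are rounded up to a multiple of the alignment. The
 * constants here are assumptions for the example, not values read from
 * hardware.
 */
#include <stdio.h>

static unsigned int round_up_to(unsigned int v, unsigned int align)
{
	return (v + align - 1) & ~(align - 1);	/* align must be a power of 2 */
}

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int page_shift = 12;	/* 4KB pages (assumption) */
	unsigned int cache_line = 64;	/* L1 cache line size (assumption) */
	unsigned int fl_align = cache_line < 32 ? 32 : cache_line;

	printf("HOSTPAGESIZEPF field = %u\n", page_shift - 10);		/* 2 */
	printf("INGPADBOUNDARY field = %u\n", ilog2_u(fl_align) - 5);	/* 1 */
	printf("9000 rounded to fl_align = %u\n", round_up_to(9000, fl_align));
	return 0;
}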
static void
csio_init_intr_coalesce_parms(struct csio_hw *hw)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
struct csio_sge *sge = &wrm->sge;
csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt);
if (csio_intr_coalesce_cnt) {
csio_sge_thresh_reg = 0;
csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER;
return;
}
csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time);
}
/*
* csio_wr_get_sge - Get SGE register values.
* @hw: HW module.
*
* Used by non-master functions and by master functions relying on the config file.
*/
static void
csio_wr_get_sge(struct csio_hw *hw)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
struct csio_sge *sge = &wrm->sge;
uint32_t ingpad;
int i;
u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
u32 ingress_rx_threshold;
sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
ingpad = INGPADBOUNDARY_GET(sge->sge_control);
switch (ingpad) {
case X_INGPCIEBOUNDARY_32B:
sge->csio_fl_align = 32; break;
case X_INGPCIEBOUNDARY_64B:
sge->csio_fl_align = 64; break;
case X_INGPCIEBOUNDARY_128B:
sge->csio_fl_align = 128; break;
case X_INGPCIEBOUNDARY_256B:
sge->csio_fl_align = 256; break;
case X_INGPCIEBOUNDARY_512B:
sge->csio_fl_align = 512; break;
case X_INGPCIEBOUNDARY_1024B:
sge->csio_fl_align = 1024; break;
case X_INGPCIEBOUNDARY_2048B:
sge->csio_fl_align = 2048; break;
case X_INGPCIEBOUNDARY_4096B:
sge->csio_fl_align = 4096; break;
}
for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
csio_get_flbuf_size(hw, sge, i);
timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1);
timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3);
timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5);
sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
TIMERVALUE0_GET(timer_value_0_and_1));
sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
TIMERVALUE1_GET(timer_value_0_and_1));
sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
TIMERVALUE2_GET(timer_value_2_and_3));
sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
TIMERVALUE3_GET(timer_value_2_and_3));
sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
TIMERVALUE4_GET(timer_value_4_and_5));
sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
TIMERVALUE5_GET(timer_value_4_and_5));
ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD);
sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
csio_init_intr_coalesce_parms(hw);
}
/*
* csio_wr_set_sge - Initialize SGE registers
* @hw: HW module.
*
* Used by Master function to initialize SGE registers in the absence
* of a config file.
*/
static void
csio_wr_set_sge(struct csio_hw *hw)
{
struct csio_wrm *wrm = csio_hw_to_wrm(hw);
struct csio_sge *sge = &wrm->sge;
int i;
/*
* Set up our basic SGE mode to deliver CPL messages to our Ingress
* Queue and Packet Data to the Free List.
*/
csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1));
sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
/* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */
/*
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
* and generate an interrupt when this occurs so we can recover.
*/
csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
HP_INT_THRESH(HP_INT_THRESH_MASK) |
LP_INT_THRESH(LP_INT_THRESH_MASK),
HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
LP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH));
csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
ENABLE_DROP);
/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
CSIO_SET_FLBUF_SIZE(hw, 2, CSIO_SGE_FLBUF_SIZE2);
CSIO_SET_FLBUF_SIZE(hw, 3, CSIO_SGE_FLBUF_SIZE3);
CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);
for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
csio_get_flbuf_size(hw, sge, i);
/* Initialize interrupt coalescing attributes */
sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;
sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) |
THRESHOLD_1(sge->counter_val[1]) |
THRESHOLD_2(sge->counter_val[2]) |
THRESHOLD_3(sge->counter_val[3]),
SGE_INGRESS_RX_THRESHOLD);
csio_wr_reg32(hw,
TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])),
SGE_TIMER_VALUE_0_AND_1);
csio_wr_reg32(hw,
TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])),
SGE_TIMER_VALUE_2_AND_3);
csio_wr_reg32(hw,
TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])),
SGE_TIMER_VALUE_4_AND_5);
csio_init_intr_coalesce_parms(hw);
}
void
csio_wr_sge_init(struct csio_hw *hw)
{
/*
 * If we are the master:
 * - If we plan to use the config file, we need to fix up some
 *   host-specific registers, and read the rest of the SGE
 *   configuration.
 * - If we don't plan to use the config file, we need to initialize
 *   the SGE entirely, including fixing the host-specific registers.
 * If we aren't the master, we are only allowed to read and work off of
 * the already initialized SGE values.
 *
 * Therefore, before calling this function, we assume that mastership
 * of the card, and whether to use the config file or not, have
 * already been decided. In other words, CSIO_HWF_USING_SOFT_PARAMS and
 * CSIO_HWF_MASTER should be set/unset.
 */
if (csio_is_hw_master(hw)) {
csio_wr_fixup_host_params(hw);
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
csio_wr_get_sge(hw);
else
csio_wr_set_sge(hw);
} else
csio_wr_get_sge(hw);
}
/*
* csio_wrm_init - Initialize Work request module.
* @wrm: WR module
* @hw: HW pointer
*
* Allocates memory for an array of queue pointers starting at q_arr.
*/
int
csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw)
{
int i;
if (!wrm->num_q) {
csio_err(hw, "Num queues is not set\n");
return -EINVAL;
}
wrm->q_arr = kzalloc(sizeof(struct csio_q *) * wrm->num_q, GFP_KERNEL);
if (!wrm->q_arr)
goto err;
for (i = 0; i < wrm->num_q; i++) {
wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL);
if (!wrm->q_arr[i]) {
while (--i >= 0)
kfree(wrm->q_arr[i]);
goto err_free_arr;
}
}
wrm->free_qidx = 0;
return 0;
err_free_arr:
kfree(wrm->q_arr);
err:
return -ENOMEM;
}
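/*
 * Illustrative standalone sketch (not part of the driver): the
 * allocate-then-unwind pattern used by csio_wrm_init() above. If the i-th
 * element allocation fails, every element allocated so far is freed before
 * the pointer array itself is released, so the caller never sees a
 * half-built table. Names below are hypothetical.
 */
#include <stdlib.h>

struct demo_q { int id; };

static struct demo_q **demo_alloc_qs(int num)
{
	struct demo_q **arr;
	int i;

	arr = calloc(num, sizeof(*arr));
	if (!arr)
		return NULL;
	for (i = 0; i < num; i++) {
		arr[i] = calloc(1, sizeof(**arr));
		if (!arr[i]) {
			while (--i >= 0)	/* unwind partial allocations */
				free(arr[i]);
			free(arr);
			return NULL;
		}
	}
	return arr;
}

static void demo_free_qs(struct demo_q **arr, int num)
{
	int i;

	for (i = 0; i < num; i++)
		free(arr[i]);
	free(arr);
}

int main(void)
{
	struct demo_q **qs = demo_alloc_qs(8);

	if (qs)
		demo_free_qs(qs, 8);
	return 0;
}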
/*
* csio_wrm_exit - Uninitialize Work request module.
* @wrm: WR module
* @hw: HW module
*
* Uninitialize WR module. Free q_arr and pointers in it.
* We have the additional job of freeing the DMA memory associated
* with the queues.
*/
void
csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
{
int i;
uint32_t j;
struct csio_q *q;
struct csio_dma_buf *buf;
for (i = 0; i < wrm->num_q; i++) {
q = wrm->q_arr[i];
if (wrm->free_qidx && (i < wrm->free_qidx)) {
if (q->type == CSIO_FREELIST) {
if (!q->un.fl.bufs)
continue;
for (j = 0; j < q->credits; j++) {
buf = &q->un.fl.bufs[j];
if (!buf->vaddr)
continue;
pci_free_consistent(hw->pdev, buf->len,
buf->vaddr,
buf->paddr);
}
kfree(q->un.fl.bufs);
}
pci_free_consistent(hw->pdev, q->size,
q->vstart, q->pstart);
}
kfree(q);
}
hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED;
kfree(wrm->q_arr);
}
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_WR_H__
#define __CSIO_WR_H__
#include <linux/cache.h>
#include "csio_defs.h"
#include "t4fw_api.h"
#include "t4fw_api_stor.h"
/*
* SGE register field values.
*/
#define X_INGPCIEBOUNDARY_32B 0
#define X_INGPCIEBOUNDARY_64B 1
#define X_INGPCIEBOUNDARY_128B 2
#define X_INGPCIEBOUNDARY_256B 3
#define X_INGPCIEBOUNDARY_512B 4
#define X_INGPCIEBOUNDARY_1024B 5
#define X_INGPCIEBOUNDARY_2048B 6
#define X_INGPCIEBOUNDARY_4096B 7
/* GTS register */
#define X_TIMERREG_COUNTER0 0
#define X_TIMERREG_COUNTER1 1
#define X_TIMERREG_COUNTER2 2
#define X_TIMERREG_COUNTER3 3
#define X_TIMERREG_COUNTER4 4
#define X_TIMERREG_COUNTER5 5
#define X_TIMERREG_RESTART_COUNTER 6
#define X_TIMERREG_UPDATE_CIDX 7
/*
* Egress Context field values
*/
#define X_FETCHBURSTMIN_16B 0
#define X_FETCHBURSTMIN_32B 1
#define X_FETCHBURSTMIN_64B 2
#define X_FETCHBURSTMIN_128B 3
#define X_FETCHBURSTMAX_64B 0
#define X_FETCHBURSTMAX_128B 1
#define X_FETCHBURSTMAX_256B 2
#define X_FETCHBURSTMAX_512B 3
#define X_HOSTFCMODE_NONE 0
#define X_HOSTFCMODE_INGRESS_QUEUE 1
#define X_HOSTFCMODE_STATUS_PAGE 2
#define X_HOSTFCMODE_BOTH 3
/*
* Ingress Context field values
*/
#define X_UPDATESCHEDULING_TIMER 0
#define X_UPDATESCHEDULING_COUNTER_OPTTIMER 1
#define X_UPDATEDELIVERY_NONE 0
#define X_UPDATEDELIVERY_INTERRUPT 1
#define X_UPDATEDELIVERY_STATUS_PAGE 2
#define X_UPDATEDELIVERY_BOTH 3
#define X_INTERRUPTDESTINATION_PCIE 0
#define X_INTERRUPTDESTINATION_IQ 1
#define X_RSPD_TYPE_FLBUF 0
#define X_RSPD_TYPE_CPL 1
#define X_RSPD_TYPE_INTR 2
/* WR status is at the same position as retval in a CMD header */
#define csio_wr_status(_wr) \
(FW_CMD_RETVAL_GET(ntohl(((struct fw_cmd_hdr *)(_wr))->lo)))
struct csio_hw;
extern int csio_intr_coalesce_cnt;
extern int csio_intr_coalesce_time;
/* Ingress queue params */
struct csio_iq_params {
uint8_t iq_start:1;
uint8_t iq_stop:1;
uint8_t pfn:3;
uint8_t vfn;
uint16_t physiqid;
uint16_t iqid;
uint16_t fl0id;
uint16_t fl1id;
uint8_t viid;
uint8_t type;
uint8_t iqasynch;
uint8_t reserved4;
uint8_t iqandst;
uint8_t iqanus;
uint8_t iqanud;
uint16_t iqandstindex;
uint8_t iqdroprss;
uint8_t iqpciech;
uint8_t iqdcaen;
uint8_t iqdcacpu;
uint8_t iqintcntthresh;
uint8_t iqo;
uint8_t iqcprio;
uint8_t iqesize;
uint16_t iqsize;
uint64_t iqaddr;
uint8_t iqflintiqhsen;
uint8_t reserved5;
uint8_t iqflintcongen;
uint8_t iqflintcngchmap;
uint32_t reserved6;
uint8_t fl0hostfcmode;
uint8_t fl0cprio;
uint8_t fl0paden;
uint8_t fl0packen;
uint8_t fl0congen;
uint8_t fl0dcaen;
uint8_t fl0dcacpu;
uint8_t fl0fbmin;
uint8_t fl0fbmax;
uint8_t fl0cidxfthresho;
uint8_t fl0cidxfthresh;
uint16_t fl0size;
uint64_t fl0addr;
uint64_t reserved7;
uint8_t fl1hostfcmode;
uint8_t fl1cprio;
uint8_t fl1paden;
uint8_t fl1packen;
uint8_t fl1congen;
uint8_t fl1dcaen;
uint8_t fl1dcacpu;
uint8_t fl1fbmin;
uint8_t fl1fbmax;
uint8_t fl1cidxfthresho;
uint8_t fl1cidxfthresh;
uint16_t fl1size;
uint64_t fl1addr;
};
/* Egress queue params */
struct csio_eq_params {
uint8_t pfn;
uint8_t vfn;
uint8_t eqstart:1;
uint8_t eqstop:1;
uint16_t physeqid;
uint32_t eqid;
uint8_t hostfcmode:2;
uint8_t cprio:1;
uint8_t pciechn:3;
uint16_t iqid;
uint8_t dcaen:1;
uint8_t dcacpu:5;
uint8_t fbmin:3;
uint8_t fbmax:3;
uint8_t cidxfthresho:1;
uint8_t cidxfthresh:3;
uint16_t eqsize;
uint64_t eqaddr;
};
struct csio_dma_buf {
struct list_head list;
void *vaddr; /* Virtual address */
dma_addr_t paddr; /* Physical address */
uint32_t len; /* Buffer size */
};
/* Generic I/O request structure */
struct csio_ioreq {
struct csio_sm sm; /* SM, List
* should be the first member
*/
int iq_idx; /* Ingress queue index */
int eq_idx; /* Egress queue index */
uint32_t nsge; /* Number of SG elements */
uint32_t tmo; /* Driver timeout */
uint32_t datadir; /* Data direction */
struct csio_dma_buf dma_buf; /* Req/resp DMA buffers */
uint16_t wr_status; /* WR completion status */
int16_t drv_status; /* Driver internal status */
struct csio_lnode *lnode; /* Owner lnode */
struct csio_rnode *rnode; /* Src/destination rnode */
void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *);
/* completion callback */
void *scratch1; /* Scratch area 1.
*/
void *scratch2; /* Scratch area 2. */
struct list_head gen_list; /* Any list associated with
* this ioreq.
*/
uint64_t fw_handle; /* Unique handle passed
* to FW
*/
uint8_t dcopy; /* Data copy required */
uint8_t reserved1;
uint16_t reserved2;
struct completion cmplobj; /* ioreq completion object */
} ____cacheline_aligned_in_smp;
/*
* Egress status page for egress cidx updates
*/
struct csio_qstatus_page {
__be32 qid;
__be16 cidx;
__be16 pidx;
};
enum {
CSIO_MAX_FLBUF_PER_IQWR = 4,
CSIO_QCREDIT_SZ = 64, /* pidx/cidx increments
* in bytes
*/
CSIO_MAX_QID = 0xFFFF,
CSIO_MAX_IQ = 128,
CSIO_SGE_NTIMERS = 6,
CSIO_SGE_NCOUNTERS = 4,
CSIO_SGE_FL_SIZE_REGS = 16,
};
/* Defines for type */
enum {
CSIO_EGRESS = 1,
CSIO_INGRESS = 2,
CSIO_FREELIST = 3,
};
/*
* Structure for footer (last 2 flits) of Ingress Queue Entry.
*/
struct csio_iqwr_footer {
__be32 hdrbuflen_pidx;
__be32 pldbuflen_qid;
union {
u8 type_gen;
__be64 last_flit;
} u;
};
#define IQWRF_NEWBUF (1 << 31)
#define IQWRF_LEN_GET(x) (((x) >> 0) & 0x7fffffffU)
#define IQWRF_GEN_SHIFT 7
#define IQWRF_TYPE_GET(x) (((x) >> 4) & 0x3U)
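/*
 * Illustrative standalone sketch (not part of the driver): decoding the
 * type/genbit byte of the ingress footer with the IQWRF_* macros above. The
 * example byte value is made up; the shifts and masks come straight from the
 * macro definitions.
 */
#include <stdio.h>

int main(void)
{
	unsigned char type_gen = 0x91;	/* genbit=1, type=1 (hypothetical) */

	printf("type   = %u\n", (type_gen >> 4) & 0x3u);	/* IQWRF_TYPE_GET */
	printf("genbit = %u\n", (type_gen >> 7) & 0x1u);	/* bit IQWRF_GEN_SHIFT */
	return 0;
}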
/*
* WR pair:
* ========
* A WR can start towards the end of a queue, and then continue at the
* beginning, since the queue is considered to be circular. This will
* require a pair of address/len to be passed back to the caller -
* hence the Work request pair structure.
*/
struct csio_wr_pair {
void *addr1;
uint32_t size1;
void *addr2;
uint32_t size2;
};
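/*
 * Illustrative standalone sketch (not part of the driver): copying a payload
 * into a WR that the circular queue handed back as two segments (addr1/size1
 * followed by addr2/size2). This is the kind of copy a helper such as
 * csio_wr_copy_to_wrp() is declared for later in this header; the exact
 * driver semantics may differ, so treat this only as an illustration of the
 * split-WR idea.
 */
#include <string.h>

struct demo_wr_pair {
	void *addr1;
	unsigned int size1;
	void *addr2;
	unsigned int size2;
};

static void demo_copy_to_pair(const void *src, unsigned int len,
			      struct demo_wr_pair *p)
{
	unsigned int first = len < p->size1 ? len : p->size1;

	memcpy(p->addr1, src, first);	/* tail of the queue */
	if (len > first)		/* remainder wraps to the start */
		memcpy(p->addr2, (const char *)src + first, len - first);
}

int main(void)
{
	char ring_tail[8], ring_head[8], payload[12] = "hello, wrap";
	struct demo_wr_pair p = { ring_tail, sizeof(ring_tail),
				  ring_head, sizeof(ring_head) };

	demo_copy_to_pair(payload, sizeof(payload), &p);
	return 0;
}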
/*
* The following structure is used by ingress processing to return the
* free list buffers to consumers.
*/
struct csio_fl_dma_buf {
struct csio_dma_buf flbufs[CSIO_MAX_FLBUF_PER_IQWR];
/* Freelist DMA buffers */
int offset; /* Offset within the
* first FL buf.
*/
uint32_t totlen; /* Total length */
uint8_t defer_free; /* Freeing of buffer can be
* deferred
*/
};
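/*
 * Illustrative standalone sketch (not part of the driver): how a consumer of
 * struct csio_fl_dma_buf (above) could linearize the scattered free-list
 * buffers: skip 'offset' bytes of the first buffer, then keep copying until
 * 'totlen' bytes have been gathered. This only sketches the data layout the
 * structure describes; it is not driver code.
 */
#include <string.h>

struct demo_buf {
	void *vaddr;
	unsigned int len;
};

static void demo_gather(const struct demo_buf *bufs, int nbufs,
			unsigned int offset, unsigned int totlen, char *out)
{
	unsigned int copied = 0;
	int i;

	for (i = 0; i < nbufs && copied < totlen; i++) {
		unsigned int skip = i == 0 ? offset : 0;	/* offset applies to buf 0 */
		unsigned int avail = bufs[i].len - skip;
		unsigned int n = totlen - copied < avail ? totlen - copied : avail;

		memcpy(out + copied, (char *)bufs[i].vaddr + skip, n);
		copied += n;
	}
}

int main(void)
{
	char b0[64], b1[64], out[80];
	struct demo_buf bufs[2] = { { b0, sizeof(b0) }, { b1, sizeof(b1) } };

	memset(b0, 'a', sizeof(b0));
	memset(b1, 'b', sizeof(b1));
	/* 16-byte offset into the first buffer, 80 bytes of payload total. */
	demo_gather(bufs, 2, 16, 80, out);
	return 0;
}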
/* Data-types */
typedef void (*iq_handler_t)(struct csio_hw *, void *, uint32_t,
struct csio_fl_dma_buf *, void *);
struct csio_iq {
uint16_t iqid; /* Queue ID */
uint16_t physiqid; /* Physical Queue ID */
uint16_t genbit; /* Generation bit,
* initially set to 1
*/
int flq_idx; /* Freelist queue index */
iq_handler_t iq_intx_handler; /* IQ INTx handler routine */
};
struct csio_eq {
uint16_t eqid; /* Qid */
uint16_t physeqid; /* Physical Queue ID */
uint8_t wrap[512]; /* Temp area for q-wrap around*/
};
struct csio_fl {
uint16_t flid; /* Qid */
uint16_t packen; /* Packing enabled? */
int offset; /* Offset within FL buf */
int sreg; /* Size register */
struct csio_dma_buf *bufs; /* Free list buffer ptr array
* indexed using flq->cidx/pidx
*/
};
struct csio_qstats {
uint32_t n_tot_reqs; /* Total no. of Requests */
uint32_t n_tot_rsps; /* Total no. of responses */
uint32_t n_qwrap; /* Queue wraps */
uint32_t n_eq_wr_split; /* Number of split EQ WRs */
uint32_t n_qentry; /* Queue entry */
uint32_t n_qempty; /* Queue empty */
uint32_t n_qfull; /* Queue fulls */
uint32_t n_rsp_unknown; /* Unknown response type */
uint32_t n_stray_comp; /* Stray completion intr */
uint32_t n_flq_refill; /* Number of FL refills */
};
/* Queue metadata */
struct csio_q {
uint16_t type; /* Type: Ingress/Egress/FL */
uint16_t pidx; /* producer index */
uint16_t cidx; /* consumer index */
uint16_t inc_idx; /* Incremental index */
uint32_t wr_sz; /* Size of all WRs in this q
* if fixed
*/
void *vstart; /* Base virtual address
* of queue
*/
void *vwrap; /* Virtual end address to
* wrap around at
*/
uint32_t credits; /* Size of queue in credits */
void *owner; /* Owner */
union { /* Queue contexts */
struct csio_iq iq;
struct csio_eq eq;
struct csio_fl fl;
} un;
dma_addr_t pstart; /* Base physical address of
* queue
*/
uint32_t portid; /* PCIE Channel */
uint32_t size; /* Size of queue in bytes */
struct csio_qstats stats; /* Statistics */
} ____cacheline_aligned_in_smp;
struct csio_sge {
uint32_t csio_fl_align; /* Calculated and cached
* for fast path
*/
uint32_t sge_control; /* padding, boundaries,
* lengths, etc.
*/
uint32_t sge_host_page_size; /* Host page size */
uint32_t sge_fl_buf_size[CSIO_SGE_FL_SIZE_REGS];
/* free list buffer sizes */
uint16_t timer_val[CSIO_SGE_NTIMERS];
uint8_t counter_val[CSIO_SGE_NCOUNTERS];
};
/* Work request module */
struct csio_wrm {
int num_q; /* Number of queues */
struct csio_q **q_arr; /* Array of queue pointers
* allocated dynamically
* based on configured values
*/
uint32_t fw_iq_start; /* Start ID of IQ for this fn*/
uint32_t fw_eq_start; /* Start ID of EQ for this fn*/
struct csio_q *intr_map[CSIO_MAX_IQ];
/* IQ-id to IQ map table. */
int free_qidx; /* queue idx of free queue */
struct csio_sge sge; /* SGE params */
};
#define csio_get_q(__hw, __idx) ((__hw)->wrm.q_arr[__idx])
#define csio_q_type(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->type)
#define csio_q_pidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pidx)
#define csio_q_cidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->cidx)
#define csio_q_inc_idx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->inc_idx)
#define csio_q_vstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->vstart)
#define csio_q_pstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pstart)
#define csio_q_size(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->size)
#define csio_q_credits(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->credits)
#define csio_q_portid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->portid)
#define csio_q_wr_sz(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->wr_sz)
#define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)
#define csio_q_physiqid(__hw, __idx) \
((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)
#define csio_q_iq_flq_idx(__hw, __idx) \
((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)
#define csio_q_eqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.eqid)
#define csio_q_flid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.fl.flid)
#define csio_q_physeqid(__hw, __idx) \
((__hw)->wrm.q_arr[(__idx)]->un.eq.physeqid)
#define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1)
#define csio_q_iq_to_flid(__hw, __iq_idx) \
csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_idx)]->un.iq.flq_idx)
#define csio_q_set_intr_map(__hw, __iq_idx, __rel_iq_id) \
(__hw)->wrm.intr_map[__rel_iq_id] = csio_get_q(__hw, __iq_idx)
#define csio_q_eq_wrap(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.wrap)
struct csio_mb;
int csio_wr_alloc_q(struct csio_hw *, uint32_t, uint32_t,
uint16_t, void *, uint32_t, int, iq_handler_t);
int csio_wr_iq_create(struct csio_hw *, void *, int,
uint32_t, uint8_t, bool,
void (*)(struct csio_hw *, struct csio_mb *));
int csio_wr_eq_create(struct csio_hw *, void *, int, int, uint8_t,
void (*)(struct csio_hw *, struct csio_mb *));
int csio_wr_destroy_queues(struct csio_hw *, bool cmd);
int csio_wr_get(struct csio_hw *, int, uint32_t,
struct csio_wr_pair *);
void csio_wr_copy_to_wrp(void *, struct csio_wr_pair *, uint32_t, uint32_t);
int csio_wr_issue(struct csio_hw *, int, bool);
int csio_wr_process_iq(struct csio_hw *, struct csio_q *,
void (*)(struct csio_hw *, void *,
uint32_t, struct csio_fl_dma_buf *,
void *),
void *);
int csio_wr_process_iq_idx(struct csio_hw *, int,
void (*)(struct csio_hw *, void *,
uint32_t, struct csio_fl_dma_buf *,
void *),
void *);
void csio_wr_sge_init(struct csio_hw *);
int csio_wrm_init(struct csio_wrm *, struct csio_hw *);
void csio_wrm_exit(struct csio_wrm *, struct csio_hw *);
#endif /* ifndef __CSIO_WR_H__ */
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _T4FW_API_STOR_H_
#define _T4FW_API_STOR_H_
/******************************************************************************
* R E T U R N V A L U E S
********************************/
enum fw_retval {
FW_SUCCESS = 0, /* completed successfully */
FW_EPERM = 1, /* operation not permitted */
FW_ENOENT = 2, /* no such file or directory */
FW_EIO = 5, /* input/output error; hw bad */
FW_ENOEXEC = 8, /* exec format error; inv microcode */
FW_EAGAIN = 11, /* try again */
FW_ENOMEM = 12, /* out of memory */
FW_EFAULT = 14, /* bad address; fw bad */
FW_EBUSY = 16, /* resource busy */
FW_EEXIST = 17, /* file exists */
FW_EINVAL = 22, /* invalid argument */
FW_ENOSPC = 28, /* no space left on device */
FW_ENOSYS = 38, /* functionality not implemented */
FW_EPROTO = 71, /* protocol error */
FW_EADDRINUSE = 98, /* address already in use */
FW_EADDRNOTAVAIL = 99, /* cannot assign requested address */
FW_ENETDOWN = 100, /* network is down */
FW_ENETUNREACH = 101, /* network is unreachable */
FW_ENOBUFS = 105, /* no buffer space available */
FW_ETIMEDOUT = 110, /* timeout */
FW_EINPROGRESS = 115, /* fw internal */
FW_SCSI_ABORT_REQUESTED = 128, /* */
FW_SCSI_ABORT_TIMEDOUT = 129, /* */
FW_SCSI_ABORTED = 130, /* */
FW_SCSI_CLOSE_REQUESTED = 131, /* */
FW_ERR_LINK_DOWN = 132, /* */
FW_RDEV_NOT_READY = 133, /* */
FW_ERR_RDEV_LOST = 134, /* */
FW_ERR_RDEV_LOGO = 135, /* */
FW_FCOE_NO_XCHG = 136, /* */
FW_SCSI_RSP_ERR = 137, /* */
FW_ERR_RDEV_IMPL_LOGO = 138, /* */
FW_SCSI_UNDER_FLOW_ERR = 139, /* */
FW_SCSI_OVER_FLOW_ERR = 140, /* */
FW_SCSI_DDP_ERR = 141, /* DDP error*/
FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */
};
enum fw_fcoe_link_sub_op {
FCOE_LINK_DOWN = 0x0,
FCOE_LINK_UP = 0x1,
FCOE_LINK_COND = 0x2,
};
enum fw_fcoe_link_status {
FCOE_LINKDOWN = 0x0,
FCOE_LINKUP = 0x1,
};
enum fw_ofld_prot {
PROT_FCOE = 0x1,
PROT_ISCSI = 0x2,
};
enum rport_type_fcoe {
FLOGI_VFPORT = 0x1, /* 0xfffffe */
FDISC_VFPORT = 0x2, /* 0xfffffe */
NS_VNPORT = 0x3, /* 0xfffffc */
REG_FC4_VNPORT = 0x4, /* any FC4 type VN_PORT */
REG_VNPORT = 0x5, /* 0xfffxxx - non FC4 port in switch */
FDMI_VNPORT = 0x6, /* 0xfffffa */
FAB_CTLR_VNPORT = 0x7, /* 0xfffffd */
};
enum event_cause_fcoe {
PLOGI_ACC_RCVD = 0x01,
PLOGI_RJT_RCVD = 0x02,
PLOGI_RCVD = 0x03,
PLOGO_RCVD = 0x04,
PRLI_ACC_RCVD = 0x05,
PRLI_RJT_RCVD = 0x06,
PRLI_RCVD = 0x07,
PRLO_RCVD = 0x08,
NPORT_ID_CHGD = 0x09,
FLOGO_RCVD = 0x0a,
CLR_VIRT_LNK_RCVD = 0x0b,
FLOGI_ACC_RCVD = 0x0c,
FLOGI_RJT_RCVD = 0x0d,
FDISC_ACC_RCVD = 0x0e,
FDISC_RJT_RCVD = 0x0f,
FLOGI_TMO_MAX_RETRY = 0x10,
IMPL_LOGO_ADISC_ACC = 0x11,
IMPL_LOGO_ADISC_RJT = 0x12,
IMPL_LOGO_ADISC_CNFLT = 0x13,
PRLI_TMO = 0x14,
ADISC_TMO = 0x15,
RSCN_DEV_LOST = 0x16,
SCR_ACC_RCVD = 0x17,
ADISC_RJT_RCVD = 0x18,
LOGO_SNT = 0x19,
PROTO_ERR_IMPL_LOGO = 0x1a,
};
enum fcoe_cmn_type {
FCOE_ELS,
FCOE_CT,
FCOE_SCSI_CMD,
FCOE_UNSOL_ELS,
};
enum fw_wr_stor_opcodes {
FW_RDEV_WR = 0x38,
FW_FCOE_ELS_CT_WR = 0x30,
FW_SCSI_WRITE_WR = 0x31,
FW_SCSI_READ_WR = 0x32,
FW_SCSI_CMD_WR = 0x33,
FW_SCSI_ABRT_CLS_WR = 0x34,
};
struct fw_rdev_wr {
__be32 op_to_immdlen;
__be32 alloc_to_len16;
__be64 cookie;
u8 protocol;
u8 event_cause;
u8 cur_state;
u8 prev_state;
__be32 flags_to_assoc_flowid;
union rdev_entry {
struct fcoe_rdev_entry {
__be32 flowid;
u8 protocol;
u8 event_cause;
u8 flags;
u8 rjt_reason;
u8 cur_login_st;
u8 prev_login_st;
__be16 rcv_fr_sz;
u8 rd_xfer_rdy_to_rport_type;
u8 vft_to_qos;
u8 org_proc_assoc_to_acc_rsp_code;
u8 enh_disc_to_tgt;
u8 wwnn[8];
u8 wwpn[8];
__be16 iqid;
u8 fc_oui[3];
u8 r_id[3];
} fcoe_rdev;
struct iscsi_rdev_entry {
__be32 flowid;
u8 protocol;
u8 event_cause;
u8 flags;
u8 r3;
__be16 iscsi_opts;
__be16 tcp_opts;
__be16 ip_opts;
__be16 max_rcv_len;
__be16 max_snd_len;
__be16 first_brst_len;
__be16 max_brst_len;
__be16 r4;
__be16 def_time2wait;
__be16 def_time2ret;
__be16 nop_out_intrvl;
__be16 non_scsi_to;
__be16 isid;
__be16 tsid;
__be16 port;
__be16 tpgt;
u8 r5[6];
__be16 iqid;
} iscsi_rdev;
} u;
};
#define FW_RDEV_WR_FLOWID_GET(x) (((x) >> 8) & 0xfffff)
#define FW_RDEV_WR_ASSOC_FLOWID_GET(x) (((x) >> 0) & 0xfffff)
#define FW_RDEV_WR_RPORT_TYPE_GET(x) (((x) >> 0) & 0x1f)
#define FW_RDEV_WR_NPIV_GET(x) (((x) >> 6) & 0x1)
#define FW_RDEV_WR_CLASS_GET(x) (((x) >> 4) & 0x3)
#define FW_RDEV_WR_TASK_RETRY_ID_GET(x) (((x) >> 5) & 0x1)
#define FW_RDEV_WR_RETRY_GET(x) (((x) >> 4) & 0x1)
#define FW_RDEV_WR_CONF_CMPL_GET(x) (((x) >> 3) & 0x1)
#define FW_RDEV_WR_INI_GET(x) (((x) >> 1) & 0x1)
#define FW_RDEV_WR_TGT_GET(x) (((x) >> 0) & 0x1)
struct fw_fcoe_els_ct_wr {
__be32 op_immdlen;
__be32 flowid_len16;
__be64 cookie;
__be16 iqid;
u8 tmo_val;
u8 els_ct_type;
u8 ctl_pri;
u8 cp_en_class;
__be16 xfer_cnt;
u8 fl_to_sp;
u8 l_id[3];
u8 r5;
u8 r_id[3];
__be64 rsp_dmaaddr;
__be32 rsp_dmalen;
__be32 r6;
};
#define FW_FCOE_ELS_CT_WR_OPCODE(x) ((x) << 24)
#define FW_FCOE_ELS_CT_WR_OPCODE_GET(x) (((x) >> 24) & 0xff)
#define FW_FCOE_ELS_CT_WR_IMMDLEN(x) ((x) << 0)
#define FW_FCOE_ELS_CT_WR_IMMDLEN_GET(x) (((x) >> 0) & 0xff)
#define FW_FCOE_ELS_CT_WR_SP(x) ((x) << 0)
struct fw_scsi_write_wr {
__be32 op_immdlen;
__be32 flowid_len16;
__be64 cookie;
__be16 iqid;
u8 tmo_val;
u8 use_xfer_cnt;
union fw_scsi_write_priv {
struct fcoe_write_priv {
u8 ctl_pri;
u8 cp_en_class;
u8 r3_lo[2];
} fcoe;
struct iscsi_write_priv {
u8 r3[4];
} iscsi;
} u;
__be32 xfer_cnt;
__be32 ini_xfer_cnt;
__be64 rsp_dmaaddr;
__be32 rsp_dmalen;
__be32 r4;
};
#define FW_SCSI_WRITE_WR_IMMDLEN(x) ((x) << 0)
struct fw_scsi_read_wr {
__be32 op_immdlen;
__be32 flowid_len16;
__be64 cookie;
__be16 iqid;
u8 tmo_val;
u8 use_xfer_cnt;
union fw_scsi_read_priv {
struct fcoe_read_priv {
u8 ctl_pri;
u8 cp_en_class;
u8 r3_lo[2];
} fcoe;
struct iscsi_read_priv {
u8 r3[4];
} iscsi;
} u;
__be32 xfer_cnt;
__be32 ini_xfer_cnt;
__be64 rsp_dmaaddr;
__be32 rsp_dmalen;
__be32 r4;
};
#define FW_SCSI_READ_WR_IMMDLEN(x) ((x) << 0)
struct fw_scsi_cmd_wr {
__be32 op_immdlen;
__be32 flowid_len16;
__be64 cookie;
__be16 iqid;
u8 tmo_val;
u8 r3;
union fw_scsi_cmd_priv {
struct fcoe_cmd_priv {
u8 ctl_pri;
u8 cp_en_class;
u8 r4_lo[2];
} fcoe;
struct iscsi_cmd_priv {
u8 r4[4];
} iscsi;
} u;
u8 r5[8];
__be64 rsp_dmaaddr;
__be32 rsp_dmalen;
__be32 r6;
};
#define FW_SCSI_CMD_WR_IMMDLEN(x) ((x) << 0)
#define SCSI_ABORT 0
#define SCSI_CLOSE 1
struct fw_scsi_abrt_cls_wr {
__be32 op_immdlen;
__be32 flowid_len16;
__be64 cookie;
__be16 iqid;
u8 tmo_val;
u8 sub_opcode_to_chk_all_io;
u8 r3[4];
__be64 t_cookie;
};
#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(x) ((x) << 2)
#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(x) (((x) >> 2) & 0x3f)
#define FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(x) ((x) << 0)
enum fw_cmd_stor_opcodes {
FW_FCOE_RES_INFO_CMD = 0x31,
FW_FCOE_LINK_CMD = 0x32,
FW_FCOE_VNP_CMD = 0x33,
FW_FCOE_SPARAMS_CMD = 0x35,
FW_FCOE_STATS_CMD = 0x37,
FW_FCOE_FCF_CMD = 0x38,
};
struct fw_fcoe_res_info_cmd {
__be32 op_to_read;
__be32 retval_len16;
__be16 e_d_tov;
__be16 r_a_tov_seq;
__be16 r_a_tov_els;
__be16 r_r_tov;
__be32 max_xchgs;
__be32 max_ssns;
__be32 used_xchgs;
__be32 used_ssns;
__be32 max_fcfs;
__be32 max_vnps;
__be32 used_fcfs;
__be32 used_vnps;
};
struct fw_fcoe_link_cmd {
__be32 op_to_portid;
__be32 retval_len16;
__be32 sub_opcode_fcfi;
u8 r3;
u8 lstatus;
__be16 flags;
u8 r4;
u8 set_vlan;
__be16 vlan_id;
__be32 vnpi_pkd;
__be16 r6;
u8 phy_mac[6];
u8 vnport_wwnn[8];
u8 vnport_wwpn[8];
};
#define FW_FCOE_LINK_CMD_PORTID(x) ((x) << 0)
#define FW_FCOE_LINK_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
#define FW_FCOE_LINK_CMD_SUB_OPCODE(x) ((x) << 24U)
#define FW_FCOE_LINK_CMD_FCFI(x) ((x) << 0)
#define FW_FCOE_LINK_CMD_FCFI_GET(x) (((x) >> 0) & 0xffffff)
#define FW_FCOE_LINK_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff)
struct fw_fcoe_vnp_cmd {
__be32 op_to_fcfi;
__be32 alloc_to_len16;
__be32 gen_wwn_to_vnpi;
__be32 vf_id;
__be16 iqid;
u8 vnport_mac[6];
u8 vnport_wwnn[8];
u8 vnport_wwpn[8];
u8 cmn_srv_parms[16];
u8 clsp_word_0_1[8];
};
#define FW_FCOE_VNP_CMD_FCFI(x) ((x) << 0)
#define FW_FCOE_VNP_CMD_ALLOC (1U << 31)
#define FW_FCOE_VNP_CMD_FREE (1U << 30)
#define FW_FCOE_VNP_CMD_MODIFY (1U << 29)
#define FW_FCOE_VNP_CMD_GEN_WWN (1U << 22)
#define FW_FCOE_VNP_CMD_VFID_EN (1U << 20)
#define FW_FCOE_VNP_CMD_VNPI(x) ((x) << 0)
#define FW_FCOE_VNP_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff)
struct fw_fcoe_sparams_cmd {
__be32 op_to_portid;
__be32 retval_len16;
u8 r3[7];
u8 cos;
u8 lport_wwnn[8];
u8 lport_wwpn[8];
u8 cmn_srv_parms[16];
u8 cls_srv_parms[16];
};
#define FW_FCOE_SPARAMS_CMD_PORTID(x) ((x) << 0)
struct fw_fcoe_stats_cmd {
__be32 op_to_flowid;
__be32 free_to_len16;
union fw_fcoe_stats {
struct fw_fcoe_stats_ctl {
u8 nstats_port;
u8 port_valid_ix;
__be16 r6;
__be32 r7;
__be64 stat0;
__be64 stat1;
__be64 stat2;
__be64 stat3;
__be64 stat4;
__be64 stat5;
} ctl;
struct fw_fcoe_port_stats {
__be64 tx_bcast_bytes;
__be64 tx_bcast_frames;
__be64 tx_mcast_bytes;
__be64 tx_mcast_frames;
__be64 tx_ucast_bytes;
__be64 tx_ucast_frames;
__be64 tx_drop_frames;
__be64 tx_offload_bytes;
__be64 tx_offload_frames;
__be64 rx_bcast_bytes;
__be64 rx_bcast_frames;
__be64 rx_mcast_bytes;
__be64 rx_mcast_frames;
__be64 rx_ucast_bytes;
__be64 rx_ucast_frames;
__be64 rx_err_frames;
} port_stats;
struct fw_fcoe_fcf_stats {
__be32 fip_tx_bytes;
__be32 fip_tx_fr;
__be64 fcf_ka;
__be64 mcast_adv_rcvd;
__be16 ucast_adv_rcvd;
__be16 sol_sent;
__be16 vlan_req;
__be16 vlan_rpl;
__be16 clr_vlink;
__be16 link_down;
__be16 link_up;
__be16 logo;
__be16 flogi_req;
__be16 flogi_rpl;
__be16 fdisc_req;
__be16 fdisc_rpl;
__be16 fka_prd_chg;
__be16 fc_map_chg;
__be16 vfid_chg;
u8 no_fka_req;
u8 no_vnp;
} fcf_stats;
struct fw_fcoe_pcb_stats {
__be64 tx_bytes;
__be64 tx_frames;
__be64 rx_bytes;
__be64 rx_frames;
__be32 vnp_ka;
__be32 unsol_els_rcvd;
__be64 unsol_cmd_rcvd;
__be16 implicit_logo;
__be16 flogi_inv_sparm;
__be16 fdisc_inv_sparm;
__be16 flogi_rjt;
__be16 fdisc_rjt;
__be16 no_ssn;
__be16 mac_flt_fail;
__be16 inv_fr_rcvd;
} pcb_stats;
struct fw_fcoe_scb_stats {
__be64 tx_bytes;
__be64 tx_frames;
__be64 rx_bytes;
__be64 rx_frames;
__be32 host_abrt_req;
__be32 adap_auto_abrt;
__be32 adap_abrt_rsp;
__be32 host_ios_req;
__be16 ssn_offl_ios;
__be16 ssn_not_rdy_ios;
u8 rx_data_ddp_err;
u8 ddp_flt_set_err;
__be16 rx_data_fr_err;
u8 bad_st_abrt_req;
u8 no_io_abrt_req;
u8 abort_tmo;
u8 abort_tmo_2;
__be32 abort_req;
u8 no_ppod_res_tmo;
u8 bp_tmo;
u8 adap_auto_cls;
u8 no_io_cls_req;
__be32 host_cls_req;
__be64 unsol_cmd_rcvd;
__be32 plogi_req_rcvd;
__be32 prli_req_rcvd;
__be16 logo_req_rcvd;
__be16 prlo_req_rcvd;
__be16 plogi_rjt_rcvd;
__be16 prli_rjt_rcvd;
__be32 adisc_req_rcvd;
__be32 rscn_rcvd;
__be32 rrq_req_rcvd;
__be32 unsol_els_rcvd;
u8 adisc_rjt_rcvd;
u8 scr_rjt;
u8 ct_rjt;
u8 inval_bls_rcvd;
__be32 ba_rjt_rcvd;
} scb_stats;
} u;
};
#define FW_FCOE_STATS_CMD_FLOWID(x) ((x) << 0)
#define FW_FCOE_STATS_CMD_FREE (1U << 30)
#define FW_FCOE_STATS_CMD_NSTATS(x) ((x) << 4)
#define FW_FCOE_STATS_CMD_PORT(x) ((x) << 0)
#define FW_FCOE_STATS_CMD_PORT_VALID (1U << 7)
#define FW_FCOE_STATS_CMD_IX(x) ((x) << 0)
struct fw_fcoe_fcf_cmd {
__be32 op_to_fcfi;
__be32 retval_len16;
__be16 priority_pkd;
u8 mac[6];
u8 name_id[8];
u8 fabric[8];
__be16 vf_id;
__be16 max_fcoe_size;
u8 vlan_id;
u8 fc_map[3];
__be32 fka_adv;
__be32 r6;
u8 r7_hi;
u8 fpma_to_portid;
u8 spma_mac[6];
__be64 r8;
};
#define FW_FCOE_FCF_CMD_FCFI(x) ((x) << 0)
#define FW_FCOE_FCF_CMD_FCFI_GET(x) (((x) >> 0) & 0xfffff)
#define FW_FCOE_FCF_CMD_PRIORITY_GET(x) (((x) >> 0) & 0xff)
#define FW_FCOE_FCF_CMD_FPMA_GET(x) (((x) >> 6) & 0x1)
#define FW_FCOE_FCF_CMD_SPMA_GET(x) (((x) >> 5) & 0x1)
#define FW_FCOE_FCF_CMD_LOGIN_GET(x) (((x) >> 4) & 0x1)
#define FW_FCOE_FCF_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
#endif /* _T4FW_API_STOR_H_ */