Commit cf4e6363 authored by Michael Chan; committed by James Bottomley

[SCSI] bnx2i: Add bnx2i iSCSI driver.

New iSCSI driver for Broadcom BNX2 devices.  The driver interfaces with
the CNIC driver to access the hardware.
Signed-off-by: Anil Veerabhadrappa <anilgv@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
parent a4636960
@@ -354,6 +354,7 @@ config ISCSI_TCP
http://open-iscsi.org
source "drivers/scsi/cxgb3i/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
......
@@ -129,6 +129,7 @@ obj-$(CONFIG_SCSI_STEX) += stex.o
obj-$(CONFIG_SCSI_MVSAS) += mvsas/
obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
obj-$(CONFIG_ARM) += arm/
......
/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
*
* Copyright (c) 2006 - 2009 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
*/
#ifndef __57XX_ISCSI_CONSTANTS_H_
#define __57XX_ISCSI_CONSTANTS_H_
/**
* This file defines HSI constants for the iSCSI flows
*/
/* iSCSI request op codes */
#define ISCSI_OPCODE_CLEANUP_REQUEST (7)
/* iSCSI response/messages op codes */
#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27)
#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0)
/* iSCSI task types */
#define ISCSI_TASK_TYPE_READ (0)
#define ISCSI_TASK_TYPE_WRITE (1)
#define ISCSI_TASK_TYPE_MPATH (2)
/* initial CQ sequence numbers */
#define ISCSI_INITIAL_SN (1)
/* KWQ (kernel work queue) layer codes */
#define ISCSI_KWQE_LAYER_CODE (6)
/* KWQ (kernel work queue) request op codes */
#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2)
#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3)
#define ISCSI_KWQE_OPCODE_INIT1 (4)
#define ISCSI_KWQE_OPCODE_INIT2 (5)
/* KCQ (kernel completion queue) response op codes */
#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10)
#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12)
#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13)
#define ISCSI_KCQE_OPCODE_INIT (0x14)
#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15)
#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16)
#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17)
/* was "(0X18)"; lower-cased the radix prefix for consistency (same value) */
#define ISCSI_KCQE_OPCODE_TCP_FIN (0x18)
#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19)
#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21)
/* KCQ (kernel completion queue) completion status */
#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1)
#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2)
#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3)
#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR (0x4)
#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5)
#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe)
/* Response */
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17)
/* Data-In */
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d)
/* R2T */
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
/* TMF */
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a)
#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b)
/* IP/TCP processing errors: */
/* NOTE(review): the "ISCI_" prefix below (missing the second 'S') looks like a
 * typo, but these names are part of the published HSI; renaming them would
 * break every user of this header, so they are kept as-is. */
#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40)
#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41)
#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42)
#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43)
/* iSCSI licensing errors */
/* general iSCSI license not installed */
#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50)
/* additional LOM specific iSCSI license not installed */
#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)
/* SQ/RQ/CQ DB structure sizes (bytes) */
#define ISCSI_SQ_DB_SIZE (16)
#define ISCSI_RQ_DB_SIZE (16)
#define ISCSI_CQ_DB_SIZE (80)
#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF
/* Page size codes (for flags field in connection offload request) */
#define ISCSI_PAGE_SIZE_256 (0)
#define ISCSI_PAGE_SIZE_512 (1)
#define ISCSI_PAGE_SIZE_1K (2)
#define ISCSI_PAGE_SIZE_2K (3)
#define ISCSI_PAGE_SIZE_4K (4)
#define ISCSI_PAGE_SIZE_8K (5)
#define ISCSI_PAGE_SIZE_16K (6)
#define ISCSI_PAGE_SIZE_32K (7)
#define ISCSI_PAGE_SIZE_64K (8)
#define ISCSI_PAGE_SIZE_128K (9)
#define ISCSI_PAGE_SIZE_256K (10)
#define ISCSI_PAGE_SIZE_512K (11)
#define ISCSI_PAGE_SIZE_1M (12)
#define ISCSI_PAGE_SIZE_2M (13)
#define ISCSI_PAGE_SIZE_4M (14)
#define ISCSI_PAGE_SIZE_8M (15)
/* Iscsi PDU related defines */
#define ISCSI_HEADER_SIZE (48)
#define ISCSI_DIGEST_SHIFT (2)
#define ISCSI_DIGEST_SIZE (4)
#define B577XX_ISCSI_CONNECTION_TYPE 3
#endif /*__57XX_ISCSI_CONSTANTS_H_ */
/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
*
* Copyright (c) 2006 - 2009 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
*/
#ifndef __57XX_ISCSI_HSI_LINUX_LE__
#define __57XX_ISCSI_HSI_LINUX_LE__
/*
* iSCSI Async CQE
*/
struct bnx2i_async_msg {
/* Within each 32-bit word the byte fields are listed in mirrored order for
 * big- vs little-endian hosts so the in-memory layout is identical. */
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 reserved1;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 reserved1;
u8 op_code;
#endif
u32 reserved2;
u32 exp_cmd_sn;
u32 max_cmd_sn;
u32 reserved3[2];
#if defined(__BIG_ENDIAN)
u16 reserved5;
u8 err_code;
u8 reserved4;
#elif defined(__LITTLE_ENDIAN)
u8 reserved4;
u8 err_code;
u16 reserved5;
#endif
u32 reserved6;
u32 lun[2];
/* async_event/async_vcode/param1..3: presumably the AsyncEvent, AsyncVCode
 * and Parameter fields of an iSCSI Async Message PDU — confirm against FW
 * documentation. */
#if defined(__BIG_ENDIAN)
u8 async_event;
u8 async_vcode;
u16 param1;
#elif defined(__LITTLE_ENDIAN)
u16 param1;
u8 async_vcode;
u8 async_event;
#endif
#if defined(__BIG_ENDIAN)
u16 param2;
u16 param3;
#elif defined(__LITTLE_ENDIAN)
u16 param3;
u16 param2;
#endif
u32 reserved7[3];
u32 cq_req_sn;
/*
* iSCSI Buffer Descriptor (BD)
*/
struct iscsi_bd {
/* 64-bit buffer address split into hi/lo 32-bit halves */
u32 buffer_addr_hi;
u32 buffer_addr_lo;
#if defined(__BIG_ENDIAN)
u16 reserved0;
u16 buffer_length;
#elif defined(__LITTLE_ENDIAN)
u16 buffer_length;
u16 reserved0;
#endif
/* flags: bit 6 = last BD in chain, bit 7 = first BD in chain (see masks) */
#if defined(__BIG_ENDIAN)
u16 reserved3;
u16 flags;
#define ISCSI_BD_RESERVED1 (0x3F<<0)
#define ISCSI_BD_RESERVED1_SHIFT 0
#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
#define ISCSI_BD_RESERVED2 (0xFF<<8)
#define ISCSI_BD_RESERVED2_SHIFT 8
#elif defined(__LITTLE_ENDIAN)
u16 flags;
#define ISCSI_BD_RESERVED1 (0x3F<<0)
#define ISCSI_BD_RESERVED1_SHIFT 0
#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
#define ISCSI_BD_RESERVED2 (0xFF<<8)
#define ISCSI_BD_RESERVED2_SHIFT 8
u16 reserved3;
#endif
};
/*
* iSCSI Cleanup SQ WQE
*/
struct bnx2i_cleanup_request {
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 reserved1;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 reserved1;
u8 op_code;
#endif
u32 reserved2[3];
/* itt: bits 0-13 = task index, bits 14-15 = task type (per masks below) */
#if defined(__BIG_ENDIAN)
u16 reserved3;
u16 itt;
#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
u16 reserved3;
#endif
u32 reserved4[10];
/* cq_index: completion queue to post the matching CQE to */
#if defined(__BIG_ENDIAN)
u8 cq_index;
u8 reserved6;
u16 reserved5;
#elif defined(__LITTLE_ENDIAN)
u16 reserved5;
u8 reserved6;
u8 cq_index;
#endif
};
/*
* iSCSI Cleanup CQE
*/
struct bnx2i_cleanup_response {
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 status;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 status;
u8 op_code;
#endif
u32 reserved1[3];
u32 reserved2[2];
#if defined(__BIG_ENDIAN)
u16 reserved4;
u8 err_code;
u8 reserved3;
#elif defined(__LITTLE_ENDIAN)
u8 reserved3;
u8 err_code;
u16 reserved4;
#endif
u32 reserved5[7];
/* itt: bits 0-13 = task index, bits 14-15 = task type (per masks below) */
#if defined(__BIG_ENDIAN)
u16 reserved6;
u16 itt;
#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
u16 reserved6;
#endif
u32 cq_req_sn;
};
/*
* SCSI read/write SQ WQE
*/
struct bnx2i_cmd_request {
/* op_attr: task attribute in bits 0-2, WRITE/READ/FINAL flags in bits 5-7 */
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 op_attr;
#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
#define ISCSI_CMD_REQUEST_READ (0x1<<6)
#define ISCSI_CMD_REQUEST_READ_SHIFT 6
#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 op_attr;
#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
#define ISCSI_CMD_REQUEST_READ (0x1<<6)
#define ISCSI_CMD_REQUEST_READ_SHIFT 6
#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
u8 op_code;
#endif
/* ud/sd buffer offsets — presumably unsolicited-data / solicited-data
 * starting offsets; confirm against firmware documentation. */
#if defined(__BIG_ENDIAN)
u16 ud_buffer_offset;
u16 sd_buffer_offset;
#elif defined(__LITTLE_ENDIAN)
u16 sd_buffer_offset;
u16 ud_buffer_offset;
#endif
u32 lun[2];
/* itt: bits 0-13 = task index, bits 14-15 = task type (per masks below) */
#if defined(__BIG_ENDIAN)
u16 reserved2;
u16 itt;
#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
u16 reserved2;
#endif
u32 total_data_transfer_length;
u32 cmd_sn;
u32 reserved3;
/* cdb: 16-byte SCSI command descriptor block */
u32 cdb[4];
u32 zero_fill;
/* 64-bit address of the BD (buffer descriptor) list */
u32 bd_list_addr_lo;
u32 bd_list_addr_hi;
#if defined(__BIG_ENDIAN)
u8 cq_index;
u8 sd_start_bd_index;
u8 ud_start_bd_index;
u8 num_bds;
#elif defined(__LITTLE_ENDIAN)
u8 num_bds;
u8 ud_start_bd_index;
u8 sd_start_bd_index;
u8 cq_index;
#endif
};
/*
* task statistics for write response
*/
struct bnx2i_write_resp_task_stat {
/* count of Data-In PDUs received for this task */
u32 num_data_ins;
};
/*
* task statistics for read response
*/
struct bnx2i_read_resp_task_stat {
/* counts of Data-Out PDUs sent and R2Ts received for this task */
#if defined(__BIG_ENDIAN)
u16 num_data_outs;
u16 num_r2ts;
#elif defined(__LITTLE_ENDIAN)
u16 num_r2ts;
u16 num_data_outs;
#endif
};
/*
* task statistics for iSCSI cmd response
*/
union bnx2i_cmd_resp_task_stat {
/* interpretation depends on task direction: write_stat vs read_stat */
struct bnx2i_write_resp_task_stat write_stat;
struct bnx2i_read_resp_task_stat read_stat;
};
/*
* SCSI Command CQE
*/
struct bnx2i_cmd_response {
/* response_flags: residual under/overflow indicators (bits 1-4, see masks) */
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 response_flags;
#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
u8 response;
u8 status;
#elif defined(__LITTLE_ENDIAN)
u8 status;
u8 response;
u8 response_flags;
#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
u8 op_code;
#endif
u32 data_length;
u32 exp_cmd_sn;
u32 max_cmd_sn;
u32 reserved2;
u32 residual_count;
#if defined(__BIG_ENDIAN)
u16 reserved4;
u8 err_code;
u8 reserved3;
#elif defined(__LITTLE_ENDIAN)
u8 reserved3;
u8 err_code;
u16 reserved4;
#endif
u32 reserved5[5];
/* per-task statistics; union interpretation depends on task direction */
union bnx2i_cmd_resp_task_stat task_stat;
u32 reserved6;
/* itt: bits 0-13 = task index, bits 14-15 = task type (per masks below) */
#if defined(__BIG_ENDIAN)
u16 reserved7;
u16 itt;
#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
u16 reserved7;
#endif
u32 cq_req_sn;
};
/*
* firmware middle-path request SQ WQE
*/
struct bnx2i_fw_mp_request {
/* hdr_opaque* words are passed through to firmware uninterpreted by the host */
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 op_attr;
u16 hdr_opaque1;
#elif defined(__LITTLE_ENDIAN)
u16 hdr_opaque1;
u8 op_attr;
u8 op_code;
#endif
u32 data_length;
u32 hdr_opaque2[2];
/* itt: bits 0-13 = task index, bits 14-15 = task type (per masks below) */
#if defined(__BIG_ENDIAN)
u16 reserved0;
u16 itt;
#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
u16 reserved0;
#endif
u32 hdr_opaque3[4];
u32 resp_bd_list_addr_lo;
u32 resp_bd_list_addr_hi;
/* resp_buffer: response buffer length in bits 0-23, BD count in bits 24-31 */
u32 resp_buffer;
#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24)
#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24
#if defined(__BIG_ENDIAN)
u16 reserved4;
u8 reserved3;
u8 flags;
#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
#elif defined(__LITTLE_ENDIAN)
u8 flags;
#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
u8 reserved3;
u16 reserved4;
#endif
u32 bd_list_addr_lo;
u32 bd_list_addr_hi;
#if defined(__BIG_ENDIAN)
u8 cq_index;
u8 reserved6;
u8 reserved5;
u8 num_bds;
#elif defined(__LITTLE_ENDIAN)
u8 num_bds;
u8 reserved5;
u8 reserved6;
u8 cq_index;
#endif
};
/*
* firmware response - CQE: used only by firmware
*/
struct bnx2i_fw_response {
/* Raw PDU header dwords as delivered by firmware; no per-host endian
 * mirroring here — consumed only by firmware per the comment above. */
u32 hdr_dword1[2];
u32 hdr_exp_cmd_sn;
u32 hdr_max_cmd_sn;
u32 hdr_ttt;
u32 hdr_res_cnt;
/* cqe_flags: err_code packed in bits 8-15 (see masks) */
u32 cqe_flags;
#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0)
#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0
#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8)
#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8
#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16)
#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16
u32 stat_sn;
u32 hdr_dword2[2];
u32 hdr_dword3[2];
u32 task_stat;
u32 reserved0;
u32 hdr_itt;
u32 cq_req_sn;
};
/*
* iSCSI KCQ CQE parameters
*/
union iscsi_kcqe_params {
/* 16 bytes of opcode-specific KCQE parameters (currently opaque) */
u32 reserved0[4];
};
/*
* iSCSI KCQ CQE
*/
struct iscsi_kcqe {
u32 iscsi_conn_id;
/* completion_status: one of ISCSI_KCQE_COMPLETION_STATUS_* */
u32 completion_status;
u32 iscsi_conn_context_id;
union iscsi_kcqe_params params;
/* flags: layer code in bits 4-6 (see ISCSI_KCQE_LAYER_CODE mask) */
#if defined(__BIG_ENDIAN)
u8 flags;
#define ISCSI_KCQE_RESERVED0 (0xF<<0)
#define ISCSI_KCQE_RESERVED0_SHIFT 0
#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
#define ISCSI_KCQE_RESERVED1 (0x1<<7)
#define ISCSI_KCQE_RESERVED1_SHIFT 7
u8 op_code;
u16 qe_self_seq;
#elif defined(__LITTLE_ENDIAN)
u16 qe_self_seq;
u8 op_code;
u8 flags;
#define ISCSI_KCQE_RESERVED0 (0xF<<0)
#define ISCSI_KCQE_RESERVED0_SHIFT 0
#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
#define ISCSI_KCQE_RESERVED1 (0x1<<7)
#define ISCSI_KCQE_RESERVED1_SHIFT 7
#endif
};
/*
* iSCSI KWQE header
*/
struct iscsi_kwqe_header {
/* flags: layer code in bits 4-6 (see ISCSI_KWQE_HEADER_LAYER_CODE mask) */
#if defined(__BIG_ENDIAN)
u8 flags;
#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
u8 op_code;
#elif defined(__LITTLE_ENDIAN)
u8 op_code;
u8 flags;
#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
#endif
};
/*
* iSCSI firmware init request 1
*/
struct iscsi_kwqe_init1 {
#if defined(__BIG_ENDIAN)
struct iscsi_kwqe_header hdr;
u8 reserved0;
u8 num_cqs;
#elif defined(__LITTLE_ENDIAN)
u8 num_cqs;
u8 reserved0;
struct iscsi_kwqe_header hdr;
#endif
u32 dummy_buffer_addr_lo;
u32 dummy_buffer_addr_hi;
#if defined(__BIG_ENDIAN)
u16 num_ccells_per_conn;
u16 num_tasks_per_conn;
#elif defined(__LITTLE_ENDIAN)
u16 num_tasks_per_conn;
u16 num_ccells_per_conn;
#endif
#if defined(__BIG_ENDIAN)
u16 sq_wqes_per_page;
u16 sq_num_wqes;
#elif defined(__LITTLE_ENDIAN)
u16 sq_num_wqes;
u16 sq_wqes_per_page;
#endif
/* flags: ISCSI_PAGE_SIZE_* code in bits 0-3, delayed-ACK/keep-alive
 * enables in bits 4-5 (see masks below) */
#if defined(__BIG_ENDIAN)
u8 cq_log_wqes_per_page;
u8 flags;
#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
u16 cq_num_wqes;
#elif defined(__LITTLE_ENDIAN)
u16 cq_num_wqes;
u8 flags;
#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
u8 cq_log_wqes_per_page;
#endif
#if defined(__BIG_ENDIAN)
u16 cq_num_pages;
u16 sq_num_pages;
#elif defined(__LITTLE_ENDIAN)
u16 sq_num_pages;
u16 cq_num_pages;
#endif
#if defined(__BIG_ENDIAN)
u16 rq_buffer_size;
u16 rq_num_wqes;
#elif defined(__LITTLE_ENDIAN)
u16 rq_num_wqes;
u16 rq_buffer_size;
#endif
};
/*
* iSCSI firmware init request 2
*/
struct iscsi_kwqe_init2 {
#if defined(__BIG_ENDIAN)
struct iscsi_kwqe_header hdr;
u16 max_cq_sqn;
#elif defined(__LITTLE_ENDIAN)
u16 max_cq_sqn;
struct iscsi_kwqe_header hdr;
#endif
/* error_bit_map: 64-bit mask of error conditions — exact bit meanings are
 * firmware-defined; confirm against FW documentation */
u32 error_bit_map[2];
u32 reserved1[5];
};
/*
* Initial iSCSI connection offload request 1
*/
struct iscsi_kwqe_conn_offload1 {
#if defined(__BIG_ENDIAN)
struct iscsi_kwqe_header hdr;
u16 iscsi_conn_id;
#elif defined(__LITTLE_ENDIAN)
u16 iscsi_conn_id;
struct iscsi_kwqe_header hdr;
#endif
/* 64-bit addresses of the SQ and CQ page tables, split hi/lo */
u32 sq_page_table_addr_lo;
u32 sq_page_table_addr_hi;
u32 cq_page_table_addr_lo;
u32 cq_page_table_addr_hi;
u32 reserved0[3];
};
/*
* iSCSI Page Table Entry (PTE)
*/
struct iscsi_pte {
/* 64-bit page address, high word first */
u32 hi;
u32 lo;
};
/*
* Initial iSCSI connection offload request 2
*/
struct iscsi_kwqe_conn_offload2 {
#if defined(__BIG_ENDIAN)
struct iscsi_kwqe_header hdr;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
struct iscsi_kwqe_header hdr;
#endif
u32 rq_page_table_addr_lo;
u32 rq_page_table_addr_hi;
/* first page-table entries of the SQ and CQ */
struct iscsi_pte sq_first_pte;
struct iscsi_pte cq_first_pte;
/* number of additional offload3 WQEs that follow this one */
u32 num_additional_wqes;
};
/*
* Initial iSCSI connection offload request 3
*/
struct iscsi_kwqe_conn_offload3 {
#if defined(__BIG_ENDIAN)
struct iscsi_kwqe_header hdr;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
struct iscsi_kwqe_header hdr;
#endif
u32 reserved1;
/* up to three further queue page-table entries */
struct iscsi_pte qp_first_pte[3];
};
/*
* iSCSI connection update request
*/
struct iscsi_kwqe_conn_update {
#if defined(__BIG_ENDIAN)
struct iscsi_kwqe_header hdr;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
struct iscsi_kwqe_header hdr;
#endif
/* conn_flags: negotiated login parameters — header/data digest,
 * InitialR2T, ImmediateData (see masks below) */
#if defined(__BIG_ENDIAN)
u8 session_error_recovery_level;
u8 max_outstanding_r2ts;
u8 reserved2;
u8 conn_flags;
#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
#elif defined(__LITTLE_ENDIAN)
u8 conn_flags;
#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
u8 reserved2;
u8 max_outstanding_r2ts;
u8 session_error_recovery_level;
#endif
u32 context_id;
u32 max_send_pdu_length;
u32 max_recv_pdu_length;
u32 first_burst_length;
u32 max_burst_length;
u32 exp_stat_sn;
};
/*
* iSCSI destroy connection request
*/
struct iscsi_kwqe_conn_destroy {
#if defined(__BIG_ENDIAN)
struct iscsi_kwqe_header hdr;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
struct iscsi_kwqe_header hdr;
#endif
/* context_id: connection context to tear down */
u32 context_id;
u32 reserved1[6];
};
/*
* iSCSI KWQ WQE
*/
union iscsi_kwqe {
/* one WQE slot; the active member is selected by the KWQE opcode */
struct iscsi_kwqe_init1 init1;
struct iscsi_kwqe_init2 init2;
struct iscsi_kwqe_conn_offload1 conn_offload1;
struct iscsi_kwqe_conn_offload2 conn_offload2;
struct iscsi_kwqe_conn_update conn_update;
struct iscsi_kwqe_conn_destroy conn_destroy;
};
/*
* iSCSI Login SQ WQE
*/
struct bnx2i_login_request {
/* op_attr: login stage transition bits (NSG/CSG/Continue/Transit, see masks) */
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 op_attr;
#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
u8 version_max;
u8 version_min;
#elif defined(__LITTLE_ENDIAN)
u8 version_min;
u8 version_max;
u8 op_attr;
#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
u8 op_code;
#endif
u32 data_length;
/* isid: 48-bit initiator session ID split into lo (32) + hi (16) */
u32 isid_lo;
#if defined(__BIG_ENDIAN)
u16 isid_hi;
u16 tsih;
#elif defined(__LITTLE_ENDIAN)
u16 tsih;
u16 isid_hi;
#endif
/* itt: bits 0-13 = task index, bits 14-15 = task type (per masks below) */
#if defined(__BIG_ENDIAN)
u16 reserved2;
u16 itt;
#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
u16 reserved2;
#endif
#if defined(__BIG_ENDIAN)
u16 cid;
u16 reserved3;
#elif defined(__LITTLE_ENDIAN)
u16 reserved3;
u16 cid;
#endif
u32 cmd_sn;
u32 exp_stat_sn;
u32 reserved4;
u32 resp_bd_list_addr_lo;
u32 resp_bd_list_addr_hi;
/* resp_buffer: response buffer length in bits 0-23, BD count in bits 24-31 */
u32 resp_buffer;
#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24)
#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24
#if defined(__BIG_ENDIAN)
u16 reserved8;
u8 reserved7;
u8 flags;
#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
#elif defined(__LITTLE_ENDIAN)
u8 flags;
#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
u8 reserved7;
u16 reserved8;
#endif
u32 bd_list_addr_lo;
u32 bd_list_addr_hi;
#if defined(__BIG_ENDIAN)
u8 cq_index;
u8 reserved10;
u8 reserved9;
u8 num_bds;
#elif defined(__LITTLE_ENDIAN)
u8 num_bds;
u8 reserved9;
u8 reserved10;
u8 cq_index;
#endif
};
/*
* iSCSI Login CQE
*/
struct bnx2i_login_response {
/* response_flags: login stage bits mirroring the request (see masks) */
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 response_flags;
#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
u8 version_max;
u8 version_active;
#elif defined(__LITTLE_ENDIAN)
u8 version_active;
u8 version_max;
u8 response_flags;
#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
u8 op_code;
#endif
u32 data_length;
u32 exp_cmd_sn;
u32 max_cmd_sn;
u32 reserved1[2];
#if defined(__BIG_ENDIAN)
u16 reserved3;
u8 err_code;
u8 reserved2;
#elif defined(__LITTLE_ENDIAN)
u8 reserved2;
u8 err_code;
u16 reserved3;
#endif
u32 stat_sn;
u32 isid_lo;
#if defined(__BIG_ENDIAN)
u16 isid_hi;
u16 tsih;
#elif defined(__LITTLE_ENDIAN)
u16 tsih;
u16 isid_hi;
#endif
#if defined(__BIG_ENDIAN)
u8 status_class;
u8 status_detail;
u16 reserved4;
#elif defined(__LITTLE_ENDIAN)
u16 reserved4;
u8 status_detail;
u8 status_class;
#endif
u32 reserved5[3];
/* itt: bits 0-13 = task index, bits 14-15 = task type (per masks below) */
#if defined(__BIG_ENDIAN)
u16 reserved6;
u16 itt;
#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
u16 reserved6;
#endif
u32 cq_req_sn;
};
/*
* iSCSI Logout SQ WQE
*/
struct bnx2i_logout_request {
/* op_attr: logout reason in bits 0-6; bit 7 must be set (see masks) */
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 op_attr;
#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 op_attr;
#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
u8 op_code;
#endif
u32 data_length;
u32 reserved1[2];
/* itt: bits 0-13 = task index, bits 14-15 = task type (per masks below) */
#if defined(__BIG_ENDIAN)
u16 reserved2;
u16 itt;
#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
u16 reserved2;
#endif
#if defined(__BIG_ENDIAN)
u16 cid;
u16 reserved3;
#elif defined(__LITTLE_ENDIAN)
u16 reserved3;
u16 cid;
#endif
u32 cmd_sn;
u32 reserved4[5];
u32 zero_fill;
u32 bd_list_addr_lo;
u32 bd_list_addr_hi;
#if defined(__BIG_ENDIAN)
u8 cq_index;
u8 reserved6;
u8 reserved5;
u8 num_bds;
#elif defined(__LITTLE_ENDIAN)
u8 num_bds;
u8 reserved5;
u8 reserved6;
u8 cq_index;
#endif
};
/*
 * iSCSI Logout CQE
 *
 * Completion-queue entry posted by the chip when the iSCSI Logout
 * Response PDU arrives.  Layout fixed by the firmware HSI; endian blocks
 * mirror the same 32-bit words for either host byte order.
 */
struct bnx2i_logout_response {
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 reserved1;
u8 response;
u8 reserved0;
#elif defined(__LITTLE_ENDIAN)
u8 reserved0;
u8 response;
u8 reserved1;
u8 op_code;
#endif
u32 reserved2;
u32 exp_cmd_sn;
u32 max_cmd_sn;
u32 reserved3[2];
#if defined(__BIG_ENDIAN)
u16 reserved5;
u8 err_code;
u8 reserved4;
#elif defined(__LITTLE_ENDIAN)
u8 reserved4;
u8 err_code;
u16 reserved5;
#endif
u32 reserved6[3];
/* Time2Wait/Time2Retain values carried by the Logout Response PDU */
#if defined(__BIG_ENDIAN)
u16 time_to_wait;
u16 time_to_retain;
#elif defined(__LITTLE_ENDIAN)
u16 time_to_retain;
u16 time_to_wait;
#endif
u32 reserved7[3];
#if defined(__BIG_ENDIAN)
u16 reserved8;
u16 itt;
#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
u16 reserved8;
#endif
u32 cq_req_sn;
};
/*
 * iSCSI Nop-In CQE
 *
 * Completion-queue entry for a received NOP-In PDU.  Layout fixed by the
 * firmware HSI; endian blocks mirror the same 32-bit words.
 */
struct bnx2i_nop_in_msg {
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 reserved1;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 reserved1;
u8 op_code;
#endif
u32 data_length;
u32 exp_cmd_sn;
u32 max_cmd_sn;
u32 ttt;
u32 reserved2;
#if defined(__BIG_ENDIAN)
u16 reserved4;
u8 err_code;
u8 reserved3;
#elif defined(__LITTLE_ENDIAN)
u8 reserved3;
u8 err_code;
u16 reserved4;
#endif
u32 reserved5;
u32 lun[2];
u32 reserved6[4];
#if defined(__BIG_ENDIAN)
u16 reserved7;
u16 itt;
#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
u16 reserved7;
#endif
u32 cq_req_sn;
};
/*
 * iSCSI NOP-OUT SQ WQE
 *
 * Send-queue work-queue entry used to transmit a NOP-Out PDU.  Layout
 * fixed by the firmware HSI; endian blocks mirror the same 32-bit words.
 */
struct bnx2i_nop_out_request {
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 op_attr;
#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 op_attr;
#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
u8 op_code;
#endif
u32 data_length;
u32 lun[2];
#if defined(__BIG_ENDIAN)
u16 reserved2;
u16 itt;
#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
u16 reserved2;
#endif
u32 ttt;
u32 cmd_sn;
u32 reserved3[2];
/* low/high 32 bits of the response BD list DMA address */
u32 resp_bd_list_addr_lo;
u32 resp_bd_list_addr_hi;
/* response buffer length (24 bits) and response BD count (8 bits) */
u32 resp_buffer;
#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24)
#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24
#if defined(__BIG_ENDIAN)
u16 reserved7;
u8 reserved6;
u8 flags;
#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
#elif defined(__LITTLE_ENDIAN)
u8 flags;
#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
u8 reserved6;
u16 reserved7;
#endif
u32 bd_list_addr_lo;
u32 bd_list_addr_hi;
#if defined(__BIG_ENDIAN)
u8 cq_index;
u8 reserved9;
u8 reserved8;
u8 num_bds;
#elif defined(__LITTLE_ENDIAN)
u8 num_bds;
u8 reserved8;
u8 reserved9;
u8 cq_index;
#endif
};
/*
 * iSCSI Reject CQE
 *
 * Completion-queue entry for a received Reject PDU.  Layout fixed by the
 * firmware HSI; endian blocks mirror the same 32-bit words.
 */
struct bnx2i_reject_msg {
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 reserved1;
u8 reason;
u8 reserved0;
#elif defined(__LITTLE_ENDIAN)
u8 reserved0;
u8 reason;
u8 reserved1;
u8 op_code;
#endif
u32 data_length;
u32 exp_cmd_sn;
u32 max_cmd_sn;
u32 reserved2[2];
#if defined(__BIG_ENDIAN)
u16 reserved4;
u8 err_code;
u8 reserved3;
#elif defined(__LITTLE_ENDIAN)
u8 reserved3;
u8 err_code;
u16 reserved4;
#endif
u32 reserved5[8];
u32 cq_req_sn;
};
/*
 * bnx2i iSCSI TMF SQ WQE
 *
 * Send-queue work-queue entry used to issue an iSCSI Task Management
 * Function request.  Layout fixed by the firmware HSI; endian blocks
 * mirror the same 32-bit words.
 */
struct bnx2i_tmf_request {
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 op_attr;
#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 op_attr;
#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
u8 op_code;
#endif
u32 data_length;
u32 lun[2];
#if defined(__BIG_ENDIAN)
u16 reserved1;
u16 itt;
#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
u16 reserved1;
#endif
/* ref_itt/ref_cmd_sn identify the task the TMF refers to */
u32 ref_itt;
u32 cmd_sn;
u32 reserved2;
u32 ref_cmd_sn;
u32 reserved3[3];
u32 zero_fill;
u32 bd_list_addr_lo;
u32 bd_list_addr_hi;
#if defined(__BIG_ENDIAN)
u8 cq_index;
u8 reserved5;
u8 reserved4;
u8 num_bds;
#elif defined(__LITTLE_ENDIAN)
u8 num_bds;
u8 reserved4;
u8 reserved5;
u8 cq_index;
#endif
};
/*
 * iSCSI Text SQ WQE
 *
 * Send-queue work-queue entry used to transmit an iSCSI Text request.
 * Layout fixed by the firmware HSI; endian blocks mirror the same
 * 32-bit words.
 */
struct bnx2i_text_request {
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 op_attr;
#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 op_attr;
#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
u8 op_code;
#endif
u32 data_length;
u32 lun[2];
#if defined(__BIG_ENDIAN)
u16 reserved3;
u16 itt;
#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
u16 reserved3;
#endif
u32 ttt;
u32 cmd_sn;
u32 reserved4[2];
u32 resp_bd_list_addr_lo;
u32 resp_bd_list_addr_hi;
/* response buffer length (24 bits) and response BD count (8 bits) */
u32 resp_buffer;
#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24)
#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24
u32 zero_fill;
u32 bd_list_addr_lo;
u32 bd_list_addr_hi;
#if defined(__BIG_ENDIAN)
u8 cq_index;
u8 reserved7;
u8 reserved6;
u8 num_bds;
#elif defined(__LITTLE_ENDIAN)
u8 num_bds;
u8 reserved6;
u8 reserved7;
u8 cq_index;
#endif
};
/*
 * iSCSI SQ WQE
 *
 * Overlay of every send-queue WQE format; each SQ slot is interpreted as
 * exactly one of these members depending on the posted opcode.
 */
union iscsi_request {
struct bnx2i_cmd_request cmd;
struct bnx2i_tmf_request tmf;
struct bnx2i_nop_out_request nop_out;
struct bnx2i_login_request login_req;
struct bnx2i_text_request text;
struct bnx2i_logout_request logout_req;
struct bnx2i_cleanup_request cleanup;
};
/*
 * iSCSI TMF CQE
 *
 * Completion-queue entry for a Task Management Function response.
 * Layout fixed by the firmware HSI; endian blocks mirror the same
 * 32-bit words.
 */
struct bnx2i_tmf_response {
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 reserved1;
u8 response;
u8 reserved0;
#elif defined(__LITTLE_ENDIAN)
u8 reserved0;
u8 response;
u8 reserved1;
u8 op_code;
#endif
u32 reserved2;
u32 exp_cmd_sn;
u32 max_cmd_sn;
u32 reserved3[2];
#if defined(__BIG_ENDIAN)
u16 reserved5;
u8 err_code;
u8 reserved4;
#elif defined(__LITTLE_ENDIAN)
u8 reserved4;
u8 err_code;
u16 reserved5;
#endif
u32 reserved6[7];
#if defined(__BIG_ENDIAN)
u16 reserved7;
u16 itt;
#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
u16 reserved7;
#endif
u32 cq_req_sn;
};
/*
 * iSCSI Text CQE
 *
 * Completion-queue entry for an iSCSI Text response.  Layout fixed by
 * the firmware HSI; endian blocks mirror the same 32-bit words.
 */
struct bnx2i_text_response {
#if defined(__BIG_ENDIAN)
u8 op_code;
u8 response_flags;
#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 response_flags;
#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
u8 op_code;
#endif
u32 data_length;
u32 exp_cmd_sn;
u32 max_cmd_sn;
u32 ttt;
u32 reserved2;
#if defined(__BIG_ENDIAN)
u16 reserved4;
u8 err_code;
u8 reserved3;
#elif defined(__LITTLE_ENDIAN)
u8 reserved3;
u8 err_code;
u16 reserved4;
#endif
u32 reserved5;
u32 lun[2];
u32 reserved6[4];
#if defined(__BIG_ENDIAN)
u16 reserved7;
u16 itt;
#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 itt;
#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
u16 reserved7;
#endif
u32 cq_req_sn;
};
/*
 * iSCSI CQE
 *
 * Overlay of every completion-queue entry format; each CQE is interpreted
 * as exactly one of these members depending on the completed opcode.
 */
union iscsi_response {
struct bnx2i_cmd_response cmd;
struct bnx2i_tmf_response tmf;
struct bnx2i_login_response login_resp;
struct bnx2i_text_response text;
struct bnx2i_logout_response logout_resp;
struct bnx2i_cleanup_response cleanup;
struct bnx2i_reject_msg reject;
struct bnx2i_async_msg async;
struct bnx2i_nop_in_msg nop_in;
};
#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
config SCSI_BNX2_ISCSI
tristate "Broadcom NetXtreme II iSCSI support"
select SCSI_ISCSI_ATTRS
select CNIC
---help---
This driver supports iSCSI offload for the Broadcom NetXtreme II
devices.
bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
*
* Copyright (c) 2006 - 2009 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
*/
#ifndef _BNX2I_H_
#define _BNX2I_H_
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/in.h>
#include <linux/kfifo.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/iscsi_proto.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include "../../net/cnic_if.h"
#include "57xx_iscsi_hsi.h"
#include "57xx_iscsi_constants.h"
/* driver identity and per-adapter resource limits */
#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
#define BNX2I_MAX_ADAPTERS 8
#define ISCSI_MAX_CONNS_PER_HBA 128
#define ISCSI_MAX_SESS_PER_HBA ISCSI_MAX_CONNS_PER_HBA
#define ISCSI_MAX_CMDS_PER_SESS 128
/* Total active commands across all connections supported by devices */
#define ISCSI_MAX_CMDS_PER_HBA_5708 (28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
#define ISCSI_MAX_CMDS_PER_HBA_5709 (128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
#define ISCSI_MAX_CMDS_PER_HBA_57710 (256 * (ISCSI_MAX_CMDS_PER_SESS - 1))
#define ISCSI_MAX_BDS_PER_CMD 32
#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
#define MAX_BD_LENGTH 65535
#define BD_SPLIT_SIZE 32768
/* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */
#define BNX2I_SQ_WQES_MIN 16
#define BNX2I_570X_SQ_WQES_MAX 128
#define BNX2I_5770X_SQ_WQES_MAX 512
#define BNX2I_570X_SQ_WQES_DEFAULT 128
#define BNX2I_5770X_SQ_WQES_DEFAULT 256
#define BNX2I_570X_CQ_WQES_MAX 128
#define BNX2I_5770X_CQ_WQES_MAX 512
#define BNX2I_RQ_WQES_MIN 16
#define BNX2I_RQ_WQES_MAX 32
#define BNX2I_RQ_WQES_DEFAULT 16
/* CCELLs per conn */
#define BNX2I_CCELLS_MIN 16
#define BNX2I_CCELLS_MAX 96
#define BNX2I_CCELLS_DEFAULT 64
#define ITT_INVALID_SIGNATURE 0xFFFF
#define ISCSI_CMD_CLEANUP_TIMEOUT 100
#define BNX2I_CONN_CTX_BUF_SIZE 16384
/* fixed sizes (bytes) of SQ/RQ/CQ entries as defined by the firmware HSI */
#define BNX2I_SQ_WQE_SIZE 64
#define BNX2I_RQ_WQE_SIZE 256
#define BNX2I_CQE_SIZE 64
#define MB_KERNEL_CTX_SHIFT 8
#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT)
#define CTX_SHIFT 7
#define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT)
#define CTX_OFFSET 0x10000
#define MAX_CID_CNT 0x4000
/* 5709 context registers */
#define BNX2_MQ_CONFIG2 0x00003d00
#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4)
#define BNX2_MQ_CONFIG2_FIRST_L4L5 (0x1fL<<8)
/* 57710's BAR2 is mapped to doorbell registers */
#define BNX2X_DOORBELL_PCI_BAR 2
#define BNX2X_MAX_CQS 8
#define CNIC_ARM_CQE 1
#define CNIC_DISARM_CQE 0
/* helpers for accessing mapped adapter registers via hba->regview */
#define REG_RD(__hba, offset) \
readl(__hba->regview + offset)
#define REG_WR(__hba, offset, val) \
writel(val, __hba->regview + offset)
/**
 * struct generic_pdu_resc - login pdu resource structure
 *
 * @req_buf:            driver buffer used to stage payload associated with
 *                      the login request
 * @req_dma_addr:       dma address for iscsi login request payload buffer
 * @req_buf_size:       actual login request payload length
 * @req_wr_ptr:         pointer into login request buffer when next data is
 *                      to be written
 * @resp_hdr:           iscsi header where iscsi login response header is to
 *                      be recreated
 * @resp_buf:           buffer to stage login response payload
 * @resp_dma_addr:      login response payload buffer dma address
 * @resp_buf_size:      login response payload length
 * @resp_wr_ptr:        pointer into login response buffer when next data is
 *                      to be written
 * @req_bd_tbl:         iscsi login request payload BD table
 * @req_bd_dma:         login request BD table dma address
 * @resp_bd_tbl:        iscsi login response payload BD table
 * @resp_bd_dma:        login response BD table dma address
 *
 * following structure defines buffer info for generic pdus such as iSCSI Login,
 * Logout and NOP
 */
struct generic_pdu_resc {
char *req_buf;
dma_addr_t req_dma_addr;
u32 req_buf_size;
char *req_wr_ptr;
struct iscsi_hdr resp_hdr;
char *resp_buf;
dma_addr_t resp_dma_addr;
u32 resp_buf_size;
char *resp_wr_ptr;
char *req_bd_tbl;
dma_addr_t req_bd_dma;
char *resp_bd_tbl;
dma_addr_t resp_bd_dma;
};
/**
 * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
 *
 * @link:               list head to link elements
 * @max_ptrs:           maximum pointers that can be stored in this page
 * @num_valid:          number of pointers valid in this page
 * @page:               base address for page pointer array
 *
 * structure to track DMA'able memory allocated for command BD tables
 */
struct bd_resc_page {
struct list_head link;
u32 max_ptrs;
u32 num_valid;
void *page[1];
};
/**
 * struct io_bdt - I/O buffer descriptor table
 *
 * @bd_tbl:             BD table's virtual address
 * @bd_tbl_dma:         BD table's dma address
 * @bd_valid:           num valid BD entries
 *
 * IO BD table
 */
struct io_bdt {
struct iscsi_bd *bd_tbl;
dma_addr_t bd_tbl_dma;
u16 bd_valid;
};
/**
 * bnx2i_cmd - iscsi command structure
 *
 * @hdr:                iscsi header of this command
 * @conn:               owning bnx2i connection
 * @scsi_cmd:           SCSI-ML task pointer corresponding to this iscsi cmd
 * @sg:                 SG list
 * @io_tbl:             buffer descriptor (BD) table
 * @bd_tbl_dma:         buffer descriptor (BD) table's dma address
 * @req:                firmware command-request WQE built for this command
 */
struct bnx2i_cmd {
struct iscsi_hdr hdr;
struct bnx2i_conn *conn;
struct scsi_cmnd *scsi_cmd;
struct scatterlist *sg;
struct io_bdt io_tbl;
dma_addr_t bd_tbl_dma;
struct bnx2i_cmd_request req;
};
/**
 * struct bnx2i_conn - iscsi connection structure
 *
 * @cls_conn:           pointer to iscsi cls conn
 * @hba:                adapter structure pointer
 * @cmd_cleanup_cmpl:   completion used to wait for command cleanup to
 *                      finish (see ISCSI_CMD_CLEANUP_TIMEOUT)
 * @is_bound:           nonzero once the connection has been bound -
 *                      TODO confirm exact semantics against callers
 * @iscsi_conn_cid:     iscsi conn id
 * @fw_cid:             firmware iscsi context id
 * @poll_timer:         timer - NOTE(review): purpose not visible here,
 *                      verify against users of this field
 * @ep:                 endpoint structure pointer
 * @gen_pdu:            login/nopout/logout pdu resources
 * @violation_notified: bit mask used to track iscsi error/warning messages
 *                      already printed out
 *
 * iSCSI connection structure
 */
struct bnx2i_conn {
struct iscsi_cls_conn *cls_conn;
struct bnx2i_hba *hba;
struct completion cmd_cleanup_cmpl;
int is_bound;
u32 iscsi_conn_cid;
#define BNX2I_CID_RESERVED 0x5AFF
u32 fw_cid;
struct timer_list poll_timer;
/*
 * Queue Pair (QP) related structure elements.
 */
struct bnx2i_endpoint *ep;
/*
 * Buffer for login negotiation process
 */
struct generic_pdu_resc gen_pdu;
u64 violation_notified;
};
/**
 * struct iscsi_cid_queue - Per adapter iscsi cid queue
 *
 * @cid_que_base:       queue base memory
 * @cid_que:            queue memory pointer
 * @cid_q_prod_idx:     producer index
 * @cid_q_cons_idx:     consumer index
 * @cid_q_max_idx:      max index. used to detect wrap around condition
 * @cid_free_cnt:       queue size
 * @conn_cid_tbl:       iscsi cid to conn structure mapping table
 *
 * Per adapter iSCSI CID Queue
 */
struct iscsi_cid_queue {
void *cid_que_base;
u32 *cid_que;
u32 cid_q_prod_idx;
u32 cid_q_cons_idx;
u32 cid_q_max_idx;
u32 cid_free_cnt;
struct bnx2i_conn **conn_cid_tbl;
};
/**
 * struct bnx2i_hba - bnx2i adapter structure
 *
 * @link:                  list head to link elements
 * @cnic:                  pointer to cnic device
 * @pcidev:                pointer to pci dev
 * @netdev:                pointer to netdev structure
 * @regview:               mapped PCI register space
 * @age:                   age, incremented by every recovery
 * @cnic_dev_type:         cnic device type, 5706/5708/5709/57710
 * @mail_queue_access:     mailbox queue access mode, applicable to 5709 only
 * @reg_with_cnic:         indicates whether the device is registered with CNIC
 * @adapter_state:         adapter state, UP, GOING_DOWN, LINK_DOWN
 * @mtu_supported:         Ethernet MTU supported
 * @shost:                 scsi host pointer
 * @max_sqes:              SQ size
 * @max_rqes:              RQ size
 * @max_cqes:              CQ size
 * @num_ccell:             number of command cells per connection
 * @ofld_conns_active:     active connection list
 * @max_active_conns:      max offload connections supported by this device
 * @cid_que:               iscsi cid queue
 * @ep_rdwr_lock:          read / write lock to synchronize various ep lists
 * @ep_ofld_list:          connection list for pending offload completion
 * @ep_destroy_list:       connection list for pending destroy completion
 * @mp_bd_tbl:             BD table to be used with middle path requests
 * @mp_bd_dma:             DMA address of 'mp_bd_tbl' memory buffer
 * @dummy_buffer:          Dummy buffer to be used with zero length scsicmd reqs
 * @dummy_buf_dma:         DMA address of 'dummy_buffer' memory buffer
 * @lock:                  lock to synchronize access to hba structure
 * @net_dev_lock:          mutex to synchronize net device access
 * @pci_did:               PCI device ID
 * @pci_vid:               PCI vendor ID
 * @pci_sdid:              PCI subsystem device ID
 * @pci_svid:              PCI subsystem vendor ID
 * @pci_func:              PCI function number in system pci tree
 * @pci_devno:             PCI device number in system pci tree
 * @num_wqe_sent:          statistic counter, total wqe's sent
 * @num_cqe_rcvd:          statistic counter, total cqe's received
 * @num_intr_claimed:      statistic counter, total interrupts claimed
 * @link_changed_count:    statistic counter, num of link change notifications
 *                         received
 * @ipaddr_changed_count:  statistic counter, num times IP address changed while
 *                         at least one connection is offloaded
 * @num_sess_opened:       statistic counter, total num sessions opened
 * @num_conn_opened:       statistic counter, total num conns opened on this hba
 * @ctx_ccell_tasks:       captures number of ccells and tasks supported by
 *                         currently offloaded connection, used to decode
 *                         context memory
 *
 * Adapter Data Structure
 */
struct bnx2i_hba {
struct list_head link;
struct cnic_dev *cnic;
struct pci_dev *pcidev;
struct net_device *netdev;
void __iomem *regview;
u32 age;
unsigned long cnic_dev_type;
#define BNX2I_NX2_DEV_5706 0x0
#define BNX2I_NX2_DEV_5708 0x1
#define BNX2I_NX2_DEV_5709 0x2
#define BNX2I_NX2_DEV_57710 0x3
u32 mail_queue_access;
#define BNX2I_MQ_KERNEL_MODE 0x0
#define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1
#define BNX2I_MQ_BIN_MODE 0x2
unsigned long reg_with_cnic;
#define BNX2I_CNIC_REGISTERED 1
unsigned long adapter_state;
#define ADAPTER_STATE_UP 0
#define ADAPTER_STATE_GOING_DOWN 1
#define ADAPTER_STATE_LINK_DOWN 2
#define ADAPTER_STATE_INIT_FAILED 31
unsigned int mtu_supported;
#define BNX2I_MAX_MTU_SUPPORTED 1500
struct Scsi_Host *shost;
u32 max_sqes;
u32 max_rqes;
u32 max_cqes;
u32 num_ccell;
int ofld_conns_active;
int max_active_conns;
struct iscsi_cid_queue cid_que;
rwlock_t ep_rdwr_lock;
struct list_head ep_ofld_list;
struct list_head ep_destroy_list;
/*
 * BD table to be used with MP (Middle Path) requests.
 */
char *mp_bd_tbl;
dma_addr_t mp_bd_dma;
char *dummy_buffer;
dma_addr_t dummy_buf_dma;
spinlock_t lock; /* protects hba structure access */
struct mutex net_dev_lock;/* sync net device access */
/*
 * PCI related info.
 */
u16 pci_did;
u16 pci_vid;
u16 pci_sdid;
u16 pci_svid;
u16 pci_func;
u16 pci_devno;
/*
 * Following are a bunch of statistics useful during development
 * and later stage for score boarding.
 */
u32 num_wqe_sent;
u32 num_cqe_rcvd;
u32 num_intr_claimed;
u32 link_changed_count;
u32 ipaddr_changed_count;
u32 num_sess_opened;
u32 num_conn_opened;
unsigned int ctx_ccell_tasks;
};
/*******************************************************************************
 * QP [ SQ / RQ / CQ ] info.
 ******************************************************************************/
/*
 * SQ/RQ/CQ generic structure definition - opaque fixed-size queue entries;
 * sizes come from the firmware HSI (BNX2I_*_WQE_SIZE / BNX2I_CQE_SIZE).
 */
struct sqe {
u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
};
struct rqe {
u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
};
struct cqe {
u8 cqe_byte[BNX2I_CQE_SIZE];
};
/*
 * Doorbell/event register byte offsets within the connection's context
 * window; the offsets differ with host endianness because the registers
 * are accessed as sub-word fields.
 */
enum {
#if defined(__LITTLE_ENDIAN)
CNIC_EVENT_COAL_INDEX = 0x0,
CNIC_SEND_DOORBELL = 0x4,
CNIC_EVENT_CQ_ARM = 0x7,
CNIC_RECV_DOORBELL = 0x8
#elif defined(__BIG_ENDIAN)
CNIC_EVENT_COAL_INDEX = 0x2,
CNIC_SEND_DOORBELL = 0x6,
CNIC_EVENT_CQ_ARM = 0x4,
CNIC_RECV_DOORBELL = 0xa
#endif
};
/*
 * CQ DB - per-CQ pending-completion element of the 57710 CQ doorbell
 */
struct bnx2x_iscsi_cq_pend_cmpl {
/* CQ producer, updated by Ustorm */
u16 ustrom_prod;
/* CQ pending completion counter */
u16 pend_cntr;
};
/*
 * 57710 CQ doorbell block: per-CQ pending-completion state (Ustorm),
 * pending-completion ITTs, and driver-updated Cstorm sequence numbers.
 * Original had a stray ';' after the sqn comment (an extra semicolon
 * inside the struct, rejected by -pedantic) and a comment wedged between
 * the reserved declarator and its ';' - both cleaned up; layout unchanged.
 */
struct bnx2i_5771x_cq_db {
struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
/* CQ pending completion ITT array */
u16 itt[BNX2X_MAX_CQS];
/* Cstorm CQ sequence to notify array, updated by driver */
u16 sqn[BNX2X_MAX_CQS];
u32 reserved[4]; /* pad for 16 byte alignment */
};
/* 57710 SQ/RQ doorbell: producer index, padded out to 16 bytes */
struct bnx2i_5771x_sq_rq_db {
u16 prod_idx;
u8 reserved0[14]; /* Pad structure size to 16 bytes */
};
/* 57710 doorbell header byte; bit fields described by the masks below */
struct bnx2i_5771x_dbell_hdr {
u8 header;
/* 1 for rx doorbell, 0 for tx doorbell */
#define B577XX_DOORBELL_HDR_RX (0x1<<0)
#define B577XX_DOORBELL_HDR_RX_SHIFT 0
/* 0 for normal doorbell, 1 for advertise wnd doorbell */
#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
/* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
/* connection type */
#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
};
/* 57710 doorbell: header byte padded to a 32-bit write */
struct bnx2i_5771x_dbell {
struct bnx2i_5771x_dbell_hdr dbell;
u8 pad[3];
};
/**
 * struct qp_info - QP (share queue region) attributes structure
 *
 * @ctx_base:           ioremapped pci register base to access doorbell register
 *                      pertaining to this offloaded connection
 * @sq_virt:            virtual address of send queue (SQ) region
 * @sq_phys:            DMA address of SQ memory region
 * @sq_mem_size:        SQ size
 * @sq_prod_qe:         SQ producer entry pointer
 * @sq_cons_qe:         SQ consumer entry pointer
 * @sq_first_qe:        virtual address of first entry in SQ
 * @sq_last_qe:         virtual address of last entry in SQ
 * @sq_prod_idx:        SQ producer index
 * @sq_cons_idx:        SQ consumer index
 * @sqe_left:           number sq entry left
 * @sq_pgtbl_virt:      page table describing buffer constituting SQ region
 * @sq_pgtbl_phys:      dma address of 'sq_pgtbl_virt'
 * @sq_pgtbl_size:      SQ page table size
 * @cq_virt:            virtual address of completion queue (CQ) region
 * @cq_phys:            DMA address of RQ memory region
 * @cq_mem_size:        CQ size
 * @cq_prod_qe:         CQ producer entry pointer
 * @cq_cons_qe:         CQ consumer entry pointer
 * @cq_first_qe:        virtual address of first entry in CQ
 * @cq_last_qe:         virtual address of last entry in CQ
 * @cq_prod_idx:        CQ producer index
 * @cq_cons_idx:        CQ consumer index
 * @cqe_left:           number cq entry left
 * @cqe_size:           size of each CQ entry
 * @cqe_exp_seq_sn:     next expected CQE sequence number
 * @cq_pgtbl_virt:      page table describing buffer constituting CQ region
 * @cq_pgtbl_phys:      dma address of 'cq_pgtbl_virt'
 * @cq_pgtbl_size:      CQ page table size
 * @rq_virt:            virtual address of receive queue (RQ) region
 * @rq_phys:            DMA address of RQ memory region
 * @rq_mem_size:        RQ size
 * @rq_prod_qe:         RQ producer entry pointer
 * @rq_cons_qe:         RQ consumer entry pointer
 * @rq_first_qe:        virtual address of first entry in RQ
 * @rq_last_qe:         virtual address of last entry in RQ
 * @rq_prod_idx:        RQ producer index
 * @rq_cons_idx:        RQ consumer index
 * @rqe_left:           number rq entry left
 * @rq_pgtbl_virt:      page table describing buffer constituting RQ region
 * @rq_pgtbl_phys:      dma address of 'rq_pgtbl_virt'
 * @rq_pgtbl_size:      RQ page table size
 *
 * queue pair (QP) is a per connection shared data structure which is used
 * to send work requests (SQ), receive completion notifications (CQ)
 * and receive asynchronous / scsi sense info (RQ). 'qp_info' structure
 * below holds queue memory, consumer/producer indexes and page table
 * information
 */
struct qp_info {
void __iomem *ctx_base;
#define DPM_TRIGER_TYPE 0x40
#define BNX2I_570x_QUE_DB_SIZE 0
#define BNX2I_5771x_QUE_DB_SIZE 16
struct sqe *sq_virt;
dma_addr_t sq_phys;
u32 sq_mem_size;
struct sqe *sq_prod_qe;
struct sqe *sq_cons_qe;
struct sqe *sq_first_qe;
struct sqe *sq_last_qe;
u16 sq_prod_idx;
u16 sq_cons_idx;
u32 sqe_left;
void *sq_pgtbl_virt;
dma_addr_t sq_pgtbl_phys;
u32 sq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
struct cqe *cq_virt;
dma_addr_t cq_phys;
u32 cq_mem_size;
struct cqe *cq_prod_qe;
struct cqe *cq_cons_qe;
struct cqe *cq_first_qe;
struct cqe *cq_last_qe;
u16 cq_prod_idx;
u16 cq_cons_idx;
u32 cqe_left;
u32 cqe_size;
u32 cqe_exp_seq_sn;
void *cq_pgtbl_virt;
dma_addr_t cq_pgtbl_phys;
u32 cq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
struct rqe *rq_virt;
dma_addr_t rq_phys;
u32 rq_mem_size;
struct rqe *rq_prod_qe;
struct rqe *rq_cons_qe;
struct rqe *rq_first_qe;
struct rqe *rq_last_qe;
u16 rq_prod_idx;
u16 rq_cons_idx;
u32 rqe_left;
void *rq_pgtbl_virt;
dma_addr_t rq_pgtbl_phys;
u32 rq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
};
/*
 * CID handles - the various connection ids associated with one endpoint:
 * firmware cid, driver-assigned iscsi cid, and PG (port-group) cid
 */
struct ep_handles {
u32 fw_cid;
u32 drv_iscsi_cid;
u16 pg_cid;
u16 rsvd;
};
/*
 * Offload connection state machine values for bnx2i_endpoint.state.
 * Bit-encoded: low bits track the normal offload/connect/teardown
 * progression, high bits flag failure/timeout conditions.
 */
enum {
EP_STATE_IDLE = 0x0,
EP_STATE_PG_OFLD_START = 0x1,
EP_STATE_PG_OFLD_COMPL = 0x2,
EP_STATE_OFLD_START = 0x4,
EP_STATE_OFLD_COMPL = 0x8,
EP_STATE_CONNECT_START = 0x10,
EP_STATE_CONNECT_COMPL = 0x20,
EP_STATE_ULP_UPDATE_START = 0x40,
EP_STATE_ULP_UPDATE_COMPL = 0x80,
EP_STATE_DISCONN_START = 0x100,
EP_STATE_DISCONN_COMPL = 0x200,
EP_STATE_CLEANUP_START = 0x400,
EP_STATE_CLEANUP_CMPL = 0x800,
EP_STATE_TCP_FIN_RCVD = 0x1000,
EP_STATE_TCP_RST_RCVD = 0x2000,
EP_STATE_PG_OFLD_FAILED = 0x1000000,
EP_STATE_ULP_UPDATE_FAILED = 0x2000000,
EP_STATE_CLEANUP_FAILED = 0x4000000,
EP_STATE_OFLD_FAILED = 0x8000000,
EP_STATE_CONNECT_FAILED = 0x10000000,
EP_STATE_DISCONN_TIMEDOUT = 0x20000000,
};
/**
 * struct bnx2i_endpoint - representation of tcp connection in NX2 world
 *
 * @link:               list head to link elements
 * @hba:                adapter to which this connection belongs
 * @conn:               iscsi connection this EP is linked to
 * @cm_sk:              cnic sock struct
 * @hba_age:            age to detect if 'iscsid' issues ep_disconnect()
 *                      after HBA reset is completed by bnx2i/cnic/bnx2
 *                      modules
 * @state:              tracks offload connection state machine
 * @timestamp:          NOTE(review): semantics not visible here - verify
 *                      against users of this field
 * @num_active_cmds:    number of commands currently active on this endpoint
 * @qp:                 QP information
 * @ids:                contains chip allocated *context id* & driver assigned
 *                      *iscsi cid*
 * @ofld_timer:         offload timer to detect timeout
 * @ofld_wait:          wait queue
 *
 * Endpoint Structure - equivalent of tcp socket structure
 */
struct bnx2i_endpoint {
struct list_head link;
struct bnx2i_hba *hba;
struct bnx2i_conn *conn;
struct cnic_sock *cm_sk;
u32 hba_age;
u32 state;
unsigned long timestamp;
int num_active_cmds;
struct qp_info qp;
struct ep_handles ids;
#define ep_iscsi_cid ids.drv_iscsi_cid
#define ep_cid ids.fw_cid
#define ep_pg_cid ids.pg_cid
struct timer_list ofld_timer;
wait_queue_head_t ofld_wait;
};
/* Global variables */
extern unsigned int error_mask1, error_mask2;
extern u64 iscsi_error_mask;
extern unsigned int en_tcp_dack;
extern unsigned int event_coal_div;
extern struct scsi_transport_template *bnx2i_scsi_xport_template;
extern struct iscsi_transport bnx2i_iscsi_transport;
extern struct cnic_ulp_ops bnx2i_cnic_cb;
extern unsigned int sq_size;
extern unsigned int rq_size;
extern struct device_attribute *bnx2i_dev_attributes[];
/*
 * Function Prototypes
 */
/* adapter/cnic registration and lifecycle (bnx2i_init.c) */
extern void bnx2i_identify_device(struct bnx2i_hba *hba);
extern void bnx2i_register_device(struct bnx2i_hba *hba);
extern void bnx2i_ulp_init(struct cnic_dev *dev);
extern void bnx2i_ulp_exit(struct cnic_dev *dev);
extern void bnx2i_start(void *handle);
extern void bnx2i_stop(void *handle);
extern void bnx2i_reg_dev_all(void);
extern void bnx2i_unreg_dev_all(void);
extern struct bnx2i_hba *get_adapter_list_head(void);
/* session/connection and endpoint management (bnx2i_iscsi.c) */
struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
u16 iscsi_cid);
int bnx2i_alloc_ep_pool(void);
void bnx2i_release_ep_pool(void);
struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
void bnx2i_free_hba(struct bnx2i_hba *hba);
void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd);
void bnx2i_drop_session(struct iscsi_cls_session *session);
/* hardware interface: WQE construction and QP management (bnx2i_hwi.c) */
extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
struct bnx2i_cmd *cmnd);
extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
struct iscsi_task *mtask, u32 ttt,
char *datap, int data_len, int unsol);
extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
struct bnx2i_cmd *cmd);
extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
struct bnx2i_endpoint *ep);
extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
struct bnx2i_endpoint *ep);
extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
struct bnx2i_endpoint *ep);
extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
struct bnx2i_endpoint *ep);
extern void bnx2i_ep_ofld_timer(unsigned long data);
extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
struct bnx2i_hba *hba, u32 iscsi_cid);
extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
struct bnx2i_hba *hba, u32 iscsi_cid);
extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
/* Debug related function prototypes */
extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
#endif
/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
*
* Copyright (c) 2006 - 2009 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
*/
#include <scsi/scsi_tcq.h>
#include <scsi/libiscsi.h>
#include "bnx2i.h"
/**
* bnx2i_get_cid_num - get cid from ep
* @ep: endpoint pointer
*
* Only applicable to 57710 family of devices
*/
static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
{
	/* 57710 uses the context id as-is; older chips embed the cid
	 * inside ep_cid and it must be extracted.
	 */
	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
		return ep->ep_cid;

	return GET_CID_NUM(ep->ep_cid);
}
/**
* bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size for 57710 device type
* @hba: Adapter for which adjustments is to be made
*
* Only applicable to 57710 family of devices
*/
static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
{
	u32 per_pg;

	/* 1G chips (5706/5708/5709) require power-of-2 SQ/RQ depths;
	 * round down rather than up so we never exceed the user request.
	 */
	if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
	    test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
	    test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
		if (!is_power_of_2(hba->max_sqes))
			hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);
		if (!is_power_of_2(hba->max_rqes))
			hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
	}

	/* Each queue must occupy an integral number of pages; round the
	 * user's selection up to the next page-multiple element count.
	 */

	/* adjust SQ */
	per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
	if (hba->max_sqes < per_pg)
		hba->max_sqes = per_pg;
	else if (hba->max_sqes % per_pg)
		hba->max_sqes = (hba->max_sqes + per_pg - 1) & ~(per_pg - 1);

	/* adjust CQ */
	per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
	if (hba->max_cqes < per_pg)
		hba->max_cqes = per_pg;
	else if (hba->max_cqes % per_pg)
		hba->max_cqes = (hba->max_cqes + per_pg - 1) & ~(per_pg - 1);

	/* adjust RQ */
	per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
	if (hba->max_rqes < per_pg)
		hba->max_rqes = per_pg;
	else if (hba->max_rqes % per_pg)
		hba->max_rqes = (hba->max_rqes + per_pg - 1) & ~(per_pg - 1);
}
/**
* bnx2i_get_link_state - get network interface link state
* @hba: adapter instance pointer
*
* updates adapter structure flag based on netdev state
*/
static void bnx2i_get_link_state(struct bnx2i_hba *hba)
{
	/* mirror the netdev carrier state into the adapter state flags */
	int no_carrier = test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state);

	if (no_carrier)
		set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
	else
		clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
}
/**
* bnx2i_iscsi_license_error - displays iscsi license related error message
* @hba: adapter instance pointer
* @error_code: error classification
*
* Puts out an error log when driver is unable to offload iscsi connection
* due to license restrictions
*/
static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
{
	/* The two completion codes are mutually exclusive, so an else-if
	 * chain is sufficient. Fixed the error text: "not enable" ->
	 * "not enabled".
	 */
	if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
		/* iSCSI offload not supported on this device */
		printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
		       hba->netdev->name);
	else if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
		/* iSCSI offload not supported on this LOM device */
		printk(KERN_ERR "bnx2i: LOM is not enabled to "
		       "offload iSCSI connections, dev=%s\n",
		       hba->netdev->name);
	/* either way the adapter cannot be used for iSCSI offload */
	set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
}
/**
* bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
 * @ep: endpoint (transport identifier) structure
* @action: action, ARM or DISARM. For now only ARM_CQE is used
*
 * Arming the CQ enables the chip to generate a global EQ event in order to
 * interrupt the driver. An EQ event is generated when the CQ index is hit,
 * or when at least 1 CQE is outstanding and the on-chip timer expires
*/
void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
{
	struct bnx2i_5771x_cq_db *cq_db;
	u16 cq_index;

	/* only the 57710 family uses the host-memory CQ doorbell */
	if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
		return;

	if (action == CNIC_ARM_CQE) {
		/* next CQ index at which an EQ event should fire, pushed
		 * out by the number of commands in flight (coalescing)
		 */
		cq_index = ep->qp.cqe_exp_seq_sn +
			   ep->num_active_cmds / event_coal_div;
		cq_index %= (ep->qp.cqe_size * 2 + 1);
		if (!cq_index)
			cq_index = 1;	/* index 0 is not a valid arm value */
		/* BUG FIX: the doorbell must be written on every arm, not
		 * only when the index wraps to zero - otherwise the CQ is
		 * effectively never armed and completions stall.
		 */
		cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
		cq_db->sqn[0] = cq_index;
	}
}
/**
* bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
* @conn: iscsi connection on which RQ event occured
* @ptr: driver buffer to which RQ buffer contents is to
* be copied
* @len: length of valid data inside RQ buf
*
* Copies RQ buffer contents from shared (DMA'able) memory region to
 * driver buffer. RQ is used to DMA unsolicited iscsi pdu's and
* scsi sense info
*/
void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
{
if (!bnx2i_conn->ep->qp.rqe_left)
return;
bnx2i_conn->ep->qp.rqe_left--;
memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
bnx2i_conn->ep->qp.rq_cons_idx = 0;
} else {
bnx2i_conn->ep->qp.rq_cons_qe++;
bnx2i_conn->ep->qp.rq_cons_idx++;
}
}
/**
 * bnx2i_ring_577xx_doorbell - ring the 57710-family doorbell register
 * @conn: iscsi connection owning the queue pair
 *
 * Builds a zeroed doorbell message carrying only the iSCSI connection
 * type in its header, then writes it to the connection's mapped doorbell
 * window (qp.ctx_base).
 */
static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
{
	struct bnx2i_5771x_dbell dbell;
	u32 msg;
	memset(&dbell, 0, sizeof(dbell));
	dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
			      B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
	/* reinterpret the whole doorbell struct as one 32-bit message */
	msg = *((u32 *)&dbell);
	/* TODO : get doorbell register mapping */
	writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
}
/**
* bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell
* @conn: iscsi connection on which event to post
* @count: number of RQ buffer being posted to chip
*
* No need to ring hardware doorbell for 57710 family of devices
*/
void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
{
	struct bnx2i_5771x_sq_rq_db *rq_db;
	/* bit 15 of the producer index is kept separate from the counting
	 * bits below; presumably a wrap/phase indicator for the chip -
	 * NOTE(review): verify against the HSI spec
	 */
	u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
	ep->qp.rqe_left += count;
	/* advance the 15-bit producer index, toggling bit 15 on wrap */
	ep->qp.rq_prod_idx &= 0x7FFF;
	ep->qp.rq_prod_idx += count;
	if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
		ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
		if (!hi_bit)
			ep->qp.rq_prod_idx |= 0x8000;
	} else
		ep->qp.rq_prod_idx |= hi_bit;
	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		/* 57710 reads the producer index from host memory; the RQ
		 * doorbell structure lives at the start of the RQ page table
		 */
		rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
		rq_db->prod_idx = ep->qp.rq_prod_idx;
		/* no need to ring hardware doorbell for 57710 */
	} else {
		writew(ep->qp.rq_prod_idx,
		       ep->qp.ctx_base + CNIC_RECV_DOORBELL);
	}
	/* order the doorbell MMIO write before any subsequent PCI traffic */
	mmiowb();
}
/**
* bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
* @conn: iscsi connection to which new SQ entries belong
* @count: number of SQ WQEs to post
*
* SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
* of devices. For 5706/5708/5709 new SQ WQE count is written into the
* doorbell register
*/
static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
{
	struct bnx2i_5771x_sq_rq_db *sq_db;
	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
	/* one more command outstanding on this endpoint */
	ep->num_active_cmds++;
	wmb();	/* flush SQ WQE memory before the doorbell is rung */
	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		/* 57710: publish the producer index via the SQ doorbell
		 * structure (start of the SQ page table), then ring the
		 * on-chip doorbell
		 */
		sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
		sq_db->prod_idx = ep->qp.sq_prod_idx;
		bnx2i_ring_577xx_doorbell(bnx2i_conn);
	} else
		/* 5706/5708/5709: write the WQE count to the TX doorbell */
		writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
	mmiowb(); /* flush posted PCI writes */
}
/**
* bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
* @conn: iscsi connection to which new SQ entries belong
* @count: number of SQ WQEs to post
*
* this routine will update SQ driver parameters and ring the doorbell
*/
static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
					      int count)
{
	int tmp_cnt;
	if (count == 1) {
		/* single WQE: step the producer QE pointer, wrapping from
		 * the last element back to the first
		 */
		if (bnx2i_conn->ep->qp.sq_prod_qe ==
		    bnx2i_conn->ep->qp.sq_last_qe)
			bnx2i_conn->ep->qp.sq_prod_qe =
						bnx2i_conn->ep->qp.sq_first_qe;
		else
			bnx2i_conn->ep->qp.sq_prod_qe++;
	} else {
		/* multiple WQEs: advance by count if it fits before the end
		 * of the ring, otherwise wrap past the boundary
		 */
		if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
		    bnx2i_conn->ep->qp.sq_last_qe)
			bnx2i_conn->ep->qp.sq_prod_qe += count;
		else {
			/* tmp_cnt = elements remaining before the wrap */
			tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
				bnx2i_conn->ep->qp.sq_prod_qe;
			bnx2i_conn->ep->qp.sq_prod_qe =
				&bnx2i_conn->ep->qp.sq_first_qe[count -
								(tmp_cnt + 1)];
		}
	}
	bnx2i_conn->ep->qp.sq_prod_idx += count;
	/* Ring the doorbell */
	/* NOTE(review): the cumulative producer index (not the delta) is
	 * passed as the doorbell count - confirm against the 570x doorbell
	 * semantics
	 */
	bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
}
/**
* bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
* @conn: iscsi connection
* @cmd: driver command structure which is requesting
* a WQE to sent to chip for further processing
*
* prepare and post an iSCSI Login request WQE to CNIC firmware
*/
int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
			   struct iscsi_task *task)
{
	struct bnx2i_cmd *bnx2i_cmd;
	struct bnx2i_login_request *login_wqe;
	struct iscsi_login *login_hdr;
	u32 dword;
	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
	login_hdr = (struct iscsi_login *)task->hdr;
	/* the WQE is built in place in the current SQ producer slot */
	login_wqe = (struct bnx2i_login_request *)
						bnx2i_conn->ep->qp.sq_prod_qe;
	/* copy the iscsi login PDU header fields into the WQE */
	login_wqe->op_code = login_hdr->opcode;
	login_wqe->op_attr = login_hdr->flags;
	login_wqe->version_max = login_hdr->max_version;
	login_wqe->version_min = login_hdr->min_version;
	login_wqe->data_length = ntoh24(login_hdr->dlength);
	/* 6-byte ISID split: first 4 bytes -> isid_lo, last 2 -> isid_hi */
	login_wqe->isid_lo = *((u32 *) login_hdr->isid);
	login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
	login_wqe->tsih = login_hdr->tsih;
	/* tag the ITT as a middle-path task so the fw routes the response */
	login_wqe->itt = task->itt |
		(ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
	login_wqe->cid = login_hdr->cid;
	login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
	login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
	/* response lands in the connection's generic PDU response buffer */
	login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
	login_wqe->resp_bd_list_addr_hi =
		(u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
	dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
		 (bnx2i_conn->gen_pdu.resp_buf_size <<
		  ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
	login_wqe->resp_buffer = dword;
	login_wqe->flags = 0;
	/* request payload (login text) comes from the generic PDU buffer */
	login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
	login_wqe->bd_list_addr_hi =
		(u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32;
	login_wqe->num_bds = 1;
	login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
/**
* bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
* @conn: iscsi connection
* @mtask: driver command structure which is requesting
* a WQE to sent to chip for further processing
*
 * prepare and post an iSCSI task management request WQE to CNIC firmware
*/
int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
			 struct iscsi_task *mtask)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_tm *tmfabort_hdr;
	struct scsi_cmnd *ref_sc;
	struct iscsi_task *ctask;
	struct bnx2i_cmd *bnx2i_cmd;
	struct bnx2i_tmf_request *tmfabort_wqe;
	u32 dword;

	bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
	tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
	/* the WQE is built in place in the current SQ producer slot */
	tmfabort_wqe = (struct bnx2i_tmf_request *)
						bnx2i_conn->ep->qp.sq_prod_qe;
	tmfabort_wqe->op_code = tmfabort_hdr->opcode;
	tmfabort_wqe->op_attr = 0;
	tmfabort_wqe->op_attr =
		ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
	tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
	tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
	/* tag the ITT as a middle-path task */
	tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
	tmfabort_wqe->reserved2 = 0;
	tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
	ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
	/* BUG FIX: the original check was "ctask->sc" (no negation), which
	 * bailed out when the command was still alive and fell through to a
	 * guaranteed-NULL dereference of ctask->sc when it was gone.
	 */
	if (!ctask || !ctask->sc)
		/*
		 * the iscsi layer must have completed the cmd while this
		 * was starting up.
		 */
		return 0;
	ref_sc = ctask->sc;

	/* encode the referenced command's data direction into ref_itt */
	if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
		dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
	else
		dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
	tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
	tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);

	/* single BD pointing at the adapter's middle-path buffer */
	tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
	tmfabort_wqe->bd_list_addr_hi = (u32)
				((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
	tmfabort_wqe->num_bds = 1;
	tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
/**
* bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
* @conn: iscsi connection
* @cmd: driver command structure which is requesting
* a WQE to sent to chip for further processing
*
* prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
*/
int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
			     struct bnx2i_cmd *cmd)
{
	struct bnx2i_cmd_request *scsi_cmd_wqe;

	/* the request was pre-built in cmd->req; copy it into the current
	 * SQ producer slot and ring the doorbell
	 */
	scsi_cmd_wqe = (struct bnx2i_cmd_request *)
					bnx2i_conn->ep->qp.sq_prod_qe;
	memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
	scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
/**
* bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
* @conn: iscsi connection
* @cmd: driver command structure which is requesting
* a WQE to sent to chip for further processing
* @ttt: TTT to be used when building pdu header
* @datap: payload buffer pointer
* @data_len: payload data length
* @unsol: indicated whether nopout pdu is unsolicited pdu or
* in response to target's NOPIN w/ TTT != FFFFFFFF
*
* prepare and post a nopout request WQE to CNIC firmware
*/
int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
			    struct iscsi_task *task, u32 ttt,
			    char *datap, int data_len, int unsol)
{
	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
	struct bnx2i_cmd *bnx2i_cmd;
	struct bnx2i_nop_out_request *nopout_wqe;
	struct iscsi_nopout *nopout_hdr;

	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
	nopout_hdr = (struct iscsi_nopout *)task->hdr;
	/* the WQE is built in place in the current SQ producer slot */
	nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
	nopout_wqe->op_code = nopout_hdr->opcode;
	nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
	memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);

	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		/* 57710 requires LUN field to be swapped.
		 * BUG FIX: swap the WQE's copy of the LUN, not the iscsi
		 * task header - the original swapped nopout_hdr AFTER the
		 * memcpy above, leaving the WQE unswapped and corrupting
		 * the task header.
		 */
		u32 tmp = nopout_wqe->lun[0];
		nopout_wqe->lun[0] = nopout_wqe->lun[1];
		nopout_wqe->lun[1] = tmp;
	}

	/* tag the ITT as a middle-path task */
	nopout_wqe->itt = ((u16)task->itt |
			   (ISCSI_TASK_TYPE_MPATH <<
			    ISCSI_TMF_REQUEST_TYPE_SHIFT));
	nopout_wqe->ttt = ttt;
	nopout_wqe->flags = 0;
	if (!unsol)
		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
	else if (nopout_hdr->itt == RESERVED_ITT)
		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;

	nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
	nopout_wqe->data_length = data_len;
	if (data_len) {
		/* handle payload data, not required in first release */
		printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
	} else {
		/* no payload: single BD pointing at the middle-path buffer */
		nopout_wqe->bd_list_addr_lo = (u32)
					bnx2i_conn->hba->mp_bd_dma;
		nopout_wqe->bd_list_addr_hi =
			(u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
		nopout_wqe->num_bds = 1;
	}
	nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
/**
* bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
* @conn: iscsi connection
* @cmd: driver command structure which is requesting
* a WQE to sent to chip for further processing
*
* prepare and post logout request WQE to CNIC firmware
*/
int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
			    struct iscsi_task *task)
{
	struct bnx2i_logout_request *logout_wqe;
	struct iscsi_logout *logout_hdr;
	struct bnx2i_cmd *bnx2i_cmd;

	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
	logout_hdr = (struct iscsi_logout *)task->hdr;

	/* claim the current SQ producer slot and start from a clean WQE */
	logout_wqe = (struct bnx2i_logout_request *)
					bnx2i_conn->ep->qp.sq_prod_qe;
	memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));

	/* copy the logout PDU header fields into the WQE; the ITT is
	 * tagged as a middle-path task
	 */
	logout_wqe->op_code = logout_hdr->opcode;
	logout_wqe->op_attr =
			logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
	logout_wqe->itt = ((u16)task->itt |
			   (ISCSI_TASK_TYPE_MPATH <<
			    ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
	logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
	logout_wqe->data_length = 0;
	logout_wqe->cid = 0;

	/* single BD pointing at the adapter's middle-path buffer */
	logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
	logout_wqe->bd_list_addr_hi = (u32)
				((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
	logout_wqe->num_bds = 1;
	logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
/**
 * bnx2i_update_iscsi_conn - posts iSCSI connection update request to hardware
* @conn: iscsi connection which requires iscsi parameter update
*
* sends down iSCSI Conn Update request to move iSCSI conn to FFP
*/
void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
{
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct bnx2i_hba *hba = bnx2i_conn->hba;
	struct kwqe *kwqe_arr[2];
	struct iscsi_kwqe_conn_update *update_wqe;
	struct iscsi_kwqe_conn_update conn_update_kwqe;
	/* the KWQE is built on the stack and handed to the cnic driver */
	update_wqe = &conn_update_kwqe;
	update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
	update_wqe->hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
	/* 5771x requires conn context id to be passed as is */
	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
		update_wqe->context_id = bnx2i_conn->ep->ep_cid;
	else
		update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
	/* translate the negotiated iscsi parameters into KWQE flags */
	update_wqe->conn_flags = 0;
	if (conn->hdrdgst_en)
		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
	if (conn->datadgst_en)
		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
	if (conn->session->initial_r2t_en)
		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
	if (conn->session->imm_data_en)
		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
	update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
	update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
	update_wqe->first_burst_length = conn->session->first_burst;
	update_wqe->max_burst_length = conn->session->max_burst;
	update_wqe->exp_stat_sn = conn->exp_statsn;
	update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
	update_wqe->session_error_recovery_level = conn->session->erl;
	/* NOTE(review): format string lacks a space between "FBL 0x%x" and
	 * "MRDSL_I" - log output runs the two fields together
	 */
	iscsi_conn_printk(KERN_ALERT, conn,
			  "bnx2i: conn update - MBL 0x%x FBL 0x%x"
			  "MRDSL_I 0x%x MRDSL_T 0x%x \n",
			  update_wqe->max_burst_length,
			  update_wqe->first_burst_length,
			  update_wqe->max_recv_pdu_length,
			  update_wqe->max_send_pdu_length);
	kwqe_arr[0] = (struct kwqe *) update_wqe;
	if (hba->cnic && hba->cnic->submit_kwqes)
		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
}
/**
 * bnx2i_ep_ofld_timer - connection offload/destroy request timeout handler
* @data: endpoint (transport handle) structure pointer
*
* routine to handle connection offload/destroy request timeout
*/
void bnx2i_ep_ofld_timer(unsigned long data)
{
	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;

	/* whichever operation was pending when the timer fired has timed
	 * out; record the matching failure state
	 */
	switch (ep->state) {
	case EP_STATE_OFLD_START:
		printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
		ep->state = EP_STATE_OFLD_FAILED;
		break;
	case EP_STATE_DISCONN_START:
		printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
		ep->state = EP_STATE_DISCONN_TIMEDOUT;
		break;
	case EP_STATE_CLEANUP_START:
		printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
		ep->state = EP_STATE_CLEANUP_FAILED;
		break;
	default:
		break;
	}

	/* release whoever is sleeping on this endpoint's offload wait */
	wake_up_interruptible(&ep->ofld_wait);
}
/* Return log2(val) when val is a power of two, otherwise 0.
 * Note: val == 1 also yields 0, indistinguishable from the
 * "not a power of two" result; callers must not rely on that case.
 */
static int bnx2i_power_of2(u32 val)
{
	u32 exp = 0;

	/* a power of two has no bits in common with (itself - 1) */
	if (val & (val - 1))
		return 0;

	for (val--; val; val >>= 1)
		exp++;
	return exp;
}
/**
* bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
* @hba: adapter structure pointer
* @cmd: driver command structure which is requesting
* a WQE to sent to chip for further processing
*
 * prepares and posts an iSCSI command cleanup SQ WQE
*/
void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
{
	struct bnx2i_cleanup_request *cleanup_wqe;

	/* build a zeroed cleanup WQE in the current SQ producer slot,
	 * identifying the command to clean up by its ITT
	 */
	cleanup_wqe = (struct bnx2i_cleanup_request *)
					cmd->conn->ep->qp.sq_prod_qe;
	memset(cleanup_wqe, 0x00, sizeof(struct bnx2i_cleanup_request));
	cleanup_wqe->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
	cleanup_wqe->itt = cmd->req.itt;
	cleanup_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
}
/**
* bnx2i_send_conn_destroy - initiates iscsi connection teardown process
* @hba: adapter structure pointer
 * @ep: endpoint (transport identifier) structure
*
* this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate
* iscsi connection context clean-up process
*/
void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
{
	struct iscsi_kwqe_conn_destroy conn_cleanup;
	struct kwqe *kwqe_arr[2];

	memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));

	conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
	conn_cleanup.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
	/* 5771x requires conn context id to be passed as is; older chips
	 * take the shifted-down form
	 */
	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
		conn_cleanup.context_id = ep->ep_cid;
	else
		conn_cleanup.context_id = (ep->ep_cid >> 7);
	conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;

	/* hand the single KWQE down to the cnic driver */
	kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
	if (hba->cnic && hba->cnic->submit_kwqes)
		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
}
/**
* bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
* @hba: adapter structure pointer
 * @ep: endpoint (transport identifier) structure
*
* 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
*/
static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
					  struct bnx2i_endpoint *ep)
{
	struct kwqe *kwqe_arr[2];
	struct iscsi_kwqe_conn_offload1 ofld_req1;
	struct iscsi_kwqe_conn_offload2 ofld_req2;
	dma_addr_t dma_addr;
	int num_kwqes = 2;
	u32 *ptbl;
	/* NOTE(review): ofld_req1/ofld_req2 are not memset - any field not
	 * explicitly assigned below is sent to firmware as stack garbage;
	 * confirm the firmware ignores unset fields
	 */
	/* request 1: SQ and CQ page table base addresses */
	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
	dma_addr = ep->qp.sq_pgtbl_phys;
	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
	dma_addr = ep->qp.cq_pgtbl_phys;
	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
	/* request 2: RQ page table base plus the first PTE of each table */
	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
	dma_addr = ep->qp.rq_pgtbl_phys;
	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
	/* first SQ/CQ PTEs are stored hi-word first (big endian word order
	 * on these chips - see setup_qp_page_tables)
	 */
	ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
	ofld_req2.sq_first_pte.hi = *ptbl++;
	ofld_req2.sq_first_pte.lo = *ptbl;
	ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
	ofld_req2.cq_first_pte.hi = *ptbl++;
	ofld_req2.cq_first_pte.lo = *ptbl;
	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	/* no third KWQE on 570x devices */
	ofld_req2.num_additional_wqes = 0;
	if (hba->cnic && hba->cnic->submit_kwqes)
		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
}
/**
* bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
* @hba: adapter structure pointer
 * @ep: endpoint (transport identifier) structure
*
* 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
*/
static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
					   struct bnx2i_endpoint *ep)
{
	struct kwqe *kwqe_arr[5];
	struct iscsi_kwqe_conn_offload1 ofld_req1;
	struct iscsi_kwqe_conn_offload2 ofld_req2;
	struct iscsi_kwqe_conn_offload3 ofld_req3[1];
	dma_addr_t dma_addr;
	int num_kwqes = 2;
	u32 *ptbl;
	/* NOTE(review): ofld_req1/ofld_req2 are not memset - fields not
	 * assigned below go out as stack garbage; confirm firmware ignores
	 * them (ofld_req3 IS memset below)
	 */
	/* request 1: SQ and CQ page table addresses; on 57710 the first
	 * ISCSI_xx_DB_SIZE bytes of each page table area hold the doorbell
	 * structure, so the table proper starts past that offset
	 */
	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
	dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
	dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
	/* request 2: RQ page table address and first SQ/CQ PTEs */
	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
	dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
	ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
	ofld_req2.sq_first_pte.hi = *ptbl++;
	ofld_req2.sq_first_pte.lo = *ptbl;
	ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
	ofld_req2.cq_first_pte.hi = *ptbl++;
	ofld_req2.cq_first_pte.lo = *ptbl;
	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	ofld_req2.num_additional_wqes = 1;
	/* request 3 (57710 only): first RQ PTE */
	memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
	ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
	ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
	ofld_req3[0].qp_first_pte[0].lo = *ptbl;
	kwqe_arr[2] = (struct kwqe *) ofld_req3;
	/* need if we decide to go with multiple KCQE's per conn */
	num_kwqes += 1;
	if (hba->cnic && hba->cnic->submit_kwqes)
		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
}
/**
* bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
*
* @hba: adapter structure pointer
 * @ep: endpoint (transport identifier) structure
*
* this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
*/
void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
{
	/* dispatch on chip family: 57710 needs the 3-KWQE offload variant */
	int is_10g = test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);

	if (is_10g)
		bnx2i_5771x_send_conn_ofld_req(hba, ep);
	else
		bnx2i_570x_send_conn_ofld_req(hba, ep);
}
/**
 * bnx2i_fill_pgtbl - write the PTEs for one queue's page table
 * @ptbl: first PTE slot to fill
 * @page: DMA address of the first page backing the queue
 * @num_pages: number of PAGE_SIZE pages backing the queue
 * @cnic_dev_10g: non-zero for 57710 family devices
 *
 * Each 64-bit PTE is two u32 writes: 57710 wants little endian word
 * order (lo, hi); 5706/5708/5709 want big endian word order (hi, lo).
 */
static void bnx2i_fill_pgtbl(u32 *ptbl, dma_addr_t page, int num_pages,
			     int cnic_dev_10g)
{
	while (num_pages--) {
		if (cnic_dev_10g) {
			/* PTE is written in little endian format for 57710 */
			*ptbl++ = (u32) page;
			*ptbl++ = (u32) ((u64) page >> 32);
		} else {
			/* PTE is written in big endian format for
			 * 5706/5708/5709 devices */
			*ptbl++ = (u32) ((u64) page >> 32);
			*ptbl++ = (u32) page;
		}
		page += PAGE_SIZE;
	}
}

/**
 * setup_qp_page_tables - iscsi QP page table setup function
 * @ep: endpoint (transport identifier) structure
 *
 * Sets up page tables for SQ/RQ/CQ. 1G/sec (5706/5708/5709) devices require
 * 64-bit addresses in big endian format, whereas 10G/sec (57710) requires
 * the PT in little endian format. On 57710 the first ISCSI_xx_DB_SIZE bytes
 * of each page table area hold the queue's doorbell structure, so the PTEs
 * start past that offset. (Refactored: the identical per-queue PTE loop was
 * triplicated; it now lives in bnx2i_fill_pgtbl.)
 */
static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
{
	int cnic_dev_10g;
	u32 *ptbl;

	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
		cnic_dev_10g = 1;
	else
		cnic_dev_10g = 0;

	/* SQ page table */
	memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
	if (cnic_dev_10g)
		ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
	else
		ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
	bnx2i_fill_pgtbl(ptbl, ep->qp.sq_phys,
			 ep->qp.sq_mem_size / PAGE_SIZE, cnic_dev_10g);

	/* RQ page table */
	memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
	if (cnic_dev_10g)
		ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
	else
		ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
	bnx2i_fill_pgtbl(ptbl, ep->qp.rq_phys,
			 ep->qp.rq_mem_size / PAGE_SIZE, cnic_dev_10g);

	/* CQ page table */
	memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
	if (cnic_dev_10g)
		ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
	else
		ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
	bnx2i_fill_pgtbl(ptbl, ep->qp.cq_phys,
			 ep->qp.cq_mem_size / PAGE_SIZE, cnic_dev_10g);
}
/**
* bnx2i_alloc_qp_resc - allocates required resources for QP.
* @hba: adapter structure pointer
 * @ep: endpoint (transport identifier) structure
*
* Allocate QP (transport layer for iSCSI connection) resources, DMA'able
* memory for SQ/RQ/CQ and page tables. EP structure elements such
* as producer/consumer indexes/pointers, queue sizes and page table
* contents are setup
*/
int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
{
	struct bnx2i_5771x_cq_db *cq_db;

	ep->hba = hba;
	ep->conn = NULL;
	ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;

	/* Allocate page table memory for SQ which is page aligned */
	ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
	ep->qp.sq_mem_size =
		(ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
	ep->qp.sq_pgtbl_size =
		(ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
	ep->qp.sq_pgtbl_size =
		(ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	ep->qp.sq_pgtbl_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
				   &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
	if (!ep->qp.sq_pgtbl_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
				  ep->qp.sq_pgtbl_size);
		goto mem_alloc_err;
	}

	/* Allocate memory area for actual SQ element */
	ep->qp.sq_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
				   &ep->qp.sq_phys, GFP_KERNEL);
	if (!ep->qp.sq_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
				  ep->qp.sq_mem_size);
		goto mem_alloc_err;
	}

	memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
	ep->qp.sq_first_qe = ep->qp.sq_virt;
	ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
	ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
	ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
	ep->qp.sq_prod_idx = 0;
	ep->qp.sq_cons_idx = 0;
	ep->qp.sqe_left = hba->max_sqes;

	/* Allocate page table memory for CQ which is page aligned */
	ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
	ep->qp.cq_mem_size =
		(ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
	ep->qp.cq_pgtbl_size =
		(ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
	ep->qp.cq_pgtbl_size =
		(ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	ep->qp.cq_pgtbl_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
				   &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
	if (!ep->qp.cq_pgtbl_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
				  ep->qp.cq_pgtbl_size);
		goto mem_alloc_err;
	}

	/* Allocate memory area for actual CQ element */
	ep->qp.cq_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
				   &ep->qp.cq_phys, GFP_KERNEL);
	if (!ep->qp.cq_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
				  ep->qp.cq_mem_size);
		goto mem_alloc_err;
	}
	memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);

	ep->qp.cq_first_qe = ep->qp.cq_virt;
	ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
	ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
	ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
	ep->qp.cq_prod_idx = 0;
	ep->qp.cq_cons_idx = 0;
	ep->qp.cqe_left = hba->max_cqes;
	ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
	ep->qp.cqe_size = hba->max_cqes;

	/* Invalidate all EQ CQE index, req only for 57710 */
	cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
	memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);

	/* Allocate page table memory for RQ which is page aligned */
	ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
	ep->qp.rq_mem_size =
		(ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
	ep->qp.rq_pgtbl_size =
		(ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
	ep->qp.rq_pgtbl_size =
		(ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	ep->qp.rq_pgtbl_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
				   &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
	if (!ep->qp.rq_pgtbl_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
				  ep->qp.rq_pgtbl_size);
		goto mem_alloc_err;
	}

	/* Allocate memory area for actual RQ element */
	ep->qp.rq_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
				   &ep->qp.rq_phys, GFP_KERNEL);
	if (!ep->qp.rq_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
				  ep->qp.rq_mem_size);
		goto mem_alloc_err;
	}
	/* clear RQ element memory like SQ/CQ above - dma_alloc_coherent
	 * does not guarantee zeroed memory on every arch (was missing)
	 */
	memset(ep->qp.rq_virt, 0x00, ep->qp.rq_mem_size);

	ep->qp.rq_first_qe = ep->qp.rq_virt;
	ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
	ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
	ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
	ep->qp.rq_prod_idx = 0x8000;
	ep->qp.rq_cons_idx = 0;
	ep->qp.rqe_left = hba->max_rqes;

	setup_qp_page_tables(ep);

	return 0;

mem_alloc_err:
	/* bnx2i_free_qp_resc safely releases whatever was allocated */
	bnx2i_free_qp_resc(hba, ep);
	return -ENOMEM;
}
/**
* bnx2i_free_qp_resc - free memory resources held by QP
* @hba: adapter structure pointer
 * @ep: endpoint (transport identifier) structure
*
* Free QP resources - SQ/RQ/CQ memory and page tables.
*/
void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
{
if (ep->qp.ctx_base) {
iounmap(ep->qp.ctx_base);
ep->qp.ctx_base = NULL;
}
/* Free SQ mem */
if (ep->qp.sq_pgtbl_virt) {
dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
ep->qp.sq_pgtbl_virt = NULL;
ep->qp.sq_pgtbl_phys = 0;
}
if (ep->qp.sq_virt) {
dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
ep->qp.sq_virt, ep->qp.sq_phys);
ep->qp.sq_virt = NULL;
ep->qp.sq_phys = 0;
}
/* Free RQ mem */
if (ep->qp.rq_pgtbl_virt) {
dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
ep->qp.rq_pgtbl_virt = NULL;
ep->qp.rq_pgtbl_phys = 0;
}
if (ep->qp.rq_virt) {
dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
ep->qp.rq_virt, ep->qp.rq_phys);
ep->qp.rq_virt = NULL;
ep->qp.rq_phys = 0;
}
/* Free CQ mem */
if (ep->qp.cq_pgtbl_virt) {
dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
ep->qp.cq_pgtbl_virt = NULL;
ep->qp.cq_pgtbl_phys = 0;
}
if (ep->qp.cq_virt) {
dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
ep->qp.cq_virt, ep->qp.cq_phys);
ep->qp.cq_virt = NULL;
ep->qp.cq_phys = 0;
}
}
/**
 * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
 * @hba: adapter structure pointer
 *
 * Send down iscsi_init KWQEs which initiates the initial handshake with the f/w
 * This results in iSCSi support validation and on-chip context manager
 * initialization. Firmware completes this handshake with a CQE carrying
 * the result of iscsi support validation. Parameter carried by
 * iscsi init request determines the number of offloaded connection and
 * tolerance level for iscsi protocol violation this hba/chip can support
 */
int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
{
	struct kwqe *kwqe_arr[3];
	struct iscsi_kwqe_init1 iscsi_init;
	struct iscsi_kwqe_init2 iscsi_init2;
	int rc = 0;
	u64 mask64;

	/* zero both KWQEs so no uninitialized stack bytes reach the f/w */
	memset(&iscsi_init, 0x00, sizeof(iscsi_init));
	memset(&iscsi_init2, 0x00, sizeof(iscsi_init2));

	bnx2i_adjust_qp_size(hba);

	iscsi_init.flags =
		ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
	if (en_tcp_dack)
		iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
	iscsi_init.reserved0 = 0;
	iscsi_init.num_cqs = 1;
	iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
	iscsi_init.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
	iscsi_init.dummy_buffer_addr_hi =
		(u32) ((u64) hba->dummy_buf_dma >> 32);

	hba->ctx_ccell_tasks =
			((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
	iscsi_init.num_ccells_per_conn = hba->num_ccell;
	iscsi_init.num_tasks_per_conn = hba->max_sqes;
	iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
	iscsi_init.sq_num_wqes = hba->max_sqes;
	iscsi_init.cq_log_wqes_per_page =
		(u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
	iscsi_init.cq_num_wqes = hba->max_cqes;
	iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
				   (PAGE_SIZE - 1)) / PAGE_SIZE;
	iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
				   (PAGE_SIZE - 1)) / PAGE_SIZE;
	iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
	iscsi_init.rq_num_wqes = hba->max_rqes;

	iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
	iscsi_init2.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
	iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
	/* bits land in a 64-bit mask - must use 1ULL, a 32-bit 1UL shifted
	 * by a bit position >= 32 is undefined on 32-bit kernels */
	mask64 = 0x0ULL;
	mask64 |= (
		/* CISCO MDS */
		(1ULL <<
		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
		/* HP MSA1510i */
		(1ULL <<
		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
		/* EMC */
		(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
	if (error_mask1)
		iscsi_init2.error_bit_map[0] = error_mask1;
	else
		iscsi_init2.error_bit_map[0] = (u32) mask64;
	if (error_mask2)
		iscsi_init2.error_bit_map[1] = error_mask2;
	else
		iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);

	iscsi_error_mask = mask64;

	kwqe_arr[0] = (struct kwqe *) &iscsi_init;
	kwqe_arr[1] = (struct kwqe *) &iscsi_init2;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
	return rc;
}
/**
 * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
 * @session: iscsi session pointer
 * @bnx2i_conn: bnx2i iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process SCSI CMD Response CQE & complete the request to SCSI-ML
 */
static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
				       struct bnx2i_conn *bnx2i_conn,
				       struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct bnx2i_cmd_response *resp_cqe;
	struct bnx2i_cmd *bnx2i_cmd;
	struct iscsi_task *task;
	struct iscsi_cmd_rsp *hdr;
	u32 datalen = 0;

	resp_cqe = (struct bnx2i_cmd_response *)cqe;

	/* session lock serializes itt lookup and pdu completion */
	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
				 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
	if (!task)
		goto fail;

	bnx2i_cmd = task->dd_data;

	/* update per-connection statistics based on the data direction */
	if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
		conn->datain_pdus_cnt +=
			resp_cqe->task_stat.read_stat.num_data_outs;
		conn->rxdata_octets +=
			bnx2i_cmd->req.total_data_transfer_length;
	} else {
		conn->dataout_pdus_cnt +=
			resp_cqe->task_stat.read_stat.num_data_outs;
		conn->r2t_pdus_cnt +=
			resp_cqe->task_stat.read_stat.num_r2ts;
		conn->txdata_octets +=
			bnx2i_cmd->req.total_data_transfer_length;
	}
	bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);

	/* rebuild an iscsi_cmd_rsp header in place from the CQE fields
	 * (the original code re-cast cqe a second time here - redundant) */
	hdr = (struct iscsi_cmd_rsp *)task->hdr;
	hdr->opcode = resp_cqe->op_code;
	hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
	hdr->response = resp_cqe->response;
	hdr->cmd_status = resp_cqe->status;
	hdr->flags = resp_cqe->response_flags;
	hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);

	if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
		goto done;

	if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
		datalen = resp_cqe->data_length;
		/* need at least the 2-byte sense length prefix */
		if (datalen < 2)
			goto done;

		/* clamp sense length to what the RQ slot / conn buf holds */
		if (datalen > BNX2I_RQ_WQE_SIZE) {
			iscsi_conn_printk(KERN_ERR, conn,
					  "sense data len %d > RQ sz\n",
					  datalen);
			datalen = BNX2I_RQ_WQE_SIZE;
		} else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
			iscsi_conn_printk(KERN_ERR, conn,
					  "sense data len %d > conn data\n",
					  datalen);
			datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
		}

		/* copy the sense data out of the RQ and recycle the slot */
		bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
		bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
	}

done:
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
			     conn->data, datalen);
fail:
	spin_unlock(&session->lock);
	return 0;
}
/**
* bnx2i_process_login_resp - this function handles iscsi login response
* @session: iscsi session pointer
* @bnx2i_conn: iscsi connection pointer
* @cqe: pointer to newly DMA'ed CQE entry for processing
*
* process Login Response CQE & complete it to open-iscsi user daemon
*/
static int bnx2i_process_login_resp(struct iscsi_session *session,
struct bnx2i_conn *bnx2i_conn,
struct cqe *cqe)
{
struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
struct iscsi_task *task;
struct bnx2i_login_response *login;
struct iscsi_login_rsp *resp_hdr;
int pld_len;
int pad_len;
login = (struct bnx2i_login_response *) cqe;
spin_lock(&session->lock);
task = iscsi_itt_to_task(conn,
login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
if (!task)
goto done;
resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
resp_hdr->opcode = login->op_code;
resp_hdr->flags = login->response_flags;
resp_hdr->max_version = login->version_max;
resp_hdr->active_version = login->version_active;;
resp_hdr->hlength = 0;
hton24(resp_hdr->dlength, login->data_length);
memcpy(resp_hdr->isid, &login->isid_lo, 6);
resp_hdr->tsih = cpu_to_be16(login->tsih);
resp_hdr->itt = task->hdr->itt;
resp_hdr->statsn = cpu_to_be32(login->stat_sn);
resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
resp_hdr->status_class = login->status_class;
resp_hdr->status_detail = login->status_detail;
pld_len = login->data_length;
bnx2i_conn->gen_pdu.resp_wr_ptr =
bnx2i_conn->gen_pdu.resp_buf + pld_len;
pad_len = 0;
if (pld_len & 0x3)
pad_len = 4 - (pld_len % 4);
if (pad_len) {
int i = 0;
for (i = 0; i < pad_len; i++) {
bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
bnx2i_conn->gen_pdu.resp_wr_ptr++;
}
}
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
bnx2i_conn->gen_pdu.resp_buf,
bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
done:
spin_unlock(&session->lock);
return 0;
}
/**
 * bnx2i_process_tmf_resp - this function handles iscsi TMF response
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * Rebuilds an iscsi_tm_rsp header from the firmware CQE and hands it to
 * libiscsi, which wakes the driver eh thread waiting on the TMF.
 */
static int bnx2i_process_tmf_resp(struct iscsi_session *session,
				  struct bnx2i_conn *bnx2i_conn,
				  struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct bnx2i_tmf_response *tmf_cqe =
				(struct bnx2i_tmf_response *)cqe;
	struct iscsi_tm_rsp *resp_hdr;
	struct iscsi_task *task;

	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
				 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
	if (task) {
		resp_hdr = (struct iscsi_tm_rsp *)
					&bnx2i_conn->gen_pdu.resp_hdr;
		memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
		resp_hdr->opcode = tmf_cqe->op_code;
		resp_hdr->itt = task->hdr->itt;
		resp_hdr->response = tmf_cqe->response;
		resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
		resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
		__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
				     NULL, 0);
	}
	spin_unlock(&session->lock);
	return 0;
}
/**
 * bnx2i_process_logout_resp - this function handles iscsi logout response
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI Logout Response CQE & make function call to
 * notify the user daemon.  Always returns 0; an unknown itt is
 * silently ignored.
 */
static int bnx2i_process_logout_resp(struct iscsi_session *session,
				     struct bnx2i_conn *bnx2i_conn,
				     struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;
	struct bnx2i_logout_response *logout;
	struct iscsi_logout_rsp *resp_hdr;

	logout = (struct bnx2i_logout_response *) cqe;

	/* session lock serializes itt lookup and pdu completion */
	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
				 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
	if (!task)
		goto done;

	/* rebuild an iscsi_logout_rsp header from the f/w CQE fields */
	resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = logout->op_code;
	resp_hdr->flags = logout->response;
	resp_hdr->hlength = 0;

	/* itt and statsn are echoed back from the original request */
	resp_hdr->itt = task->hdr->itt;
	resp_hdr->statsn = task->hdr->exp_statsn;
	resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
	resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);

	resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
	resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
done:
	spin_unlock(&session->lock);
	return 0;
}
/**
 * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * Handles the local completion of a driver-generated nop-out: looks up the
 * task by itt and drops its reference, freeing the ITT and command struct.
 */
static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
					   struct bnx2i_conn *bnx2i_conn,
					   struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct bnx2i_nop_in_msg *nopin_cqe =
				(struct bnx2i_nop_in_msg *)cqe;
	struct iscsi_task *task;

	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
				 nopin_cqe->itt & ISCSI_NOP_IN_MSG_INDEX);
	if (task)
		iscsi_put_task(task);
	spin_unlock(&session->lock);
}
/**
 * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
 * @bnx2i_conn: iscsi connection
 *
 * Firmware advances RQ producer index for every unsolicited PDU even if
 * payload data length is '0'. This function makes corresponding
 * adjustments on the driver side to match this f/w behavior
 */
static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
{
	char dummy_rq_data[2];

	/* consume one RQ slot (data discarded) and immediately recycle it
	 * so the driver's consumer index keeps pace with the f/w producer */
	bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
	bnx2i_put_rq_buf(bnx2i_conn, 1);
}
/**
 * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI target's proactive iSCSI NOPIN request
 *
 * Returns 1 if the nop-in was an unsolicited, target-initiated ping
 * (reserved itt), 0 if it completed one of our own nop-outs.
 */
static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
				    struct bnx2i_conn *bnx2i_conn,
				    struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;
	struct bnx2i_nop_in_msg *nop_in;
	struct iscsi_nopin *hdr;
	u32 itt;
	int tgt_async_nop = 0;

	nop_in = (struct bnx2i_nop_in_msg *)cqe;
	itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;

	spin_lock(&session->lock);
	/* rebuild an iscsi_nopin header from the f/w CQE fields */
	hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
	memset(hdr, 0, sizeof(struct iscsi_hdr));
	hdr->opcode = nop_in->op_code;
	hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
	hdr->ttt = cpu_to_be32(nop_in->ttt);

	if (itt == (u16) RESERVED_ITT) {
		/* unsolicited target ping: f/w bumped the RQ producer for
		 * it, so consume/recycle one RQ slot to stay in sync */
		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
		hdr->itt = RESERVED_ITT;
		tgt_async_nop = 1;
		goto done;
	}

	/* this is a response to one of our nop-outs */
	task = iscsi_itt_to_task(conn, itt);
	if (task) {
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
		hdr->itt = task->hdr->itt;
		hdr->ttt = cpu_to_be32(nop_in->ttt);
		memcpy(hdr->lun, nop_in->lun, 8);
	}
done:
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
	spin_unlock(&session->lock);

	return tgt_async_nop;
}
/**
 * bnx2i_process_async_mesg - this function handles iscsi async message
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI ASYNC Message
 */
static void bnx2i_process_async_mesg(struct iscsi_session *session,
				     struct bnx2i_conn *bnx2i_conn,
				     struct cqe *cqe)
{
	struct bnx2i_async_msg *async_cqe;
	struct iscsi_async *resp_hdr;
	u8 async_event;

	/* f/w advances the RQ producer for every unsolicited PDU; keep the
	 * driver-side consumer index in step */
	bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);

	async_cqe = (struct bnx2i_async_msg *)cqe;
	async_event = async_cqe->async_event;

	if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
				  "async: scsi events not supported\n");
		return;
	}

	spin_lock(&session->lock);
	/* rebuild an iscsi_async header from the f/w CQE fields */
	resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = async_cqe->op_code;
	/* 0x80 - presumably the final (F) bit of the BHS flags byte;
	 * TODO(review): confirm against the iSCSI async message format */
	resp_hdr->flags = 0x80;

	memcpy(resp_hdr->lun, async_cqe->lun, 8);
	resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
	resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);

	resp_hdr->async_event = async_cqe->async_event;
	resp_hdr->async_vcode = async_cqe->async_vcode;

	resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
	resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
	resp_hdr->param3 = cpu_to_be16(async_cqe->param3);

	__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
			     (struct iscsi_hdr *)resp_hdr, NULL, 0);
	spin_unlock(&session->lock);
}
/**
 * bnx2i_process_reject_mesg - process iscsi reject pdu
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI REJECT message
 */
static void bnx2i_process_reject_mesg(struct iscsi_session *session,
				      struct bnx2i_conn *bnx2i_conn,
				      struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct bnx2i_reject_msg *reject;
	struct iscsi_reject *hdr;

	reject = (struct bnx2i_reject_msg *) cqe;
	if (reject->data_length) {
		/* rejected PDU header was placed in the RQ; copy it out
		 * into the connection data buffer and recycle the slot */
		bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
		bnx2i_put_rq_buf(bnx2i_conn, 1);
	} else
		/* zero-length unsolicited PDU still advanced the f/w RQ
		 * producer - adjust the driver-side consumer to match */
		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);

	spin_lock(&session->lock);
	/* rebuild an iscsi_reject header from the f/w CQE fields */
	hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(hdr, 0, sizeof(struct iscsi_hdr));
	hdr->opcode = reject->op_code;
	hdr->reason = reject->reason;
	hton24(hdr->dlength, reject->data_length);
	hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
	hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
			     reject->data_length);
	spin_unlock(&session->lock);
}
/**
 * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process command cleanup response CQE during conn shutdown or error recovery
 */
static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
					   struct bnx2i_conn *bnx2i_conn,
					   struct cqe *cqe)
{
	struct bnx2i_cleanup_response *cmd_clean_rsp;
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;

	cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
	/* missing task is only logged - cleanup still counts as done */
	if (!task)
		printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
	spin_unlock(&session->lock);
	/* wake the thread waiting in conn shutdown / error recovery */
	complete(&bnx2i_conn->cmd_cleanup_cmpl);
}
/**
 * bnx2i_process_new_cqes - process newly DMA'ed CQE's
 * @bnx2i_conn: iscsi connection
 *
 * this function is called by generic KCQ handler to process all pending CQE's
 *
 * Walks the CQ ring from the current consumer position, dispatching each
 * CQE by opcode until an entry whose sequence number has not yet been
 * produced by the chip is found, then re-arms CQ event coalescing.
 */
static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct qp_info *qp = &bnx2i_conn->ep->qp;
	struct bnx2i_nop_in_msg *nopin;
	int tgt_async_msg;

	while (1) {
		nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
		/* sequence mismatch => chip has not DMA'ed this entry yet */
		if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
			break;
		/* rx suspended (connection teardown) - stop delivering */
		if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
			break;

		tgt_async_msg = 0;

		/* dispatch by the iSCSI opcode the f/w placed in the CQE */
		switch (nopin->op_code) {
		case ISCSI_OP_SCSI_CMD_RSP:
		case ISCSI_OP_SCSI_DATA_IN:
			bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
						    qp->cq_cons_qe);
			break;
		case ISCSI_OP_LOGIN_RSP:
			bnx2i_process_login_resp(session, bnx2i_conn,
						 qp->cq_cons_qe);
			break;
		case ISCSI_OP_SCSI_TMFUNC_RSP:
			bnx2i_process_tmf_resp(session, bnx2i_conn,
					       qp->cq_cons_qe);
			break;
		case ISCSI_OP_LOGOUT_RSP:
			bnx2i_process_logout_resp(session, bnx2i_conn,
						  qp->cq_cons_qe);
			break;
		case ISCSI_OP_NOOP_IN:
			if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
						     qp->cq_cons_qe))
				tgt_async_msg = 1;
			break;
		case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
			bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
						       qp->cq_cons_qe);
			break;
		case ISCSI_OP_ASYNC_EVENT:
			bnx2i_process_async_mesg(session, bnx2i_conn,
						 qp->cq_cons_qe);
			tgt_async_msg = 1;
			break;
		case ISCSI_OP_REJECT:
			bnx2i_process_reject_mesg(session, bnx2i_conn,
						  qp->cq_cons_qe);
			break;
		case ISCSI_OPCODE_CLEANUP_RESPONSE:
			bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
						       qp->cq_cons_qe);
			break;
		default:
			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
			       nopin->op_code);
		}

		/* target-initiated PDUs do not consume a host command slot */
		if (!tgt_async_msg)
			bnx2i_conn->ep->num_active_cmds--;

		/* clear out in production version only, till beta keep opcode
		 * field intact, will be helpful in debugging (context dump)
		 * nopin->op_code = 0;
		 */

		/* advance expected sequence number; wraps back to the
		 * initial SN after 2 * cqe_size entries */
		qp->cqe_exp_seq_sn++;
		if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
			qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;

		/* advance the consumer pointer, wrapping at the ring end */
		if (qp->cq_cons_qe == qp->cq_last_qe) {
			qp->cq_cons_qe = qp->cq_first_qe;
			qp->cq_cons_idx = 0;
		} else {
			qp->cq_cons_qe++;
			qp->cq_cons_idx++;
		}
	}
	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
}
/**
 * bnx2i_fastpath_notification - process global event queue (KCQ)
 * @hba: adapter structure pointer
 * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry
 *
 * Fast path event notification handler.  The KCQ entry carries the
 * context id of a connection that has one or more pending CQ entries;
 * look it up and drain its CQ.
 */
static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
					struct iscsi_kcqe *new_cqe_kcqe)
{
	u32 iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
	struct bnx2i_conn *conn = bnx2i_get_conn_from_id(hba, iscsi_cid);

	if (!conn) {
		printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
		return;
	}
	if (!conn->ep) {
		printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
		return;
	}

	bnx2i_process_new_cqes(conn);
}
/**
 * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
 * @hba: adapter structure pointer
 * @update_kcqe: kcqe pointer
 *
 * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration
 */
static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
					   struct iscsi_kcqe *update_kcqe)
{
	struct bnx2i_conn *conn;
	u32 iscsi_cid;

	iscsi_cid = update_kcqe->iscsi_conn_id;
	conn = bnx2i_get_conn_from_id(hba, iscsi_cid);

	if (!conn) {
		printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
		return;
	}
	if (!conn->ep) {
		printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
		return;
	}

	/* record the update result on the endpoint and wake the thread
	 * waiting on ofld_wait for the connection-update handshake */
	if (update_kcqe->completion_status) {
		printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
		conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
	} else
		conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;

	wake_up_interruptible(&conn->ep->ofld_wait);
}
/**
 * bnx2i_recovery_que_add_conn - add connection to recovery queue
 * @hba: adapter structure pointer (currently unused here)
 * @bnx2i_conn: iscsi connection
 *
 * Despite the name, no driver-local queue is involved: the connection is
 * handed straight to libiscsi's iscsi_conn_failure(), which schedules the
 * recovery work.
 */
static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
					struct bnx2i_conn *bnx2i_conn)
{
	iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
			   ISCSI_ERR_CONN_FAILED);
}
/**
 * bnx2i_process_tcp_error - process error notification on a given connection
 *
 * @hba: adapter structure pointer
 * @tcp_err: tcp error kcqe pointer
 *
 * Handles TCP level error notifications from the f/w: logs the error and
 * pushes the connection into libiscsi recovery.
 */
static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
				    struct iscsi_kcqe *tcp_err)
{
	u32 iscsi_cid = tcp_err->iscsi_conn_id;
	struct bnx2i_conn *bnx2i_conn =
				bnx2i_get_conn_from_id(hba, iscsi_cid);

	if (!bnx2i_conn) {
		printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
		return;
	}

	printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
			  iscsi_cid, tcp_err->completion_status);
	bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
}
/**
 * bnx2i_process_iscsi_error - process error notification on a given connection
 * @hba: adapter structure pointer
 * @iscsi_err: iscsi error kcqe pointer
 *
 * handles iscsi error notifications from the FW. Firmware based in initial
 * handshake classifies iscsi protocol / TCP rfc violation into either
 * warning or error indications. If indication is of "Error" type, driver
 * will initiate session recovery for that connection/session. For
 * "Warning" type indication, driver will put out a system log message
 * (there will be only one message for each type for the life of the
 * session, this is to avoid un-necessarily overloading the system)
 */
static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
				      struct iscsi_kcqe *iscsi_err)
{
	struct bnx2i_conn *bnx2i_conn;
	u32 iscsi_cid;
	char warn_notice[] = "iscsi_warning";
	char error_notice[] = "iscsi_error";
	/* must be initialized: the default switch case below does not fill
	 * it in, yet it is printed afterwards - an uninitialized array
	 * would leak stack garbage into the log */
	char additional_notice[64] = "";
	char *message;
	int need_recovery;
	u64 err_mask64;

	iscsi_cid = iscsi_err->iscsi_conn_id;
	bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
	if (!bnx2i_conn) {
		printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
		return;
	}

	/* bits set in iscsi_error_mask are "warning only" violations */
	err_mask64 = (0x1ULL << iscsi_err->completion_status);

	if (err_mask64 & iscsi_error_mask) {
		need_recovery = 0;
		message = warn_notice;
	} else {
		need_recovery = 1;
		message = error_notice;
	}

	/* map the f/w completion status to a human-readable description */
	switch (iscsi_err->completion_status) {
	case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
		strcpy(additional_notice, "hdr digest err");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
		strcpy(additional_notice, "data digest err");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
		strcpy(additional_notice, "wrong opcode rcvd");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
		strcpy(additional_notice, "AHS len > 0 rcvd");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
		strcpy(additional_notice, "invalid ITT rcvd");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
		strcpy(additional_notice, "wrong StatSN rcvd");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
		strcpy(additional_notice, "wrong DataSN rcvd");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
		strcpy(additional_notice, "pend R2T violation");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
		strcpy(additional_notice, "ERL0, UO");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
		strcpy(additional_notice, "ERL0, U1");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
		strcpy(additional_notice, "ERL0, U2");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
		strcpy(additional_notice, "ERL0, U3");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
		strcpy(additional_notice, "ERL0, U4");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
		strcpy(additional_notice, "ERL0, U5");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
		strcpy(additional_notice, "ERL0, U6");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
		strcpy(additional_notice, "invalid resi len");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
		strcpy(additional_notice, "MRDSL violation");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
		strcpy(additional_notice, "F-bit not set");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
		strcpy(additional_notice, "invalid TTT");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
		strcpy(additional_notice, "invalid DataSN");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
		strcpy(additional_notice, "burst len violation");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
		strcpy(additional_notice, "buf offset violation");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
		strcpy(additional_notice, "invalid LUN field");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
		strcpy(additional_notice, "invalid R2TSN field");
		break;
#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 	\
	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
	case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
		strcpy(additional_notice, "invalid cmd len1");
		break;
#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 	\
	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
	case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
		strcpy(additional_notice, "invalid cmd len2");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
		strcpy(additional_notice,
		       "pend r2t exceeds MaxOutstandingR2T value");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
		strcpy(additional_notice, "TTT is rsvd");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
		strcpy(additional_notice, "MBL violation");
		break;
#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO 	\
	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
	case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
		strcpy(additional_notice, "data seg len != 0");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
		strcpy(additional_notice, "reject pdu len error");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
		strcpy(additional_notice, "async pdu len error");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
		strcpy(additional_notice, "nopin pdu len error");
		break;
#define BNX2_ERR_PEND_R2T_IN_CLEANUP			\
	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
	case BNX2_ERR_PEND_R2T_IN_CLEANUP:
		strcpy(additional_notice, "pend r2t in cleanup");
		break;
	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
		strcpy(additional_notice, "IP fragments rcvd");
		break;
	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
		strcpy(additional_notice, "IP options error");
		break;
	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
		strcpy(additional_notice, "urgent flag error");
		break;
	default:
		printk(KERN_ALERT "iscsi_err - unknown err %x\n",
		       iscsi_err->completion_status);
	}

	if (need_recovery) {
		iscsi_conn_printk(KERN_ALERT,
				  bnx2i_conn->cls_conn->dd_data,
				  "bnx2i: %s - %s\n",
				  message, additional_notice);

		iscsi_conn_printk(KERN_ALERT,
				  bnx2i_conn->cls_conn->dd_data,
				  "conn_err - hostno %d conn %p, "
				  "iscsi_cid %x cid %x\n",
				  bnx2i_conn->hba->shost->host_no,
				  bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
				  bnx2i_conn->ep->ep_cid);
		bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
	} else
		/* warn only once per violation type for the session's life */
		if (!test_and_set_bit(iscsi_err->completion_status,
				      (void *) &bnx2i_conn->violation_notified))
			iscsi_conn_printk(KERN_ALERT,
					  bnx2i_conn->cls_conn->dd_data,
					  "bnx2i: %s - %s\n",
					  message, additional_notice);
}
/**
 * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
 * @hba: adapter structure pointer
 * @conn_destroy: conn destroy kcqe pointer
 *
 * handles connection destroy completion request.  Records the result on the
 * endpoint and wakes the thread waiting on ofld_wait.
 */
static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
					    struct iscsi_kcqe *conn_destroy)
{
	struct bnx2i_endpoint *ep;

	ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
	if (!ep) {
		/* fixed log-message typo: "complection" -> "completion" */
		printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
			"offload request, unexpected completion\n");
		return;
	}

	if (hba != ep->hba) {
		printk(KERN_ALERT "conn destroy- error hba mis-match\n");
		return;
	}

	if (conn_destroy->completion_status) {
		printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
		ep->state = EP_STATE_CLEANUP_FAILED;
	} else
		ep->state = EP_STATE_CLEANUP_CMPL;
	wake_up_interruptible(&ep->ofld_wait);
}
/**
 * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
 * @hba: adapter structure pointer
 * @ofld_kcqe: conn offload kcqe pointer
 *
 * handles initial connection offload completion, ep_connect() thread is
 * woken-up to continue with LLP connect process
 */
static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
				    struct iscsi_kcqe *ofld_kcqe)
{
	u32 cid_addr;
	struct bnx2i_endpoint *ep;

	ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
	if (!ep) {
		printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
		return;
	}

	if (hba != ep->hba) {
		printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
		return;
	}

	if (ofld_kcqe->completion_status) {
		if (ofld_kcqe->completion_status ==
		    ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
			printk(KERN_ALERT "bnx2i: unable to allocate"
					  " iSCSI context resources\n");
		ep->state = EP_STATE_OFLD_FAILED;
	} else {
		/* offload succeeded - record the context id assigned by the
		 * f/w; ctx_base stays NULL until the doorbell window is
		 * mapped later.  (A dead local that called
		 * bnx2i_get_cid_num() without using the result was removed.)
		 */
		ep->state = EP_STATE_OFLD_COMPL;
		cid_addr = ofld_kcqe->iscsi_conn_context_id;
		ep->ep_cid = cid_addr;
		ep->qp.ctx_base = NULL;
	}
	wake_up_interruptible(&ep->ofld_wait);
}
/**
 * bnx2i_indicate_kcqe - generic KCQ event handler/dispatcher
 * @context: adapter structure pointer (bnx2i_hba)
 * @kcqe: array of pending KCQE pointers
 * @num_cqe: number of entries in @kcqe
 *
 * Dispatches each KCQE to the matching handler based on its opcode.
 */
static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
				u32 num_cqe)
{
	struct bnx2i_hba *hba = context;
	struct iscsi_kcqe *ikcqe;
	u32 i;

	for (i = 0; i < num_cqe; i++) {
		ikcqe = (struct iscsi_kcqe *) kcqe[i];

		switch (ikcqe->op_code) {
		case ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
			bnx2i_fastpath_notification(hba, ikcqe);
			break;
		case ISCSI_KCQE_OPCODE_OFFLOAD_CONN:
			bnx2i_process_ofld_cmpl(hba, ikcqe);
			break;
		case ISCSI_KCQE_OPCODE_UPDATE_CONN:
			bnx2i_process_update_conn_cmpl(hba, ikcqe);
			break;
		case ISCSI_KCQE_OPCODE_INIT:
			if (ikcqe->completion_status !=
			    ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
				bnx2i_iscsi_license_error(hba,
						ikcqe->completion_status);
			else {
				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
				bnx2i_get_link_state(hba);
				printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
						 "ISCSI_INIT passed\n",
				       (u8)hba->pcidev->bus->number,
				       hba->pci_devno,
				       (u8)hba->pci_func);
			}
			break;
		case ISCSI_KCQE_OPCODE_DESTROY_CONN:
			bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
			break;
		case ISCSI_KCQE_OPCODE_ISCSI_ERROR:
			bnx2i_process_iscsi_error(hba, ikcqe);
			break;
		case ISCSI_KCQE_OPCODE_TCP_ERROR:
			bnx2i_process_tcp_error(hba, ikcqe);
			break;
		default:
			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
			       ikcqe->op_code);
		}
	}
}
/**
* bnx2i_indicate_netevent - Generic netdev event handler
* @context: adapter structure pointer
* @event: event type
*
* Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
* NETDEV_GOING_DOWN and NETDEV_CHANGE
*/
/* Dispatch a netdev event for this adapter; only the four events named
 * in the kernel-doc above are acted upon, all others are ignored. */
static void bnx2i_indicate_netevent(void *context, unsigned long event)
{
	struct bnx2i_hba *hba = context;

	if (event == NETDEV_UP) {
		/* (re)initialize firmware unless the adapter is already up */
		if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
			bnx2i_send_fw_iscsi_init_msg(hba);
	} else if (event == NETDEV_DOWN) {
		clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
		clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
	} else if (event == NETDEV_GOING_DOWN) {
		set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
		iscsi_host_for_each_session(hba->shost,
					    bnx2i_drop_session);
	} else if (event == NETDEV_CHANGE) {
		bnx2i_get_link_state(hba);
	}
}
/**
* bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
* @cm_sk: cnic sock structure pointer
*
* function callback exported via bnx2i - cnic driver interface to
* indicate completion of option-2 TCP connect request.
*/
/* Completion callback for an option-2 TCP connect request: the connect
 * succeeds only when the adapter is not shutting down and the cnic layer
 * reports the offload as complete; otherwise it is marked failed. */
static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
{
	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;

	ep->state = EP_STATE_CONNECT_FAILED;
	if (!test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state) &&
	    test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
		ep->state = EP_STATE_CONNECT_COMPL;

	/* unblock the thread sleeping on the offload wait queue */
	wake_up_interruptible(&ep->ofld_wait);
}
/**
 * bnx2i_cm_close_cmpl - process tcp conn close completion
 * @cm_sk: cnic sock structure pointer
 *
 * function callback exported via bnx2i - cnic driver interface to
 * indicate completion of option-2 graceful TCP connection shutdown
 */
static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
{
	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;

	ep->state = EP_STATE_DISCONN_COMPL;
	/* wake the thread waiting for the disconnect to finish */
	wake_up_interruptible(&ep->ofld_wait);
}
/**
 * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
 * @cm_sk: cnic sock structure pointer
 *
 * function callback exported via bnx2i - cnic driver interface to
 * indicate completion of option-2 abortive TCP connection termination
 */
static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
{
	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;

	/* abort and graceful close both land in DISCONN_COMPL */
	ep->state = EP_STATE_DISCONN_COMPL;
	wake_up_interruptible(&ep->ofld_wait);
}
/**
 * bnx2i_cm_remote_close - process received TCP FIN
 * @cm_sk: cnic sock structure pointer
 *
 * function callback exported via bnx2i - cnic driver interface to indicate
 * async TCP events such as FIN
 */
static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
{
	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;

	ep->state = EP_STATE_TCP_FIN_RCVD;
	/* if an iscsi conn is bound to this endpoint, queue it for recovery */
	if (ep->conn)
		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
}
/**
 * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
 * @cm_sk: cnic sock structure pointer
 *
 * function callback exported via bnx2i - cnic driver interface to
 * indicate async TCP events (RST) sent by the peer.
 */
static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
{
	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;

	ep->state = EP_STATE_TCP_RST_RCVD;
	/* if an iscsi conn is bound to this endpoint, queue it for recovery */
	if (ep->conn)
		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
}
/**
 * bnx2i_send_nl_mesg - send an iSCSI uevent/netlink message to user space
 * @dev: cnic device the message originates from
 * @msg_type: iSCSI netlink message type
 * @buf: message payload
 * @buflen: payload length in bytes
 *
 * Maps the cnic device back to its bnx2i adapter and forwards the
 * message via the iscsi transport class; silently drops the message
 * if no matching adapter exists.
 */
static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type,
			       char *buf, u16 buflen)
{
	struct bnx2i_hba *hba;

	hba = bnx2i_find_hba_for_cnic(dev);
	if (!hba)
		return;

	if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
			       msg_type, buf, buflen))
		printk(KERN_ALERT "bnx2i: private nl message send error\n");
}
/**
 * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
 *		   carrying callback function pointers
 */
struct cnic_ulp_ops bnx2i_cnic_cb = {
	.cnic_init = bnx2i_ulp_init,		/* per-device allocation */
	.cnic_exit = bnx2i_ulp_exit,		/* per-device teardown */
	.cnic_start = bnx2i_start,		/* adapter bring-up */
	.cnic_stop = bnx2i_stop,		/* adapter shutdown */
	.indicate_kcqes = bnx2i_indicate_kcqe,	/* KCQ event dispatch */
	.indicate_netevent = bnx2i_indicate_netevent,
	.cm_connect_complete = bnx2i_cm_connect_cmpl,
	.cm_close_complete = bnx2i_cm_close_cmpl,
	.cm_abort_complete = bnx2i_cm_abort_cmpl,
	.cm_remote_close = bnx2i_cm_remote_close,
	.cm_remote_abort = bnx2i_cm_remote_abort,
	.iscsi_nl_send_msg = bnx2i_send_nl_mesg,
	.owner = THIS_MODULE
};
/**
* bnx2i_map_ep_dbell_regs - map connection doorbell registers
* @ep: bnx2i endpoint
*
* maps connection's SQ and RQ doorbell registers, 5706/5708/5709 hosts these
* register in BAR #0. Whereas in 57710 these register are accessed by
* mapping BAR #1
*/
/**
 * bnx2i_map_ep_dbell_regs - map connection doorbell registers
 * @ep: bnx2i endpoint
 *
 * Maps the connection's SQ and RQ doorbell registers. On 5706/5708/5709
 * these live in BAR #0 (at an offset derived from the context id and,
 * for 5709 in *bin* mailbox mode, the MQ configuration); on 57710 they
 * are reached through BAR #1.
 *
 * Returns 0 on success, -ENOMEM if the doorbell window cannot be mapped.
 */
int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
{
	u32 cid_num;
	u32 reg_off;
	u32 first_l4l5;
	u32 ctx_sz;
	u32 config2;
	resource_size_t reg_base;

	cid_num = bnx2i_get_cid_num(ep);

	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		reg_base = pci_resource_start(ep->hba->pcidev,
					      BNX2X_DOORBELL_PCI_BAR);
		reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
		ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
		/* BUGFIX: ioremap_nocache() can return NULL; the original
		 * code jumped straight to arm_cq and would dereference a
		 * NULL doorbell mapping later on. */
		if (!ep->qp.ctx_base)
			return -ENOMEM;
		goto arm_cq;
	}

	reg_base = ep->hba->netdev->base_addr;
	if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
	    (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
		/* 5709 in bin mode: doorbell offset depends on the MQ
		 * context size/layout read from BNX2_MQ_CONFIG2 */
		config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
		first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
		ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
		if (ctx_sz)
			reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
				  + PAGE_SIZE *
				  (((cid_num - first_l4l5) / ctx_sz) + 256);
		else
			reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
	} else
		/* 5709 device in normal node and 5706/5708 devices */
		reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);

	ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
					  MB_KERNEL_CTX_SIZE);
	if (!ep->qp.ctx_base)
		return -ENOMEM;

arm_cq:
	bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
	return 0;
}
/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
*
* Copyright (c) 2006 - 2009 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
*/
#include "bnx2i.h"

/* global list of all bnx2i adapter instances, guarded by bnx2i_dev_lock */
static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;	/* number of adapters on adapter_list */
static int bnx2i_reg_device;	/* count of adapters registered with cnic */

#define DRV_MODULE_NAME		"bnx2i"
#define DRV_MODULE_VERSION	"2.0.1d"
#define DRV_MODULE_RELDATE	"Mar 25, 2009"

static char version[] __devinitdata =
		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
		" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* protects adapter_list/adapter_count and registration bookkeeping */
static DEFINE_RWLOCK(bnx2i_dev_lock);

/* module parameters - all runtime-writable (mode 0664) */
unsigned int event_coal_div = 1;
module_param(event_coal_div, int, 0664);
MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");

unsigned int en_tcp_dack = 1;
module_param(en_tcp_dack, int, 0664);
MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");

unsigned int error_mask1 = 0x00;
module_param(error_mask1, int, 0664);
MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");

unsigned int error_mask2 = 0x00;
module_param(error_mask2, int, 0664);
MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");

/* rounded up to a power of two in bnx2i_mod_init() */
unsigned int sq_size;
module_param(sq_size, int, 0664);
MODULE_PARM_DESC(sq_size, "Configure SQ size");

unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
module_param(rq_size, int, 0664);
MODULE_PARM_DESC(rq_size, "Configure RQ size");

u64 iscsi_error_mask = 0x00;

static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) ;
/**
* bnx2i_identify_device - identifies NetXtreme II device type
* @hba: Adapter structure pointer
*
* This function identifies the NX2 device type and sets appropriate
* queue mailbox register access method, 5709 requires driver to
* access MBOX regs using *bin* mode
*/
/* Classify the NX2 chip family from the PCI device id and record it in
 * hba->cnic_dev_type; 5709 parts additionally require *bin* mode access
 * to the mailbox queue registers. */
void bnx2i_identify_device(struct bnx2i_hba *hba)
{
	hba->cnic_dev_type = 0;

	switch (hba->pci_did) {
	case PCI_DEVICE_ID_NX2_5706:
	case PCI_DEVICE_ID_NX2_5706S:
		set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
		break;
	case PCI_DEVICE_ID_NX2_5708:
	case PCI_DEVICE_ID_NX2_5708S:
		set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
		break;
	case PCI_DEVICE_ID_NX2_5709:
	case PCI_DEVICE_ID_NX2_5709S:
		set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
		hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
		break;
	case PCI_DEVICE_ID_NX2_57710:
	case PCI_DEVICE_ID_NX2_57711:
		set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
		break;
	}
}
/**
* get_adapter_list_head - returns head of adapter list
*/
/* Return the first adapter on the global list whose cnic device supports
 * connection-manager device selection, or NULL if none exists. */
struct bnx2i_hba *get_adapter_list_head(void)
{
	struct bnx2i_hba *found = NULL;
	struct bnx2i_hba *hba;

	if (!adapter_count)
		return NULL;

	read_lock(&bnx2i_dev_lock);
	list_for_each_entry(hba, &adapter_list, link) {
		if (hba->cnic && hba->cnic->cm_select_dev) {
			found = hba;
			break;
		}
	}
	read_unlock(&bnx2i_dev_lock);
	return found;
}
/**
* bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
* @cnic: pointer to cnic device instance
*
*/
/**
 * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
 * @cnic: pointer to cnic device instance
 *
 * Returns the adapter bound to @cnic, or NULL if none is found.
 */
struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
{
	struct bnx2i_hba *hba;

	read_lock(&bnx2i_dev_lock);
	/* plain iteration is sufficient here - nothing is removed from the
	 * list during the walk, so the _safe variant only obscured intent */
	list_for_each_entry(hba, &adapter_list, link) {
		if (hba->cnic == cnic) {
			read_unlock(&bnx2i_dev_lock);
			return hba;
		}
	}
	read_unlock(&bnx2i_dev_lock);
	return NULL;
}
/**
* bnx2i_start - cnic callback to initialize & start adapter instance
* @handle: transparent handle pointing to adapter structure
*
* This function maps adapter structure to pcidev structure and initiates
* firmware handshake to enable/initialize on chip iscsi components
* This bnx2i - cnic interface api callback is issued after following
* 2 conditions are met -
* a) underlying network interface is up (marked by event 'NETDEV_UP'
* from netdev
* b) bnx2i adapter instance is registered
*/
void bnx2i_start(void *handle)
{
#define BNX2I_INIT_POLL_TIME	(1000 / HZ)
	struct bnx2i_hba *hba = handle;
	int i = HZ;	/* HZ iterations of (1000/HZ) ms => ~1 second max */

	bnx2i_send_fw_iscsi_init_msg(hba);
	/* poll until the INIT completion KCQE sets ADAPTER_STATE_UP,
	 * or the ~1 second budget runs out */
	while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
		msleep(BNX2I_INIT_POLL_TIME);
}
/**
* bnx2i_stop - cnic callback to shutdown adapter instance
* @handle: transparent handle pointing to adapter structure
*
* driver checks if adapter is already in shutdown mode, if not start
* the shutdown process
*/
void bnx2i_stop(void *handle)
{
	struct bnx2i_hba *hba = handle;

	/* check if cleanup happened in GOING_DOWN context */
	clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
	if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
				&hba->adapter_state))
		/* NETDEV_GOING_DOWN never ran, so sessions have not been
		 * dropped yet - drop them now */
		iscsi_host_for_each_session(hba->shost,
					    bnx2i_drop_session);
}
/**
* bnx2i_register_device - register bnx2i adapter instance with the cnic driver
* @hba: Adapter instance to register
*
* registers bnx2i adapter instance with the cnic driver while holding the
* adapter structure lock
*/
void bnx2i_register_device(struct bnx2i_hba *hba)
{
	/* nothing to do if shutting down or already registered */
	if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
	    test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
		return;
	}

	hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);

	/* hba->lock guards the global registration counter */
	spin_lock(&hba->lock);
	bnx2i_reg_device++;
	spin_unlock(&hba->lock);

	set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
}
/**
* bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
*
* registers all bnx2i adapter instances with the cnic driver while holding
* the global resource lock
*/
void bnx2i_reg_dev_all(void)
{
	struct bnx2i_hba *hba, *temp;

	/* read lock is enough: the list itself is not modified here */
	read_lock(&bnx2i_dev_lock);
	list_for_each_entry_safe(hba, temp, &adapter_list, link)
		bnx2i_register_device(hba);
	read_unlock(&bnx2i_dev_lock);
}
/**
* bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
* @hba: Adapter instance to unregister
*
* registers bnx2i adapter instance with the cnic driver while holding
* the adapter structure lock
*/
static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
{
	/* skip when connections are still offloaded, the adapter never
	 * registered, or a shutdown is already in progress */
	if (hba->ofld_conns_active ||
	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
		return;

	hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);

	/* hba->lock guards the global registration counter */
	spin_lock(&hba->lock);
	bnx2i_reg_device--;
	spin_unlock(&hba->lock);

	/* ep_disconnect could come before NETDEV_DOWN, driver won't
	 * see NETDEV_DOWN as it already unregistered itself.
	 */
	hba->adapter_state = 0;
	clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
}
/**
* bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
*
* unregisters all bnx2i adapter instances with the cnic driver while holding
* the global resource lock
*/
void bnx2i_unreg_dev_all(void)
{
	struct bnx2i_hba *hba, *temp;

	/* read lock is enough: the list itself is not modified here */
	read_lock(&bnx2i_dev_lock);
	list_for_each_entry_safe(hba, temp, &adapter_list, link)
		bnx2i_unreg_one_device(hba);
	read_unlock(&bnx2i_dev_lock);
}
/**
* bnx2i_init_one - initialize an adapter instance and allocate memory resources
* @hba: bnx2i adapter instance
* @cnic: cnic device handle
*
* Global resource lock and host adapter lock is held during critical sections
* below. This routine is called from cnic_register_driver() context and
* work horse thread which does majority of device specific initialization
*/
static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
{
	int rc;

	read_lock(&bnx2i_dev_lock);
	/* if other adapters are already registered, register this one too */
	if (bnx2i_reg_device &&
	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
		rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
		if (rc)		/* duplicate registration */
			printk(KERN_ERR "bnx2i- dev reg failed\n");
		/* NOTE(review): a registration failure is only logged; the
		 * counter/flag updates below happen regardless - confirm
		 * this is intentional */

		spin_lock(&hba->lock);
		bnx2i_reg_device++;
		hba->age++;
		spin_unlock(&hba->lock);

		set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
	}
	read_unlock(&bnx2i_dev_lock);

	/* publish the adapter on the global list under the writer lock */
	write_lock(&bnx2i_dev_lock);
	list_add_tail(&hba->link, &adapter_list);
	adapter_count++;
	write_unlock(&bnx2i_dev_lock);
	return 0;
}
/**
* bnx2i_ulp_init - initialize an adapter instance
* @dev: cnic device handle
*
* Called from cnic_register_driver() context to initialize all enumerated
* cnic devices. This routine allocate adapter structure and other
* device specific resources.
*/
/* cnic per-device init callback: allocate an adapter structure for the
 * enumerated cnic device and wire it into the driver's global state. */
void bnx2i_ulp_init(struct cnic_dev *dev)
{
	struct bnx2i_hba *hba;

	/* Allocate a HBA structure for this device */
	hba = bnx2i_alloc_hba(dev);
	if (!hba) {
		printk(KERN_ERR "bnx2i init: hba initialization failed\n");
		return;
	}

	clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
	if (!bnx2i_init_one(hba, dev)) {
		/* success: remember the backing cnic device */
		hba->cnic = dev;
		return;
	}

	printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
	bnx2i_free_hba(hba);
}
/**
* bnx2i_ulp_exit - shuts down adapter instance and frees all resources
* @dev: cnic device handle
*
*/
void bnx2i_ulp_exit(struct cnic_dev *dev)
{
	struct bnx2i_hba *hba;

	hba = bnx2i_find_hba_for_cnic(dev);
	if (!hba) {
		printk(KERN_INFO "bnx2i_ulp_exit: hba not "
				 "found, dev 0x%p\n", dev);
		return;
	}

	/* remove from the global list and undo cnic registration under
	 * the writer lock */
	write_lock(&bnx2i_dev_lock);
	list_del_init(&hba->link);
	adapter_count--;

	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
		hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
		clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);

		spin_lock(&hba->lock);
		bnx2i_reg_device--;
		spin_unlock(&hba->lock);
	}
	write_unlock(&bnx2i_dev_lock);

	bnx2i_free_hba(hba);
}
/**
* bnx2i_mod_init - module init entry point
*
* initialize any driver wide global data structures such as endpoint pool,
* tcp port manager/queue, sysfs. finally driver will register itself
* with the cnic module
*/
/**
 * bnx2i_mod_init - module init entry point
 *
 * initialize any driver wide global data structures such as endpoint pool,
 * tcp port manager/queue, sysfs. finally driver will register itself
 * with the cnic module
 */
static int __init bnx2i_mod_init(void)
{
	int err;

	printk(KERN_INFO "%s", version);

	/* BUGFIX: guard against sq_size == 0 (the module default) -
	 * roundup_pow_of_two(0) is undefined; a zero parameter is left
	 * alone so downstream code can apply its own default. */
	if (sq_size && !is_power_of_2(sq_size))
		sq_size = roundup_pow_of_two(sq_size);

	bnx2i_scsi_xport_template =
			iscsi_register_transport(&bnx2i_iscsi_transport);
	if (!bnx2i_scsi_xport_template) {
		printk(KERN_ERR "Could not register bnx2i transport.\n");
		err = -ENOMEM;
		goto out;
	}

	err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
	if (err) {
		printk(KERN_ERR "Could not register bnx2i cnic driver.\n");
		goto unreg_xport;
	}

	return 0;

unreg_xport:
	iscsi_unregister_transport(&bnx2i_iscsi_transport);
out:
	return err;
}
/**
* bnx2i_mod_exit - module cleanup/exit entry point
*
* Global resource lock and host adapter lock is held during critical sections
* in this function. Driver will browse through the adapter list, cleans-up
* each instance, unregisters iscsi transport name and finally driver will
* unregister itself with the cnic module
*/
static void __exit bnx2i_mod_exit(void)
{
	struct bnx2i_hba *hba;

	write_lock(&bnx2i_dev_lock);
	while (!list_empty(&adapter_list)) {
		hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
		list_del(&hba->link);
		adapter_count--;

		if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
			hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
			clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
			bnx2i_reg_device--;
		}

		/* drop the lock while freeing - bnx2i_free_hba() may sleep;
		 * the entry was already unlinked above so this is safe */
		write_unlock(&bnx2i_dev_lock);
		bnx2i_free_hba(hba);
		write_lock(&bnx2i_dev_lock);
	}
	write_unlock(&bnx2i_dev_lock);

	iscsi_unregister_transport(&bnx2i_iscsi_transport);
	cnic_unregister_driver(CNIC_ULP_ISCSI);
}
module_init(bnx2i_mod_init);
module_exit(bnx2i_mod_exit);
/*
* bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
*
* Copyright (c) 2006 - 2009 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
*/
#include <scsi/scsi_tcq.h>
#include <scsi/libiscsi.h>
#include "bnx2i.h"
/* transport/host templates shared by all bnx2i adapter instances */
struct scsi_transport_template *bnx2i_scsi_xport_template;
struct iscsi_transport bnx2i_iscsi_transport;
static struct scsi_host_template bnx2i_host_template;

/*
 * Global endpoint resource info
 */
static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
/* Return 0 when the adapter is up and usable, -EPERM when it is absent,
 * not yet up, shutting down, or has no link. */
static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
{
	if (!hba)
		return -EPERM;

	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
	    test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
		return -EPERM;

	return 0;
}
/**
* bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
* @cmd: iscsi cmd struct pointer
* @buf_off: absolute buffer offset
* @start_bd_off: u32 pointer to return the offset within the BD
* indicated by 'start_bd_idx' on which 'buf_off' falls
* @start_bd_idx: index of the BD on which 'buf_off' falls
*
* identifies & marks various bd info for scsi command's imm data,
* unsolicited data and the first solicited data seq.
*/
/* Locate the BD that contains absolute buffer offset 'buf_off': returns
 * the BD index in *start_bd_idx and the residual offset within that BD
 * in *start_bd_off. */
static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
				       u32 *start_bd_off, u32 *start_bd_idx)
{
	struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
	u32 covered = 0;
	u32 idx = 0;

	if (buf_off) {
		/* walk past every BD that ends at or before buf_off */
		while (buf_off >= (covered + bd->buffer_length)) {
			covered += bd->buffer_length;
			idx++;
			bd++;
		}
	}

	*start_bd_off = buf_off - covered;
	*start_bd_idx = idx;
}
/**
* bnx2i_setup_write_cmd_bd_info - sets up BD various information
* @task: transport layer's cmd struct pointer
*
* identifies & marks various bd info for scsi command's immediate data,
* unsolicited data and first solicited data seq which includes BD start
* index & BD buf off. his function takes into account iscsi parameter such
* as immediate data and unsolicited data is support on this connection.
*/
static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
{
	struct bnx2i_cmd *cmd = task->dd_data;
	u32 start_bd_offset;
	u32 start_bd_idx;
	u32 buffer_offset = 0;
	u32 cmd_len = cmd->req.total_data_transfer_length;

	/* if ImmediateData is turned off & IntialR2T is turned on,
	 * there will be no immediate or unsolicited data, just return.
	 */
	if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
		return;

	/* Immediate data */
	buffer_offset += task->imm_count;
	if (task->imm_count == cmd_len)
		return;

	if (iscsi_task_has_unsol_data(task)) {
		/* mark where the unsolicited data sequence begins */
		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
					   &start_bd_offset, &start_bd_idx);
		cmd->req.ud_buffer_offset = start_bd_offset;
		cmd->req.ud_start_bd_index = start_bd_idx;
		buffer_offset += task->unsol_r2t.data_length;
	}

	if (buffer_offset != cmd_len) {
		/* mark where the first solicited data sequence begins */
		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
					   &start_bd_offset, &start_bd_idx);
		/* sanity check: the computed start must lie within the
		 * first burst and the command's SG list */
		if ((start_bd_offset > task->conn->session->first_burst) ||
		    (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
			int i = 0;

			iscsi_conn_printk(KERN_ALERT, task->conn,
					  "bnx2i- error, buf offset 0x%x "
					  "bd_valid %d use_sg %d\n",
					  buffer_offset, cmd->io_tbl.bd_valid,
					  scsi_sg_count(cmd->scsi_cmd));
			for (i = 0; i < cmd->io_tbl.bd_valid; i++)
				iscsi_conn_printk(KERN_ALERT, task->conn,
						  "bnx2i err, bd[%d]: len %x\n",
						  i, cmd->io_tbl.bd_tbl[i].\
						  buffer_length);
		}
		cmd->req.sd_buffer_offset = start_bd_offset;
		cmd->req.sd_start_bd_index = start_bd_idx;
	}
}
/**
* bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
* @hba: adapter instance
* @cmd: iscsi cmd struct pointer
*
* map SG list
*/
static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;
	struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int bd_count = 0;
	int sg_count;
	int sg_len;
	u64 addr;
	int i;

	BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);

	/* NOTE(review): scsi_dma_map() can fail (negative return); that
	 * skips the loop and yields bd_count == 0 here - confirm callers
	 * treat a zero BD count safely */
	sg_count = scsi_dma_map(sc);

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		/* split the 64-bit DMA address across the BD's lo/hi words */
		bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
		bd[bd_count].buffer_addr_hi = addr >> 32;
		bd[bd_count].buffer_length = sg_len;
		bd[bd_count].flags = 0;
		if (bd_count == 0)
			bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;

		byte_count += sg_len;
		bd_count++;
	}

	if (bd_count)
		bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;

	/* the BD chain must cover the exact transfer length */
	BUG_ON(byte_count != scsi_bufflen(sc));
	return bd_count;
}
/**
* bnx2i_iscsi_map_sg_list - maps SG list
* @cmd: iscsi cmd struct pointer
*
* creates BD list table for the command
*/
/* Build the BD list table for a command; commands with no data get a
 * single zeroed BD entry and a bd_valid count of 0. */
static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
{
	int count;

	count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
	if (!count) {
		struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;

		bd[0].buffer_addr_lo = 0;
		bd[0].buffer_addr_hi = 0;
		bd[0].buffer_length = 0;
		bd[0].flags = 0;
	}
	cmd->io_tbl.bd_valid = count;
}
/**
* bnx2i_iscsi_unmap_sg_list - unmaps SG list
* @cmd: iscsi cmd struct pointer
*
* unmap IO buffers and invalidate the BD table
*/
/* Undo the DMA mapping created by bnx2i_iscsi_map_sg_list() and mark
 * the command's BD table as empty; a no-op for dataless commands. */
void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;

	if (!sc || !cmd->io_tbl.bd_valid)
		return;

	scsi_dma_unmap(sc);
	cmd->io_tbl.bd_valid = 0;
}
/* Prime a command's work-queue request: zero it, stamp the op_code
 * placeholder (0xFF - presumably overwritten when the actual request is
 * built; TODO confirm) and record the BD table's DMA address. */
static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
{
	memset(&cmd->req, 0x00, sizeof(cmd->req));
	cmd->req.op_code = 0xFF;
	cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
	cmd->req.bd_list_addr_hi =
		(u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
}
/**
* bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
* @hba: pointer to adapter instance
* @conn: pointer to iscsi connection
* @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
*
* update iscsi cid table entry with connection pointer. This enables
* driver to quickly get hold of connection structure pointer in
* completion/interrupt thread using iscsi context ID
*/
/**
 * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
 * @hba: pointer to adapter instance
 * @bnx2i_conn: pointer to iscsi connection
 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
 *
 * update iscsi cid table entry with connection pointer. This enables
 * driver to quickly get hold of connection structure pointer in
 * completion/interrupt thread using iscsi context ID
 *
 * Returns 0 on success, -EINVAL for a NULL adapter, -EBUSY when the
 * cid slot is already occupied.
 */
static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
					struct bnx2i_conn *bnx2i_conn,
					u32 iscsi_cid)
{
	/* BUGFIX: the old code only tested 'hba' inside the busy check
	 * but dereferenced it unconditionally below, so a NULL hba would
	 * oops instead of failing gracefully. */
	if (!hba)
		return -EINVAL;

	if (hba->cid_que.conn_cid_tbl[iscsi_cid]) {
		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
				 "conn bind - entry #%d not free\n", iscsi_cid);
		return -EBUSY;
	}

	hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
	return 0;
}
/**
* bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
* @hba: pointer to adapter instance
* @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
*/
/* Translate an iscsi context ID into the bound connection pointer, or
 * NULL when the lookup table is missing or the cid is out of range. */
struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
					  u16 iscsi_cid)
{
	if (!hba->cid_que.conn_cid_tbl) {
		printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
		return NULL;
	}

	if (iscsi_cid >= hba->max_active_conns) {
		printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
		return NULL;
	}

	return hba->cid_que.conn_cid_tbl[iscsi_cid];
}
/**
* bnx2i_alloc_iscsi_cid - allocates a iscsi_cid from free pool
* @hba: pointer to adapter instance
*/
/* Pop the next free iscsi cid off the circular free queue; returns
 * (u32)-1 when the pool is exhausted. */
static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
{
	u32 cid;
	int slot;

	if (!hba->cid_que.cid_free_cnt)
		return -1;

	slot = hba->cid_que.cid_q_cons_idx;
	cid = hba->cid_que.cid_que[slot];

	/* advance the consumer index, wrapping at the queue end */
	if (++hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
		hba->cid_que.cid_q_cons_idx = 0;
	hba->cid_que.cid_free_cnt--;

	return cid;
}
/**
* bnx2i_free_iscsi_cid - returns tcp port to free list
* @hba: pointer to adapter instance
* @iscsi_cid: iscsi context ID to free
*/
static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
{
	int idx;

	/* (u16)-1 marks "no cid allocated" - nothing to return */
	if (iscsi_cid == (u16) -1)
		return;

	hba->cid_que.cid_free_cnt++;

	/* push the cid back at the producer slot and clear its conn binding */
	idx = hba->cid_que.cid_q_prod_idx;
	hba->cid_que.cid_que[idx] = iscsi_cid;
	hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;

	/* advance the producer index, wrapping at the queue end */
	hba->cid_que.cid_q_prod_idx++;
	if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
		hba->cid_que.cid_q_prod_idx = 0;
}
/**
* bnx2i_setup_free_cid_que - sets up free iscsi cid queue
* @hba: pointer to adapter instance
*
* allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
* and initialize table attributes
*/
/* Allocate and initialize the free-cid circular queue and the
 * cid -> connection pointer lookup table for this adapter. */
static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
{
	int que_bytes;
	int tbl_bytes;
	int i;

	/* free-cid queue: one u32 slot per connection, page aligned */
	que_bytes = hba->max_active_conns * sizeof(u32);
	que_bytes = (que_bytes + (PAGE_SIZE - 1)) & PAGE_MASK;
	hba->cid_que.cid_que_base = kmalloc(que_bytes, GFP_KERNEL);
	if (!hba->cid_que.cid_que_base)
		return -ENOMEM;

	/* cid -> conn pointer lookup table, page aligned */
	tbl_bytes = hba->max_active_conns * sizeof(struct bnx2i_conn *);
	tbl_bytes = (tbl_bytes + (PAGE_SIZE - 1)) & PAGE_MASK;
	hba->cid_que.conn_cid_tbl = kmalloc(tbl_bytes, GFP_KERNEL);
	if (!hba->cid_que.conn_cid_tbl) {
		kfree(hba->cid_que.cid_que_base);
		hba->cid_que.cid_que_base = NULL;
		return -ENOMEM;
	}

	hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
	hba->cid_que.cid_q_prod_idx = 0;
	hba->cid_que.cid_q_cons_idx = 0;
	hba->cid_que.cid_q_max_idx = hba->max_active_conns;
	hba->cid_que.cid_free_cnt = hba->max_active_conns;

	/* initially every cid is free and bound to no connection */
	for (i = 0; i < hba->max_active_conns; i++) {
		hba->cid_que.cid_que[i] = i;
		hba->cid_que.conn_cid_tbl[i] = NULL;
	}
	return 0;
}
/**
* bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
* @hba: pointer to adapter instance
*/
static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
{
	/* kfree(NULL) is a no-op, so this is safe even when setup failed */
	kfree(hba->cid_que.cid_que_base);
	hba->cid_que.cid_que_base = NULL;

	kfree(hba->cid_que.conn_cid_tbl);
	hba->cid_que.conn_cid_tbl = NULL;
}
/**
* bnx2i_alloc_ep - allocates ep structure from global pool
* @hba: pointer to adapter instance
*
* routine allocates a free endpoint structure from global pool and
* a tcp port to be used for this connection. Global resource lock,
* 'bnx2i_resc_lock' is held while accessing shared global data structures
*/
static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
{
	struct iscsi_endpoint *ep;
	struct bnx2i_endpoint *bnx2i_ep;

	ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
	if (!ep) {
		printk(KERN_ERR "bnx2i: Could not allocate ep\n");
		return NULL;
	}

	bnx2i_ep = ep->dd_data;
	INIT_LIST_HEAD(&bnx2i_ep->link);
	bnx2i_ep->state = EP_STATE_IDLE;
	bnx2i_ep->hba = hba;
	/* snapshot the adapter generation so a stale ep can be detected
	 * after the adapter re-registers */
	bnx2i_ep->hba_age = hba->age;
	hba->ofld_conns_active++;
	init_waitqueue_head(&bnx2i_ep->ofld_wait);
	return ep;
}
/**
* bnx2i_free_ep - free endpoint
* @ep: pointer to iscsi endpoint structure
*/
static void bnx2i_free_ep(struct iscsi_endpoint *ep)
{
	struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
	unsigned long flags;

	/* global resource lock: cid free-list and ofld_conns_active are
	 * shared across adapters/contexts */
	spin_lock_irqsave(&bnx2i_resc_lock, flags);
	bnx2i_ep->state = EP_STATE_IDLE;
	bnx2i_ep->hba->ofld_conns_active--;

	bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
	/* sever the ep <-> conn cross references */
	if (bnx2i_ep->conn) {
		bnx2i_ep->conn->ep = NULL;
		bnx2i_ep->conn = NULL;
	}

	bnx2i_ep->hba = NULL;
	spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
	iscsi_destroy_endpoint(ep);
}
/**
* bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
* @hba: adapter instance pointer
* @session: iscsi session pointer
* @cmd: iscsi command structure
*/
/* Allocate the DMA-coherent buffer descriptor table for one command;
 * returns 0 on success, -ENOMEM on allocation failure. */
static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
			   struct bnx2i_cmd *cmd)
{
	struct io_bdt *io = &cmd->io_tbl;

	io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
					ISCSI_MAX_BDS_PER_CMD *
					sizeof(struct iscsi_bd),
					&io->bd_tbl_dma, GFP_KERNEL);
	if (io->bd_tbl) {
		io->bd_valid = 0;
		return 0;
	}

	iscsi_session_printk(KERN_ERR, session, "Could not "
			     "allocate bdt.\n");
	return -ENOMEM;
}
/**
* bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table
* @hba: adapter instance pointer
* @session: iscsi session pointer
* @cmd: iscsi command structure
*/
static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
				   struct iscsi_session *session)
{
	int i;

	/* release the per-command BD table of every command in the session;
	 * commands whose table was never allocated are skipped, so this is
	 * safe to call from a partially-failed setup */
	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_task *task = session->cmds[i];
		struct bnx2i_cmd *cmd = task->dd_data;

		if (cmd->io_tbl.bd_tbl)
			dma_free_coherent(&hba->pcidev->dev,
					  ISCSI_MAX_BDS_PER_CMD *
					  sizeof(struct iscsi_bd),
					  cmd->io_tbl.bd_tbl,
					  cmd->io_tbl.bd_tbl_dma);
	}
}
/**
* bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
* @hba: adapter instance pointer
* @session: iscsi session pointer
*/
static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
				struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_task *task = session->cmds[i];
		struct bnx2i_cmd *cmd = task->dd_data;

		/* point libiscsi at the driver-owned header storage */
		task->hdr = &cmd->hdr;
		task->hdr_max = sizeof(struct iscsi_hdr);

		if (bnx2i_alloc_bdt(hba, session, cmd))
			goto free_bdts;
	}

	return 0;

free_bdts:
	/* unwind: frees every BD table allocated so far */
	bnx2i_destroy_cmd_pool(hba, session);
	return -ENOMEM;
}
/**
* bnx2i_setup_mp_bdt - allocate BD table resources
* @hba: pointer to adapter structure
*
* Allocate memory for dummy buffer and associated BD
* table to be used by middle path (MP) requests
*/
static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
{
int rc = 0;
struct iscsi_bd *mp_bdt;
u64 addr;
hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
&hba->mp_bd_dma, GFP_KERNEL);
if (!hba->mp_bd_tbl) {
printk(KERN_ERR "unable to allocate Middle Path BDT\n");
rc = -1;
goto out;
}
hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
&hba->dummy_buf_dma, GFP_KERNEL);
if (!hba->dummy_buffer) {
printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
hba->mp_bd_tbl, hba->mp_bd_dma);
hba->mp_bd_tbl = NULL;
rc = -1;
goto out;
}
mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
addr = (unsigned long) hba->dummy_buf_dma;
mp_bdt->buffer_addr_lo = addr & 0xffffffff;
mp_bdt->buffer_addr_hi = addr >> 32;
mp_bdt->buffer_length = PAGE_SIZE;
mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
ISCSI_BD_FIRST_IN_BD_CHAIN;
out:
return rc;
}
/**
* bnx2i_free_mp_bdt - releases ITT back to free pool
* @hba: pointer to adapter instance
*
* free MP dummy buffer and associated BD table
*/
/* Release the middle-path dummy buffer and its BD table; each pointer is
 * checked individually because dma_free_coherent() is not NULL-safe and
 * setup may have failed part-way. */
static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
{
	if (hba->mp_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->mp_bd_tbl, hba->mp_bd_dma);
		hba->mp_bd_tbl = NULL;
	}

	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}
}
/**
 * bnx2i_drop_session - notifies iscsid of connection error.
 * @cls_session: iscsi cls session pointer
 *
 * This notifies iscsid that there is an error, so it can initiate
 * recovery.
 *
 * This relies on caller using the iscsi class iterator so the object
 * is refcounted and does not disappear from under us.
 */
void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}
/**
* bnx2i_ep_destroy_list_add - add an entry to EP destroy list
* @hba: pointer to adapter instance
* @ep: pointer to endpoint (transport indentifier) structure
*
* EP destroy queue manager
*/
static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	/* list mutation requires the writer side of ep_rdwr_lock */
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_destroy_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}
/**
 * bnx2i_ep_destroy_list_del - remove an entry from EP destroy list
 *
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport indentifier) structure
 *
 * EP destroy queue manager
 */
static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	/* list mutation requires the writer side of ep_rdwr_lock */
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}
/**
 * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport indentifier) structure
 *
 * pending conn offload completion queue manager. The endpoint is
 * queued at the tail under the per-hba ep_rdwr_lock. Always returns 0.
 */
static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
				  struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_ofld_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}
/**
 * bnx2i_ep_ofld_list_del - remove an entry from ep offload pending list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport indentifier) structure
 *
 * pending conn offload completion queue manager. Removes @ep from the
 * offload pending list under the per-hba ep_rdwr_lock. Always returns 0.
 */
static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
				  struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}
/**
 * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
 *
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID to find
 *
 * Walks the offload-pending list under the EP read lock and returns the
 * endpoint whose ep_iscsi_cid matches @iscsi_cid, or NULL (with an error
 * message) if there is no match. The cast from the list head relies on
 * 'link' being embedded in struct bnx2i_endpoint at a known offset --
 * TODO(review): confirm 'link' is the first member, or switch to
 * list_for_each_entry().
 */
struct bnx2i_endpoint *
bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
	struct list_head *list;
	struct list_head *tmp;
	/* must start NULL: with an empty list the loop body never runs and
	 * 'ep' would otherwise be read uninitialized below
	 */
	struct bnx2i_endpoint *ep = NULL;

	read_lock_bh(&hba->ep_rdwr_lock);
	list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
		ep = (struct bnx2i_endpoint *)list;

		if (ep->ep_iscsi_cid == iscsi_cid)
			break;
		ep = NULL;
	}
	read_unlock_bh(&hba->ep_rdwr_lock);

	if (!ep)
		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
	return ep;
}
/**
 * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID to find
 *
 * Walks the EP destroy list under the EP read lock and returns the
 * endpoint whose ep_iscsi_cid matches @iscsi_cid, or NULL (with an
 * error message) if there is no match.
 */
struct bnx2i_endpoint *
bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
	struct list_head *list;
	struct list_head *tmp;
	/* must start NULL: with an empty list the loop body never runs and
	 * 'ep' would otherwise be read uninitialized below
	 */
	struct bnx2i_endpoint *ep = NULL;

	read_lock_bh(&hba->ep_rdwr_lock);
	list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
		ep = (struct bnx2i_endpoint *)list;

		if (ep->ep_iscsi_cid == iscsi_cid)
			break;
		ep = NULL;
	}
	read_unlock_bh(&hba->ep_rdwr_lock);

	if (!ep)
		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
	return ep;
}
/**
 * bnx2i_setup_host_queue_size - assigns shost->can_queue param
 * @hba: pointer to adapter instance
 * @shost: scsi host pointer
 *
 * Derives 'can_queue' from the device type bit recorded in
 * hba->cnic_dev_type; 5708, 5709 and 57710 each support a different
 * number of outstanding commands. An unrecognized device type falls
 * back to the 5708 (smallest) limit.
 */
static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
					struct Scsi_Host *shost)
{
	int max_cmds;

	if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
		max_cmds = ISCSI_MAX_CMDS_PER_HBA_5708;
	else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
		max_cmds = ISCSI_MAX_CMDS_PER_HBA_5709;
	else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
		max_cmds = ISCSI_MAX_CMDS_PER_HBA_57710;
	else
		max_cmds = ISCSI_MAX_CMDS_PER_HBA_5708;

	shost->can_queue = max_cmds;
}
/**
 * bnx2i_alloc_hba - allocate and init adapter instance
 * @cnic: cnic device pointer
 *
 * allocate & initialize adapter structure and call other
 *	support routines to do per adapter initialization.
 *
 * Returns the new hba on success, NULL on any failure; partial
 * allocations are unwound on the error path in reverse order.
 */
struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
{
	struct Scsi_Host *shost;
	struct bnx2i_hba *hba;

	shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
	if (!shost)
		return NULL;
	shost->dma_boundary = cnic->pcidev->dma_mask;
	shost->transportt = bnx2i_scsi_xport_template;
	shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
	shost->max_channel = 0;
	shost->max_lun = 512;
	shost->max_cmd_len = 16;

	hba = iscsi_host_priv(shost);
	hba->shost = shost;
	hba->netdev = cnic->netdev;
	/* Get PCI related information and update hba struct members */
	hba->pcidev = cnic->pcidev;
	pci_dev_get(hba->pcidev);
	hba->pci_did = hba->pcidev->device;
	hba->pci_vid = hba->pcidev->vendor;
	hba->pci_sdid = hba->pcidev->subsystem_device;
	hba->pci_svid = hba->pcidev->subsystem_vendor;
	hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
	hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);

	/* classify the NX2 device once; queue sizing below depends on it
	 * (the original code called this twice back-to-back)
	 */
	bnx2i_identify_device(hba);
	bnx2i_setup_host_queue_size(hba, shost);

	if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
		hba->regview = ioremap_nocache(hba->netdev->base_addr,
					       BNX2_MQ_CONFIG2);
		if (!hba->regview)
			goto ioreg_map_err;
	} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
		if (!hba->regview)
			goto ioreg_map_err;
	}

	if (bnx2i_setup_mp_bdt(hba))
		goto mp_bdt_mem_err;

	INIT_LIST_HEAD(&hba->ep_ofld_list);
	INIT_LIST_HEAD(&hba->ep_destroy_list);
	rwlock_init(&hba->ep_rdwr_lock);

	hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;

	/* different values for 5708/5709/57710 */
	hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;

	if (bnx2i_setup_free_cid_que(hba))
		goto cid_que_err;

	/* SQ/RQ/CQ size can be changed via sysfx interface */
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
			hba->max_sqes = sq_size;
		else
			hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
	} else {	/* 5706/5708/5709 */
		if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
			hba->max_sqes = sq_size;
		else
			hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
	}

	hba->max_rqes = rq_size;
	hba->max_cqes = hba->max_sqes + rq_size;
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
			hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
	} else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
		hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;

	hba->num_ccell = hba->max_sqes / 2;

	spin_lock_init(&hba->lock);
	mutex_init(&hba->net_dev_lock);

	if (iscsi_host_add(shost, &hba->pcidev->dev))
		goto free_dump_mem;
	return hba;

free_dump_mem:
	bnx2i_release_free_cid_que(hba);
cid_que_err:
	bnx2i_free_mp_bdt(hba);
mp_bdt_mem_err:
	if (hba->regview) {
		iounmap(hba->regview);
		hba->regview = NULL;
	}
ioreg_map_err:
	pci_dev_put(hba->pcidev);
	scsi_host_put(shost);
	return NULL;
}
/**
 * bnx2i_free_hba- releases hba structure and resources held by the adapter
 * @hba: pointer to adapter instance
 *
 * free adapter structure and call various cleanup routines.
 * Mirrors bnx2i_alloc_hba(): the scsi host is removed first so no new
 * I/O can arrive, then hardware/DMA resources are released, and the
 * host memory itself is freed last.
 */
void bnx2i_free_hba(struct bnx2i_hba *hba)
{
	struct Scsi_Host *shost = hba->shost;

	iscsi_host_remove(shost);
	/* re-init the EP lists; any stale entries would be dangling now */
	INIT_LIST_HEAD(&hba->ep_ofld_list);
	INIT_LIST_HEAD(&hba->ep_destroy_list);
	pci_dev_put(hba->pcidev);

	if (hba->regview) {
		iounmap(hba->regview);
		hba->regview = NULL;
	}
	bnx2i_free_mp_bdt(hba);
	bnx2i_release_free_cid_que(hba);
	iscsi_host_free(shost);
}
/**
 * bnx2i_conn_free_login_resources - free DMA resources used for login process
 * @hba: pointer to adapter instance
 * @bnx2i_conn: iscsi connection pointer
 *
 * Login related resources, mostly BDT & payload DMA memory is freed.
 * Each of the four buffers is freed independently and NULLed, so this
 * is safe to call with any subset of them already released.
 */
static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
					    struct bnx2i_conn *bnx2i_conn)
{
	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  bnx2i_conn->gen_pdu.resp_bd_tbl,
				  bnx2i_conn->gen_pdu.resp_bd_dma);
		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
	}

	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  bnx2i_conn->gen_pdu.req_bd_tbl,
				  bnx2i_conn->gen_pdu.req_bd_dma);
		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
	}

	if (bnx2i_conn->gen_pdu.resp_buf) {
		dma_free_coherent(&hba->pcidev->dev,
				  ISCSI_DEF_MAX_RECV_SEG_LEN,
				  bnx2i_conn->gen_pdu.resp_buf,
				  bnx2i_conn->gen_pdu.resp_dma_addr);
		bnx2i_conn->gen_pdu.resp_buf = NULL;
	}

	if (bnx2i_conn->gen_pdu.req_buf) {
		dma_free_coherent(&hba->pcidev->dev,
				  ISCSI_DEF_MAX_RECV_SEG_LEN,
				  bnx2i_conn->gen_pdu.req_buf,
				  bnx2i_conn->gen_pdu.req_dma_addr);
		bnx2i_conn->gen_pdu.req_buf = NULL;
	}
}
/**
 * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
 * @hba: pointer to adapter instance
 * @bnx2i_conn: iscsi connection pointer
 *
 * Mgmt task DMA resources are allocated in this routine: a request and
 * a response payload buffer plus one BD table page for each. On any
 * failure the goto chain unwinds exactly the allocations that have
 * already succeeded, in reverse order, and -ENOMEM is returned.
 */
static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
					    struct bnx2i_conn *bnx2i_conn)
{
	/* Allocate memory for login request/response buffers */
	bnx2i_conn->gen_pdu.req_buf =
		dma_alloc_coherent(&hba->pcidev->dev,
				   ISCSI_DEF_MAX_RECV_SEG_LEN,
				   &bnx2i_conn->gen_pdu.req_dma_addr,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.req_buf == NULL)
		goto login_req_buf_failure;

	/* write pointer starts at the beginning of the empty request buf */
	bnx2i_conn->gen_pdu.req_buf_size = 0;
	bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;

	bnx2i_conn->gen_pdu.resp_buf =
		dma_alloc_coherent(&hba->pcidev->dev,
				   ISCSI_DEF_MAX_RECV_SEG_LEN,
				   &bnx2i_conn->gen_pdu.resp_dma_addr,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.resp_buf == NULL)
		goto login_resp_buf_failure;

	bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;

	bnx2i_conn->gen_pdu.req_bd_tbl =
		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
				   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
		goto login_req_bd_tbl_failure;

	bnx2i_conn->gen_pdu.resp_bd_tbl =
		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
				   &bnx2i_conn->gen_pdu.resp_bd_dma,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
		goto login_resp_bd_tbl_failure;

	return 0;

	/* error unwinding: free in reverse order of allocation */
login_resp_bd_tbl_failure:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			  bnx2i_conn->gen_pdu.req_bd_tbl,
			  bnx2i_conn->gen_pdu.req_bd_dma);
	bnx2i_conn->gen_pdu.req_bd_tbl = NULL;

login_req_bd_tbl_failure:
	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
			  bnx2i_conn->gen_pdu.resp_buf,
			  bnx2i_conn->gen_pdu.resp_dma_addr);
	bnx2i_conn->gen_pdu.resp_buf = NULL;

login_resp_buf_failure:
	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
			  bnx2i_conn->gen_pdu.req_buf,
			  bnx2i_conn->gen_pdu.req_dma_addr);
	bnx2i_conn->gen_pdu.req_buf = NULL;

login_req_buf_failure:
	iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
			  "login resource alloc failed!!\n");
	return -ENOMEM;
}
/**
* bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
* @bnx2i_conn: iscsi connection pointer
*
* Allocates buffers and BD tables before shipping requests to cnic
* for PDUs prepared by 'iscsid' daemon
*/
static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
{
struct iscsi_bd *bd_tbl;
bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
bd_tbl->buffer_addr_hi =
(u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
bnx2i_conn->gen_pdu.req_buf;
bd_tbl->reserved0 = 0;
bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
ISCSI_BD_FIRST_IN_BD_CHAIN;
bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
bd_tbl->reserved0 = 0;
bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
ISCSI_BD_FIRST_IN_BD_CHAIN;
}
/**
 * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
 * @task: transport layer task pointer
 *
 * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
 * Nop-out and Logout requests flow through this path.
 *
 * Returns the result of the type-specific send routine, or 0 for a
 * successful login send. NOTE(review): an unsupported opcode only logs
 * and then returns 0 (success) -- confirm callers are OK with that.
 */
static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
{
	struct bnx2i_cmd *cmd = task->dd_data;
	struct bnx2i_conn *bnx2i_conn = cmd->conn;
	int rc = 0;
	char *buf;
	int data_len;

	/* build single-entry BD tables covering the gen_pdu buffers */
	bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		bnx2i_send_iscsi_login(bnx2i_conn, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		/* ping-data, if any, lives in the generic request buffer */
		data_len = bnx2i_conn->gen_pdu.req_buf_size;
		buf = bnx2i_conn->gen_pdu.req_buf;
		if (data_len)
			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
						     RESERVED_ITT,
						     buf, data_len, 1);
		else
			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
						     RESERVED_ITT,
						     NULL, 0, 1);
		break;
	case ISCSI_OP_LOGOUT:
		rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
		break;
	default:
		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
				  "send_gen: unsupported op 0x%x\n",
				  task->hdr->opcode);
	}
	return rc;
}
/**********************************************************************
* SCSI-ML Interface
**********************************************************************/
/**
 * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
 * @sc: SCSI-ML command pointer
 * @cmd: iscsi cmd pointer
 *
 * The LUN is converted to the 8-byte iSCSI wire format and the CDB is
 * copied into the request 4 bytes at a time with a byte swap on each
 * word, as the firmware expects.
 */
static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
{
	u32 dword;
	int lpcnt;
	u8 *srcp;
	u32 *dstp;
	u32 scsi_lun[2];

	int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
	cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
	cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);

	/* NOTE(review): reads the length via cmd->scsi_cmd; callers must
	 * have set cmd->scsi_cmd = sc before calling (bnx2i_task_xmit does)
	 */
	lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
	srcp = (u8 *) sc->cmnd;
	dstp = (u32 *) cmd->req.cdb;
	while (lpcnt--) {
		memcpy(&dword, (const void *) srcp, 4);
		*dstp = cpu_to_be32(dword);
		srcp += 4;
		dstp++;
	}
	if (sc->cmd_len & 0x3) {
		/* tail handling copies only 2 leftover bytes -- assumes
		 * cmd_len % 4 is never 3 (standard CDB sizes are 6/10/12/16);
		 * TODO(review): confirm for vendor-specific CDB lengths
		 */
		dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
		*dstp = cpu_to_be32(dword);
	}
}
/**
 * bnx2i_cleanup_task - reclaims DMA resources held by a completed/aborted task
 * @task: transport layer task pointer
 *
 * For a task aborted via TMF, a cleanup request must be sent to the
 * chip and its completion awaited before the SG list can be unmapped.
 * The session lock is dropped around the wait because the completion
 * arrives in a context that needs that lock.
 */
static void bnx2i_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct bnx2i_hba *hba = bnx2i_conn->hba;

	/*
	 * mgmt task or cmd was never sent to us to transmit.
	 */
	if (!task->sc || task->state == ISCSI_TASK_PENDING)
		return;
	/*
	 * need to clean-up task context to claim dma buffers
	 */
	if (task->state == ISCSI_TASK_ABRT_TMF) {
		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);

		/* drop the session lock while sleeping; the cleanup
		 * completion path takes it
		 */
		spin_unlock_bh(&conn->session->lock);
		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
		spin_lock_bh(&conn->session->lock);
	}
	bnx2i_iscsi_unmap_sg_list(task->dd_data);
}
/**
 * bnx2i_mtask_xmit - transmit mtask to chip for further processing
 * @conn: transport layer conn structure pointer
 * @task: transport layer command structure pointer
 *
 * Stages the mgmt PDU payload (if any) into the connection's generic
 * request buffer, marks the command as a non-SCSI task, and hands it
 * to bnx2i_iscsi_send_generic_request() for transmission.
 */
static int
bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct bnx2i_cmd *cmd = task->dd_data;
	unsigned int pld_len = task->data_count;

	bnx2i_setup_cmd_wqe_template(cmd);
	cmd->conn = bnx2i_conn;
	cmd->scsi_cmd = NULL;	/* mgmt task carries no scsi command */

	/* clear the whole staging buffer, then copy in the payload */
	memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
	bnx2i_conn->gen_pdu.req_buf_size = pld_len;
	if (pld_len) {
		memcpy(bnx2i_conn->gen_pdu.req_buf, task->data, pld_len);
		bnx2i_conn->gen_pdu.req_wr_ptr =
			bnx2i_conn->gen_pdu.req_buf + pld_len;
	}

	return bnx2i_iscsi_send_generic_request(task);
}
/**
 * bnx2i_task_xmit - transmit iscsi command to chip for further processing
 * @task: transport layer command structure pointer
 *
 * maps SG buffers and send request to chip/firmware in the form of SQ WQE.
 * Mgmt tasks (no scsi_cmnd attached) are routed to bnx2i_mtask_xmit().
 * Returns 0 on success or -ENOTCONN when the link is down or the
 * connection is not bound.
 */
static int bnx2i_task_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct scsi_cmnd *sc = task->sc;
	struct bnx2i_cmd *cmd = task->dd_data;
	struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;

	if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
		return -ENOTCONN;

	if (!bnx2i_conn->is_bound)
		return -ENOTCONN;

	/*
	 * If there is no scsi_cmnd this must be a mgmt task
	 */
	if (!sc)
		return bnx2i_mtask_xmit(conn, task);

	bnx2i_setup_cmd_wqe_template(cmd);
	cmd->req.op_code = ISCSI_OP_SCSI_CMD;
	cmd->conn = bnx2i_conn;
	cmd->scsi_cmd = sc;	/* must be set before bnx2i_cpy_scsi_cdb() */
	cmd->req.total_data_transfer_length = scsi_bufflen(sc);
	cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);

	bnx2i_iscsi_map_sg_list(cmd);
	bnx2i_cpy_scsi_cdb(sc, cmd);

	cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
	if (sc->sc_data_direction == DMA_TO_DEVICE) {
		cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
		/* task type is encoded into the high bits of the ITT */
		cmd->req.itt = task->itt |
			(ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
		bnx2i_setup_write_cmd_bd_info(task);
	} else {
		if (scsi_bufflen(sc))
			cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
		cmd->req.itt = task->itt |
			(ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
	}

	cmd->req.num_bds = cmd->io_tbl.bd_valid;
	if (!cmd->io_tbl.bd_valid) {
		/* zero-length I/O: point the chip at the MP dummy BD */
		cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
		cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
		cmd->req.num_bds = 1;
	}

	bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
	return 0;
}
/**
 * bnx2i_session_create - create a new iscsi session
 * @ep: endpoint (transport identifier) the session is created against
 * @cmds_max: max commands supported
 * @qdepth: scsi queue depth to support
 * @initial_cmdsn: initial iscsi CMDSN to be used for this session
 *
 * Creates a new iSCSI session instance on given device. Returns the
 * new cls_session or NULL on any failure (missing ep, adapter not
 * ready, or allocation failure).
 */
static struct iscsi_cls_session *
bnx2i_session_create(struct iscsi_endpoint *ep,
		     uint16_t cmds_max, uint16_t qdepth,
		     uint32_t initial_cmdsn)
{
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct bnx2i_hba *hba;
	struct bnx2i_endpoint *bnx2i_ep;

	if (!ep) {
		printk(KERN_ERR "bnx2i: missing ep.\n");
		return NULL;
	}

	bnx2i_ep = ep->dd_data;
	shost = bnx2i_ep->hba->shost;
	hba = iscsi_host_priv(shost);
	if (bnx2i_adapter_ready(hba))
		return NULL;

	/*
	 * user can override hw limit as long as it is within
	 * the min/max.
	 */
	if (cmds_max > hba->max_sqes)
		cmds_max = hba->max_sqes;
	else if (cmds_max < BNX2I_SQ_WQES_MIN)
		cmds_max = BNX2I_SQ_WQES_MIN;

	cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
					  cmds_max, sizeof(struct bnx2i_cmd),
					  initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
		goto session_teardown;
	return cls_session;

session_teardown:
	iscsi_session_teardown(cls_session);
	return NULL;
}
/**
 * bnx2i_session_destroy - destroys iscsi session
 * @cls_session: pointer to iscsi cls session
 *
 * Releases the per-session command pool created by
 * bnx2i_session_create() and then tears the session down.
 */
static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);

	bnx2i_destroy_cmd_pool(hba, cls_session->dd_data);
	iscsi_session_teardown(cls_session);
}
/**
 * bnx2i_conn_create - create iscsi connection instance
 * @cls_session: pointer to iscsi cls session
 * @cid: iscsi cid as per rfc (not NX2's CID terminology)
 *
 * Creates a new iSCSI connection instance for a given session and
 * allocates its login DMA resources. Returns the new cls_conn or NULL
 * on failure.
 */
static struct iscsi_cls_conn *
bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_conn *bnx2i_conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;

	cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
				    cid);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	bnx2i_conn = conn->dd_data;
	bnx2i_conn->cls_conn = cls_conn;
	bnx2i_conn->hba = hba;
	/* 'ep' ptr will be assigned in bind() call */
	bnx2i_conn->ep = NULL;
	init_completion(&bnx2i_conn->cmd_cleanup_cmpl);

	if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
		iscsi_conn_printk(KERN_ALERT, conn,
				  "conn_new: login resc alloc failed!!\n");
		goto free_conn;
	}

	return cls_conn;

free_conn:
	iscsi_conn_teardown(cls_conn);
	return NULL;
}
/**
 * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
 * @cls_session: pointer to iscsi cls session
 * @cls_conn: pointer to iscsi cls conn
 * @transport_fd: 64-bit EP handle
 * @is_leading: leading connection on this session?
 *
 * Binds together iSCSI session instance, iSCSI connection instance
 * and the TCP connection. This routine returns error code if
 * TCP connection does not belong on the device iSCSI sess/conn
 * is bound
 */
static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
			   struct iscsi_cls_conn *cls_conn,
			   uint64_t transport_fd, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_endpoint *bnx2i_ep;
	struct iscsi_endpoint *ep;
	int ret_code;

	ep = iscsi_lookup_endpoint(transport_fd);
	if (!ep)
		return -EINVAL;

	bnx2i_ep = ep->dd_data;
	if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
	    (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
		/* Peer disconnect via' FIN or RST */
		return -EINVAL;

	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
		return -EINVAL;

	if (bnx2i_ep->hba != hba) {
		/* Error - TCP connection does not belong to this device
		 */
		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
				  "conn bind, ep=0x%p (%s) does not",
				  bnx2i_ep, bnx2i_ep->hba->netdev->name);
		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
				  "belong to hba (%s)\n",
				  hba->netdev->name);
		return -EEXIST;
	}

	/* cross-link the endpoint and the connection */
	bnx2i_ep->conn = bnx2i_conn;
	bnx2i_conn->ep = bnx2i_ep;
	bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
	bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
	bnx2i_conn->is_bound = 1;

	ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
						bnx2i_ep->ep_iscsi_cid);

	/* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
	 * driver needs to explicitly replenish RQ index during setup.
	 */
	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
		bnx2i_put_rq_buf(bnx2i_conn, 0);

	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
	return ret_code;
}
/**
 * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
 * @cls_conn: pointer to iscsi cls conn
 *
 * Frees the login DMA resources allocated in bnx2i_conn_create() and
 * tears down the transport connection object.
 */
static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct Scsi_Host *shost =
		iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
	struct bnx2i_hba *hba = iscsi_host_priv(shost);

	bnx2i_conn_free_login_resources(hba, bnx2i_conn);
	iscsi_conn_teardown(cls_conn);
}
/**
 * bnx2i_conn_get_param - return iscsi connection parameter to caller
 * @cls_conn: pointer to iscsi cls conn
 * @param: parameter type identifier
 * @buf: buffer pointer
 *
 * returns iSCSI connection parameters. Port and address come from the
 * cnic socket (only valid once an endpoint is bound); everything else
 * is delegated to libiscsi's iscsi_conn_get_param().
 */
static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
				enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	int len = 0;

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
		if (bnx2i_conn->ep)
			len = sprintf(buf, "%hu\n",
				      bnx2i_conn->ep->cm_sk->dst_port);
		break;
	case ISCSI_PARAM_CONN_ADDRESS:
		/* NOTE(review): formats IPv4 only; IPv6 endpoints created in
		 * bnx2i_ep_connect() would not be reported here
		 */
		if (bnx2i_conn->ep)
			len = sprintf(buf, NIPQUAD_FMT "\n",
				      NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
		break;
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return len;
}
/**
 * bnx2i_host_get_param - returns host (adapter) related parameters
 * @shost: scsi host pointer
 * @param: parameter type identifier
 * @buf: buffer pointer
 *
 * Reports the adapter MAC address and netdev name directly; all other
 * parameters are delegated to libiscsi's iscsi_host_get_param().
 */
static int bnx2i_host_get_param(struct Scsi_Host *shost,
				enum iscsi_host_param param, char *buf)
{
	struct bnx2i_hba *hba = iscsi_host_priv(shost);

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		return sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		return sprintf(buf, "%s\n", hba->netdev->name);
	default:
		return iscsi_host_get_param(shost, param, buf);
	}
}
/**
 * bnx2i_conn_start - completes iscsi connection migration to FFP
 * @cls_conn: pointer to iscsi cls conn
 *
 * last call in FFP migration to handover iscsi conn to the driver.
 * Sends a ULP update to the chip and waits (bounded by a 1-second
 * offload timer) for the state to leave EP_STATE_ULP_UPDATE_START
 * before starting the libiscsi connection. Always returns 0.
 */
static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;

	bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
	bnx2i_update_iscsi_conn(conn);

	/*
	 * this should normally not sleep for a long time so it should
	 * not disrupt the caller.
	 */
	bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
	bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
	add_timer(&bnx2i_conn->ep->ofld_timer);
	/* update iSCSI context for this conn, wait for CNIC to complete */
	wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
			bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&bnx2i_conn->ep->ofld_timer);

	iscsi_conn_start(cls_conn);
	return 0;
}
/**
 * bnx2i_conn_get_stats - returns iSCSI stats
 * @cls_conn: pointer to iscsi cls conn
 * @stats: pointer to iscsi statistic struct
 *
 * Copies the libiscsi PDU counters into @stats and exports one custom
 * statistic (eh_abort_cnt). The original code set custom_length to 3
 * while populating only custom[2] (leaving [0] and [1] uninitialized)
 * and then overwrote custom_length with 0, making the custom stat dead;
 * fixed to publish a single, fully-initialized entry.
 */
static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
				 struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->digest_err = 0;
	stats->timeout_err = 0;
	strcpy(stats->custom[0].desc, "eh_abort_cnt");
	stats->custom[0].value = conn->eh_abort_cnt;
	stats->custom_length = 1;
}
/**
 * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
 * @dst_addr: target IP address
 *
 * check if route resolves to BNX2 device. Returns the owning hba when
 * the destination is reachable through a registered, ready NX2 device
 * whose interface MTU does not exceed what the iSCSI HBA supports;
 * NULL otherwise.
 */
static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
{
	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
	struct bnx2i_hba *hba;
	struct cnic_dev *cnic = NULL;

	bnx2i_reg_dev_all();

	hba = get_adapter_list_head();
	if (hba && hba->cnic)
		cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
	if (!cnic) {
		/* adjacent literals concatenate; keep the separating space */
		printk(KERN_ALERT "bnx2i: no route, "
		       "can't connect using cnic\n");
		goto no_nx2_route;
	}
	hba = bnx2i_find_hba_for_cnic(cnic);
	if (!hba)
		goto no_nx2_route;

	if (bnx2i_adapter_ready(hba)) {
		printk(KERN_ALERT "bnx2i: check route, hba not found\n");
		goto no_nx2_route;
	}
	if (hba->netdev->mtu > hba->mtu_supported) {
		printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
				  hba->netdev->name, hba->netdev->mtu);
		printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
				  hba->mtu_supported);
		goto no_nx2_route;
	}
	return hba;
no_nx2_route:
	return NULL;
}
/**
 * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
 * @hba: pointer to adapter instance
 * @ep: endpoint (transport indentifier) structure
 *
 * destroys cm_sock structure and on chip iscsi context. Returns 1 when
 * a 57710 endpoint timed out disconnecting (context left for debug),
 * 0 otherwise. The context destroy is bounded by a 10-second timer.
 */
static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
				 struct bnx2i_endpoint *ep)
{
	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
		hba->cnic->cm_destroy(ep->cm_sk);

	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
		ep->state = EP_STATE_DISCONN_COMPL;

	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
	    ep->state == EP_STATE_DISCONN_TIMEDOUT) {
		printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
				  " NW/PCIe trace, driver msgs to developers"
				  " for analysis\n");
		return 1;
	}

	ep->state = EP_STATE_CLEANUP_START;
	init_timer(&ep->ofld_timer);
	ep->ofld_timer.expires = 10*HZ + jiffies;
	ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	ep->ofld_timer.data = (unsigned long) ep;
	add_timer(&ep->ofld_timer);

	bnx2i_ep_destroy_list_add(hba, ep);

	/* destroy iSCSI context, wait for it to complete */
	bnx2i_send_conn_destroy(hba, ep);
	wait_event_interruptible(ep->ofld_wait,
				 (ep->state != EP_STATE_CLEANUP_START));

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&ep->ofld_timer);

	bnx2i_ep_destroy_list_del(hba, ep);

	if (ep->state != EP_STATE_CLEANUP_CMPL)
		/* should never happen */
		printk(KERN_ALERT "bnx2i - conn destroy failed\n");

	return 0;
}
/**
 * bnx2i_ep_connect - establish TCP connection to target portal
 * @shost: scsi host
 * @dst_addr: target IP address
 * @non_blocking: blocking or non-blocking call
 *
 * this routine initiates the TCP/IP connection by invoking Option-2 i/f
 * with l5_core and the CNIC. This is a multi-step process of resolving
 * route to target, create a iscsi connection context, handshaking with
 * CNIC module to create/initialize the socket struct and finally
 * sending down option-2 request to complete TCP 3-way handshake.
 *
 * Returns the new iscsi_endpoint on success, or ERR_PTR(-errno) on
 * failure with all partially-acquired resources released.
 */
static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
					       struct sockaddr *dst_addr,
					       int non_blocking)
{
	u32 iscsi_cid = BNX2I_CID_RESERVED;
	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
	struct sockaddr_in6 *desti6;
	struct bnx2i_endpoint *bnx2i_ep;
	struct bnx2i_hba *hba;
	struct cnic_dev *cnic;
	struct cnic_sockaddr saddr;
	struct iscsi_endpoint *ep;
	int rc = 0;

	if (shost)
		/* driver is given scsi host to work with */
		hba = iscsi_host_priv(shost);
	else
		/*
		 * check if the given destination can be reached through
		 * a iscsi capable NetXtreme2 device
		 */
		hba = bnx2i_check_route(dst_addr);
	if (!hba) {
		rc = -ENOMEM;
		goto check_busy;
	}

	cnic = hba->cnic;
	ep = bnx2i_alloc_ep(hba);
	if (!ep) {
		rc = -ENOMEM;
		goto check_busy;
	}
	bnx2i_ep = ep->dd_data;

	/* net_dev_lock serializes connect against device up/down events */
	mutex_lock(&hba->net_dev_lock);
	if (bnx2i_adapter_ready(hba)) {
		rc = -EPERM;
		goto net_if_down;
	}

	bnx2i_ep->state = EP_STATE_IDLE;
	bnx2i_ep->ep_iscsi_cid = (u16) -1;
	bnx2i_ep->num_active_cmds = 0;
	iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
	if (iscsi_cid == -1) {
		printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
		rc = -ENOMEM;
		goto iscsi_cid_err;
	}
	bnx2i_ep->hba_age = hba->age;

	rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
	if (rc != 0) {
		printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
		rc = -ENOMEM;
		goto qp_resc_err;
	}

	bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
	bnx2i_ep->state = EP_STATE_OFLD_START;
	bnx2i_ep_ofld_list_add(hba, bnx2i_ep);

	/* bound the offload handshake with a 2-second timer */
	init_timer(&bnx2i_ep->ofld_timer);
	bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
	add_timer(&bnx2i_ep->ofld_timer);

	bnx2i_send_conn_ofld_req(hba, bnx2i_ep);

	/* Wait for CNIC hardware to setup conn context and return 'cid' */
	wait_event_interruptible(bnx2i_ep->ofld_wait,
				 bnx2i_ep->state != EP_STATE_OFLD_START);

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&bnx2i_ep->ofld_timer);
	bnx2i_ep_ofld_list_del(hba, bnx2i_ep);

	if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
		rc = -ENOSPC;
		goto conn_failed;
	}

	rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
			     iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
	if (rc) {
		rc = -EINVAL;
		goto conn_failed;
	}

	bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
	bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
	clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);

	memset(&saddr, 0, sizeof(saddr));
	if (dst_addr->sa_family == AF_INET) {
		desti = (struct sockaddr_in *) dst_addr;
		saddr.remote.v4 = *desti;
		saddr.local.v4.sin_family = desti->sin_family;
	} else if (dst_addr->sa_family == AF_INET6) {
		desti6 = (struct sockaddr_in6 *) dst_addr;
		saddr.remote.v6 = *desti6;
		saddr.local.v6.sin6_family = desti6->sin6_family;
	}

	bnx2i_ep->timestamp = jiffies;
	bnx2i_ep->state = EP_STATE_CONNECT_START;
	if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
		rc = -EINVAL;
		goto conn_failed;
	} else
		rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);

	if (rc)
		goto release_ep;

	if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
		goto release_ep;
	mutex_unlock(&hba->net_dev_lock);
	return ep;

release_ep:
	/* context already exists on chip; a full teardown is needed */
	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
		mutex_unlock(&hba->net_dev_lock);
		return ERR_PTR(rc);
	}
conn_failed:
net_if_down:
iscsi_cid_err:
	bnx2i_free_qp_resc(hba, bnx2i_ep);
qp_resc_err:
	bnx2i_free_ep(ep);
	mutex_unlock(&hba->net_dev_lock);
check_busy:
	bnx2i_unreg_dev_all();
	return ERR_PTR(rc);
}
/**
 * bnx2i_ep_poll - polls for TCP connection establishement
 * @ep: TCP connection (endpoint) handle
 * @timeout_ms: timeout value in milli secs
 *
 * polls for TCP connect request to complete. Returns 1 when connected,
 * -1 on failure, or the interrupted-wait error code.
 */
static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct bnx2i_endpoint *bnx2i_ep;
	int rc = 0;

	bnx2i_ep = ep->dd_data;
	if ((bnx2i_ep->state == EP_STATE_IDLE) ||
	    (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
	    (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
		return -1;
	if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
		return 1;

	rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
					      ((bnx2i_ep->state ==
						EP_STATE_OFLD_FAILED) ||
					       (bnx2i_ep->state ==
						EP_STATE_CONNECT_FAILED) ||
					       (bnx2i_ep->state ==
						EP_STATE_CONNECT_COMPL)),
					      msecs_to_jiffies(timeout_ms));
	/* a plain timeout (rc == 0) is also treated as failure here */
	if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
		rc = -1;

	if (rc > 0)
		return 1;
	else if (!rc)
		/* NOTE(review): unreachable -- rc == 0 was forced to -1
		 * above, so timeouts return -1 rather than 0
		 */
		return 0;	/* timeout */
	else
		return rc;
}
/**
 * bnx2i_ep_tcp_conn_active - check EP state transition
 * @bnx2i_ep: endpoint pointer
 *
 * Check whether the underlying TCP connection is still active for the
 * endpoint's current state, i.e. whether the chip still holds a live
 * connection context that must be torn down gracefully.
 */
static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
{
	switch (bnx2i_ep->state) {
	case EP_STATE_CONNECT_COMPL:
	case EP_STATE_ULP_UPDATE_START:
	case EP_STATE_ULP_UPDATE_COMPL:
	case EP_STATE_TCP_FIN_RCVD:
	case EP_STATE_ULP_UPDATE_FAILED:
		/* connection context is live on the chip */
		return 1;
	case EP_STATE_CONNECT_FAILED:
		/* on 57710 (10G) devices a failed connect still leaves a
		 * context behind that needs a teardown request
		 */
		return test_bit(BNX2I_NX2_DEV_57710,
				&bnx2i_ep->hba->cnic_dev_type) ? 1 : 0;
	default:
		/* covers EP_STATE_CONNECT_START, EP_STATE_CLEANUP_FAILED,
		 * EP_STATE_OFLD_FAILED, EP_STATE_TCP_RST_RCVD,
		 * EP_STATE_DISCONN_TIMEDOUT and any other state
		 */
		return 0;
	}
}
/**
 * bnx2i_ep_disconnect - executes TCP connection teardown process
 * @ep: TCP connection (endpoint) handle
 *
 * Executes the TCP connection teardown process: waits for any pending
 * connect to settle, unbinds the iSCSI connection, issues a graceful
 * close or an abort to the CNIC layer depending on session state, then
 * tears down the connection context and frees endpoint resources.
 */
static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct bnx2i_endpoint *bnx2i_ep;
	struct bnx2i_conn *bnx2i_conn = NULL;
	struct iscsi_session *session = NULL;
	struct iscsi_conn *conn;
	struct cnic_dev *cnic;
	struct bnx2i_hba *hba;

	bnx2i_ep = ep->dd_data;

	/* driver should not attempt connection cleanup until TCP_CONNECT
	 * completes either successfully or fails.
	 * NOTE(review): comment said "Timeout is 9-secs" but the loop below
	 * waits up to 12 * HZ past ->timestamp - confirm which is intended.
	 */
	while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
	       !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
		msleep(250);

	/* detach the iSCSI connection from this endpoint under the
	 * session lock so the datapath stops using it
	 */
	if (bnx2i_ep->conn) {
		bnx2i_conn = bnx2i_ep->conn;
		conn = bnx2i_conn->cls_conn->dd_data;
		session = conn->session;

		spin_lock_bh(&session->lock);
		bnx2i_conn->is_bound = 0;
		spin_unlock_bh(&session->lock);
	}

	hba = bnx2i_ep->hba;
	/* nothing was ever offloaded - just release the endpoint */
	if (bnx2i_ep->state == EP_STATE_IDLE)
		goto return_bnx2i_ep;
	cnic = hba->cnic;
	mutex_lock(&hba->net_dev_lock);

	/* adapter down or reset since offload - context is already gone */
	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
		goto free_resc;
	if (bnx2i_ep->hba_age != hba->age)
		goto free_resc;

	/* TCP side already dead - skip close/abort, destroy context */
	if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
		goto destory_conn;	/* NOTE(review): label misspelled ("destroy") */

	bnx2i_ep->state = EP_STATE_DISCONN_START;

	/* arm a 10-second watchdog so a lost completion cannot hang us */
	init_timer(&bnx2i_ep->ofld_timer);
	bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies;
	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
	add_timer(&bnx2i_ep->ofld_timer);

	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
		int close = 0;

		/* graceful close only when the session is logging out;
		 * otherwise abort the TCP connection
		 */
		if (session) {
			spin_lock_bh(&session->lock);
			if (session->state == ISCSI_STATE_LOGGING_OUT)
				close = 1;
			spin_unlock_bh(&session->lock);
		}
		if (close)
			cnic->cm_close(bnx2i_ep->cm_sk);
		else
			cnic->cm_abort(bnx2i_ep->cm_sk);
	} else
		goto free_resc;

	/* wait for option-2 conn teardown */
	wait_event_interruptible(bnx2i_ep->ofld_wait,
				 bnx2i_ep->state != EP_STATE_DISCONN_START);

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&bnx2i_ep->ofld_timer);

destory_conn:
	/* on teardown failure the endpoint is intentionally leaked here -
	 * resources cannot be safely reused
	 */
	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
		mutex_unlock(&hba->net_dev_lock);
		return;
	}
free_resc:
	mutex_unlock(&hba->net_dev_lock);
	bnx2i_free_qp_resc(hba, bnx2i_ep);
return_bnx2i_ep:
	if (bnx2i_conn)
		bnx2i_conn->ep = NULL;

	bnx2i_free_ep(ep);

	/* last connection gone - unregister from all CNIC devices */
	if (!hba->ofld_conns_active)
		bnx2i_unreg_dev_all();
}
/**
 * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
 * @shost: scsi host pointer
 * @params: iscsi path message parameters
 *
 * Forwards the userspace path-update message to the CNIC driver,
 * which owns the L4/L5 path state.
 *
 * NOTE(review): the return value of iscsi_nl_msg_recv() is discarded
 * and 0 is always returned - confirm callers do not need the error.
 */
static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
{
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	char *buf = (char *) params;
	u16 len = sizeof(*params);

	/* handled by cnic driver */
	hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
				     len);

	return 0;
}
/*
 * 'Scsi_Host_Template' structure and 'iscsi_transport' structure template
 * used while registering with the scsi host and iSCSI transport module.
 */
static struct scsi_host_template bnx2i_host_template = {
	.module			= THIS_MODULE,
	.name			= "Broadcom Offload iSCSI Initiator",
	.proc_name		= "bnx2i",
	.queuecommand		= iscsi_queuecommand,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_target_reset,
	.can_queue		= 1024,	/* max outstanding commands per host */
	.max_sectors		= 127,	/* presumably matches HW BD limits - confirm */
	.cmd_per_lun		= 32,
	.this_id		= -1,	/* initiator has no SCSI bus ID */
	.use_clustering		= ENABLE_CLUSTERING,
	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
	.shost_attrs		= bnx2i_dev_attributes,	/* sq_size / num_ccell sysfs knobs */
};
/* iSCSI transport registration template: capabilities, tunable parameter
 * masks and the callbacks wired into the open-iscsi transport layer.
 */
struct iscsi_transport bnx2i_iscsi_transport = {
	.owner			= THIS_MODULE,
	.name			= "bnx2i",
	/* digest and R2T handling are offloaded to the adapter */
	.caps			= CAP_RECOVERY_L0 | CAP_HDRDGST |
				  CAP_MULTI_R2T | CAP_DATADGST |
				  CAP_DATA_PATH_OFFLOAD,
	/* iSCSI session/connection parameters user space may set/get */
	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
				  ISCSI_MAX_XMIT_DLENGTH |
				  ISCSI_HDRDGST_EN |
				  ISCSI_DATADGST_EN |
				  ISCSI_INITIAL_R2T_EN |
				  ISCSI_MAX_R2T |
				  ISCSI_IMM_DATA_EN |
				  ISCSI_FIRST_BURST |
				  ISCSI_MAX_BURST |
				  ISCSI_PDU_INORDER_EN |
				  ISCSI_DATASEQ_INORDER_EN |
				  ISCSI_ERL |
				  ISCSI_CONN_PORT |
				  ISCSI_CONN_ADDRESS |
				  ISCSI_EXP_STATSN |
				  ISCSI_PERSISTENT_PORT |
				  ISCSI_PERSISTENT_ADDRESS |
				  ISCSI_TARGET_NAME | ISCSI_TPGT |
				  ISCSI_USERNAME | ISCSI_PASSWORD |
				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
				  ISCSI_LU_RESET_TMO |
				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
	/* session / connection lifecycle */
	.create_session		= bnx2i_session_create,
	.destroy_session	= bnx2i_session_destroy,
	.create_conn		= bnx2i_conn_create,
	.bind_conn		= bnx2i_conn_bind,
	.destroy_conn		= bnx2i_conn_destroy,
	.set_param		= iscsi_set_param,
	.get_conn_param		= bnx2i_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.get_host_param		= bnx2i_host_get_param,
	.start_conn		= bnx2i_conn_start,
	.stop_conn		= iscsi_conn_stop,
	.send_pdu		= iscsi_conn_send_pdu,
	.xmit_task		= bnx2i_task_xmit,
	.get_stats		= bnx2i_conn_get_stats,
	/* TCP connect - disconnect - option-2 interface calls */
	.ep_connect		= bnx2i_ep_connect,
	.ep_poll		= bnx2i_ep_poll,
	.ep_disconnect		= bnx2i_ep_disconnect,
	.set_path		= bnx2i_nl_set_path,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.cleanup_task		= bnx2i_cleanup_task,
};
/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
*
* Copyright (c) 2004 - 2009 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
*/
#include "bnx2i.h"
/**
 * bnx2i_dev_to_hba - maps dev pointer to adapter struct
 * @dev: device pointer
 *
 * Resolve a sysfs device back to the bnx2i adapter (hba) that owns it.
 */
static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
{
	return iscsi_host_priv(class_to_shost(dev));
}
/**
 * bnx2i_show_sq_info - returns currently configured send queue (SQ) size
 * @dev: device pointer
 * @attr: device attribute (unused)
 * @buf: buffer to return current SQ size parameter
 *
 * Returns the current SQ size parameter; this parameter determines the
 * number of outstanding iSCSI commands supported on a connection.
 */
static ssize_t bnx2i_show_sq_info(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);

	/* printed in hex to match the "0x%x" format accepted by the store */
	return sprintf(buf, "0x%x\n", hba->max_sqes);
}
/**
 * bnx2i_set_sq_info - update send queue (SQ) size parameter
 * @dev: device pointer
 * @attr: device attribute (unused)
 * @buf: buffer containing the new SQ size (hex, "0x%x")
 * @count: parameter buffer size
 *
 * Interface for user to change shared queue size allocated for each conn.
 * Must be within SQ limits and a power of 2. For the latter this is needed
 * because of how libiscsi preallocates tasks.
 *
 * Returns @count on success (out-of-range or malformed values are silently
 * ignored, as before), or -EBUSY while connections are offloaded.  The
 * busy path used to return 0, which makes a userspace write(2) loop
 * forever - sysfs stores must return either the consumed byte count or a
 * negative errno.
 */
static ssize_t bnx2i_set_sq_info(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
	u32 val;
	int max_sq_size;

	/* SQ size cannot be changed while connections are offloaded */
	if (hba->ofld_conns_active)
		goto skip_config;

	/* 57710 (10G) devices support a larger SQ than 570x devices */
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
		max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
	else
		max_sq_size = BNX2I_570X_SQ_WQES_MAX;

	if (sscanf(buf, " 0x%x ", &val) > 0) {
		if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
		    (is_power_of_2(val)))
			hba->max_sqes = val;
	}

	return count;

skip_config:
	printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
	return -EBUSY;
}
/**
 * bnx2i_show_ccell_info - returns command cell (HQ) size
 * @dev: device pointer
 * @attr: device attribute (unused)
 * @buf: buffer to return current command cell (HQ) size parameter
 *
 * Returns the per-connection TCP history queue size parameter.
 */
static ssize_t bnx2i_show_ccell_info(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);

	/* printed in hex to match the "0x%x" format accepted by the store */
	return sprintf(buf, "0x%x\n", hba->num_ccell);
}
/**
 * bnx2i_set_ccell_info - set command cell (HQ) size
 * @dev: device pointer
 * @attr: device attribute (unused)
 * @buf: buffer containing the new command cell count (hex, "0x%x")
 * @count: parameter buffer size
 *
 * Updates the per-connection TCP history queue size parameter.
 * (The kernel-doc previously named this function "bnx2i_get_link_state";
 * fixed to match the actual definition.)
 *
 * Returns @count on success (out-of-range or malformed values are silently
 * ignored, as before), or -EBUSY while connections are offloaded.  The
 * busy path used to return 0, which makes a userspace write(2) loop
 * forever - sysfs stores must return either the consumed byte count or a
 * negative errno.
 */
static ssize_t bnx2i_set_ccell_info(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	u32 val;
	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);

	/* CCELL count cannot be changed while connections are offloaded */
	if (hba->ofld_conns_active)
		goto skip_config;

	if (sscanf(buf, " 0x%x ", &val) > 0) {
		if ((val >= BNX2I_CCELLS_MIN) &&
		    (val <= BNX2I_CCELLS_MAX)) {
			hba->num_ccell = val;
		}
	}

	return count;

skip_config:
	printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
	return -EBUSY;
}
/* sysfs knobs: per-host send-queue size and command-cell (HQ) count,
 * readable by all, writable by root only
 */
static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
		   bnx2i_show_sq_info, bnx2i_set_sq_info);
static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
		   bnx2i_show_ccell_info, bnx2i_set_ccell_info);

/* NULL-terminated list plugged into bnx2i_host_template.shost_attrs */
struct device_attribute *bnx2i_dev_attributes[] = {
	&dev_attr_sq_size,
	&dev_attr_num_ccell,
	NULL
};
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment