Commit 0ce77802 authored by David S. Miller

Merge branch 'bnx2x-next'

Yuval Mintz says:

====================
bnx2x: Support new Multi-function modes

This patch series adds support for two new Multi-function modes -
Unified Fabric Port [UFP] and NIC partitioning 1.5 [NPAR1.5].

With the addition of the new multi-function modes, the series also
revises some of the storage-related multi-function macros.

[Note that this series has several small checkpatch issues]
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 77f4f622 83bad206
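
Before the diff, here is a minimal stand-alone sketch (an editor's illustration, not part of the patch) of how the series represents the new modes: a per-function mf_sub_mode field qualifies the existing mf_mode, so UFP is only reported on top of switch-dependent (SD) multi-function. SUB_MF_MODE_* and the shape of IS_MF_UFP() mirror the bnx2x.h hunks below; struct mf_state and the MULTI_FUNCTION_* values are stand-ins for struct bnx2x and its enum.

#include <stdio.h>

/* Stand-in values; the real MULTI_FUNCTION_* enum lives in bnx2x.h. */
enum { MULTI_FUNCTION_SD = 1, MULTI_FUNCTION_SI = 2, MULTI_FUNCTION_AFEX = 3 };

/* Mirrors the enum added to bnx2x.h by this series. */
enum { SUB_MF_MODE_UNKNOWN = 0, SUB_MF_MODE_UFP, SUB_MF_MODE_NPAR1_DOT_5 };

/* Reduced stand-in for struct bnx2x. */
struct mf_state {
	int mf_mode;
	unsigned char mf_sub_mode;
};

#define IS_MF_SD(bp)	((bp)->mf_mode == MULTI_FUNCTION_SD)
#define IS_MF_UFP(bp)	(IS_MF_SD(bp) && \
			 (bp)->mf_sub_mode == SUB_MF_MODE_UFP)

int main(void)
{
	struct mf_state sd_ufp = { MULTI_FUNCTION_SD, SUB_MF_MODE_UFP };
	struct mf_state si_ufp = { MULTI_FUNCTION_SI, SUB_MF_MODE_UFP };

	/* The sub-mode only counts when the base mode is switch-dependent. */
	printf("SD + UFP sub-mode -> IS_MF_UFP = %d\n", IS_MF_UFP(&sd_ufp)); /* 1 */
	printf("SI + UFP sub-mode -> IS_MF_UFP = %d\n", IS_MF_UFP(&si_ufp)); /* 0 */
	return 0;
}
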
......@@ -1448,6 +1448,12 @@ struct bnx2x_fp_stats {
struct bnx2x_eth_q_stats_old eth_q_stats_old;
};
enum {
SUB_MF_MODE_UNKNOWN = 0,
SUB_MF_MODE_UFP,
SUB_MF_MODE_NPAR1_DOT_5,
};
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
......@@ -1659,6 +1665,9 @@ struct bnx2x {
#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
#define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX)
u8 mf_sub_mode;
#define IS_MF_UFP(bp) (IS_MF_SD(bp) && \
bp->mf_sub_mode == SUB_MF_MODE_UFP)
u8 wol;
......@@ -2361,7 +2370,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define ATTN_HARD_WIRED_MASK 0xff00
#define ATTENTION_ID 4
#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_SD(bp) || \
#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_PERSONALITY_ONLY(bp) || \
IS_MF_FCOE_AFEX(bp))
/* stuff added to make the code fit 80Col */
......@@ -2537,15 +2546,45 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
#define IS_MF_ISCSI_SI(bp) (IS_MF_SI(bp) && BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp))
#define BNX2X_MF_EXT_PROTOCOL_FCOE(bp) ((bp)->mf_ext_config & \
#define IS_MF_ISCSI_ONLY(bp) (IS_MF_ISCSI_SD(bp) || IS_MF_ISCSI_SI(bp))
#define BNX2X_MF_EXT_PROTOCOL_MASK \
(MACP_FUNC_CFG_FLAGS_ETHERNET | \
MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD | \
MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
#define IS_MF_FCOE_AFEX(bp) (IS_MF_AFEX(bp) && BNX2X_MF_EXT_PROTOCOL_FCOE(bp))
#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
#define BNX2X_MF_EXT_PROT(bp) ((bp)->mf_ext_config & \
BNX2X_MF_EXT_PROTOCOL_MASK)
#define BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp) \
(BNX2X_MF_EXT_PROT(bp) & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
#define BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp) \
(BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
#define BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) \
(BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD)
#define IS_MF_FCOE_AFEX(bp) \
(IS_MF_AFEX(bp) && BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp))
#define IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) \
(IS_MF_SD(bp) && \
(BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
#define IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp) \
(IS_MF_SI(bp) && \
(BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) || \
BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp)))
#define IS_MF_STORAGE_PERSONALITY_ONLY(bp) \
(IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) || \
IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp))
#define SET_FLAG(value, mask, flag) \
do {\
(value) &= ~(mask);\
......
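
The following stand-alone model (an editor's illustration, not patch code) shows the distinction the renamed macros above rely on: BNX2X_HAS_MF_EXT_PROTOCOL_FCOE() is satisfied whenever the FCoE offload bit is set, while BNX2X_IS_MF_EXT_PROTOCOL_FCOE() requires FCoE to be the only protocol within the mask, which is what the *_STORAGE_PERSONALITY_ONLY macros build on. The macro names below are simplified and the MACP_FUNC_CFG_FLAGS_* bit values are stand-ins.

#include <stdio.h>

/* Stand-in bit values; the real flags come from bnx2x_hsi.h. */
#define MACP_FUNC_CFG_FLAGS_ETHERNET		0x1
#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD	0x2
#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD	0x4

#define EXT_PROTOCOL_MASK	(MACP_FUNC_CFG_FLAGS_ETHERNET | \
				 MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD | \
				 MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)

#define EXT_PROT(cfg)		((cfg) & EXT_PROTOCOL_MASK)
#define HAS_FCOE(cfg)		(EXT_PROT(cfg) & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
#define IS_FCOE_ONLY(cfg)	(EXT_PROT(cfg) == MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)

int main(void)
{
	unsigned int eth_plus_fcoe = MACP_FUNC_CFG_FLAGS_ETHERNET |
				     MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD;
	unsigned int fcoe_only = MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD;

	/* "HAS" is satisfied by both configurations... */
	printf("HAS_FCOE: eth+fcoe=%d fcoe=%d\n",
	       !!HAS_FCOE(eth_plus_fcoe), !!HAS_FCOE(fcoe_only));
	/* ...but only a pure FCoE personality is "IS ... ONLY". */
	printf("IS_FCOE_ONLY: eth+fcoe=%d fcoe=%d\n",
	       IS_FCOE_ONLY(eth_plus_fcoe), IS_FCOE_ONLY(fcoe_only));
	return 0;
}
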
......@@ -1938,7 +1938,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
/* override in STORAGE SD modes */
if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
if (IS_MF_STORAGE_ONLY(bp))
bp->num_ethernet_queues = 1;
/* Add special queues */
......@@ -4231,14 +4231,13 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
if (!is_valid_ether_addr(addr->sa_data)) {
BNX2X_ERR("Requested MAC address is not valid\n");
return -EINVAL;
}
if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
!is_zero_ether_addr(addr->sa_data)) {
BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
if (IS_MF_STORAGE_ONLY(bp)) {
BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
return -EINVAL;
}
......@@ -4417,8 +4416,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
u8 cos;
int rx_ring_size = 0;
if (!bp->rx_ring_size &&
(IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
rx_ring_size = MIN_RX_SIZE_NONTPA;
bp->rx_ring_size = rx_ring_size;
} else if (!bp->rx_ring_size) {
......
......@@ -936,6 +936,12 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
start_params->gre_tunnel_type = IPGRE_TUNNEL;
start_params->inner_gre_rss_en = 1;
if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
start_params->class_fail_ethtype = ETH_P_FIP;
start_params->class_fail = 1;
start_params->no_added_tags = 1;
}
return bnx2x_func_state_change(bp, &func_params);
}
......@@ -1298,15 +1304,7 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
}
}
static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
if (is_valid_ether_addr(addr) ||
(is_zero_ether_addr(addr) &&
(IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))))
return true;
return false;
}
/**
* bnx2x_fill_fw_str - Fill buffer with FW version string
......
......@@ -1852,7 +1852,7 @@ static int bnx2x_set_ringparam(struct net_device *dev,
if ((ering->rx_pending > MAX_RX_AVAIL) ||
(ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
MIN_RX_SIZE_TPA)) ||
(ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) ||
(ering->tx_pending > (IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL)) ||
(ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EINVAL;
......
......@@ -280,17 +280,11 @@ struct shared_hw_cfg { /* NVRAM Offset */
#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
u32 power_dissipated; /* 0x11c */
#define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000
#define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16
#define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000
#define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000
#define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000
#define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000
#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24
u32 config_3; /* 0x11C */
#define SHARED_HW_CFG_EXTENDED_MF_MODE_MASK 0x00000F00
#define SHARED_HW_CFG_EXTENDED_MF_MODE_SHIFT 8
#define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5 0x00000000
#define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR2_DOT_0 0x00000100
u32 ump_nc_si_config; /* 0x120 */
#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003
......@@ -859,6 +853,8 @@ struct shared_feat_cfg { /* NVRAM Offset */
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
#define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400
#define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE 0x00000600
#define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE 0x00000700
/* The interval in seconds between sending LLDP packets. Set to zero
to disable the feature */
......@@ -1268,6 +1264,10 @@ struct drv_func_mb {
#define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
#define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
#define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
#define DRV_MSG_CODE_OEM_OK 0x00010000
#define DRV_MSG_CODE_OEM_FAILURE 0x00020000
#define DRV_MSG_CODE_OEM_UPDATE_SVID_OK 0x00030000
#define DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE 0x00040000
/*
* The optic module verification command requires bootcode
v5.0.6 or later, the specific optic module verification command
......@@ -1423,6 +1423,12 @@ struct drv_func_mb {
#define DRV_STATUS_SET_MF_BW 0x00000004
#define DRV_STATUS_LINK_EVENT 0x00000008
#define DRV_STATUS_OEM_EVENT_MASK 0x00000070
#define DRV_STATUS_OEM_DISABLE_ENABLE_PF 0x00000010
#define DRV_STATUS_OEM_BANDWIDTH_ALLOCATION 0x00000020
#define DRV_STATUS_OEM_UPDATE_SVID 0x00000080
#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
#define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200
......
......@@ -2905,6 +2905,57 @@ static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
}
}
static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
{
struct bnx2x_func_switch_update_params *switch_update_params;
struct bnx2x_func_state_params func_params;
memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
switch_update_params = &func_params.params.switch_update;
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
if (IS_MF_UFP(bp)) {
int func = BP_ABS_FUNC(bp);
u32 val;
/* Re-learn the S-tag from shmem */
val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
FUNC_MF_CFG_E1HOV_TAG_MASK;
if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
bp->mf_ov = val;
} else {
BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
goto fail;
}
/* Configure new S-tag in LLH */
REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
bp->mf_ov);
/* Send Ramrod to update FW of change */
__set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
&switch_update_params->changes);
switch_update_params->vlan = bp->mf_ov;
if (bnx2x_func_state_change(bp, &func_params) < 0) {
BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
bp->mf_ov);
goto fail;
}
DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);
bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
return;
}
/* not supported by SW yet */
fail:
bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
}
static void bnx2x_pmf_update(struct bnx2x *bp)
{
int port = BP_PORT(bp);
......@@ -3297,7 +3348,8 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
{
int port = BP_PORT(bp);
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
/* Tx queue should be only re-enabled */
netif_tx_wake_all_queues(bp->dev);
......@@ -3652,14 +3704,30 @@ void bnx2x_update_mng_version(struct bnx2x *bp)
ethver, iscsiver, fcoever);
}
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
{
DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
u32 cmd_ok, cmd_fail;
if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
/* sanity */
if (event & DRV_STATUS_DCC_EVENT_MASK &&
event & DRV_STATUS_OEM_EVENT_MASK) {
BNX2X_ERR("Received simultaneous events %08x\n", event);
return;
}
/*
* This is the only place besides the function initialization
if (event & DRV_STATUS_DCC_EVENT_MASK) {
cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
cmd_ok = DRV_MSG_CODE_DCC_OK;
} else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
cmd_ok = DRV_MSG_CODE_OEM_OK;
}
DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
/* This is the only place besides the function initialization
* where the bp->flags can change so it is done without any
* locks
*/
......@@ -3674,18 +3742,22 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
bnx2x_e1h_enable(bp);
}
dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
DRV_STATUS_OEM_DISABLE_ENABLE_PF);
}
if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
bnx2x_config_mf_bw(bp);
dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
}
/* Report results to MCP */
if (dcc_event)
bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
if (event)
bnx2x_fw_command(bp, cmd_fail, 0);
else
bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
bnx2x_fw_command(bp, cmd_ok, 0);
}
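
For readability, here is a reduced stand-alone model (editor's illustration, not the driver routine) of the dispatch above: one handler serves both the legacy DCC events and the new OEM events by selecting the matching OK/FAILURE mailbox command up front, clearing the bits it handled, and failing the request if anything unsupported remains. The sanity check for mixed DCC/OEM events and the actual PF enable/disable and bandwidth handling are omitted; the DRV_STATUS_* values are copied from the hunks earlier in this series, while the command identifiers and the function name are illustrative.

#include <stdio.h>

#define DRV_STATUS_OEM_DISABLE_ENABLE_PF	0x00000010
#define DRV_STATUS_OEM_BANDWIDTH_ALLOCATION	0x00000020
#define DRV_STATUS_DCC_EVENT_MASK		0x0000ff00
#define DRV_STATUS_DCC_DISABLE_ENABLE_PF	0x00000100
#define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION	0x00000200

enum fw_cmd { CMD_DCC_OK, CMD_DCC_FAILURE, CMD_OEM_OK, CMD_OEM_FAILURE };

/* Returns the acknowledgement that would be reported for a given event word. */
static enum fw_cmd oem_event_ack(unsigned int event)
{
	enum fw_cmd cmd_ok, cmd_fail;

	if (event & DRV_STATUS_DCC_EVENT_MASK) {
		cmd_ok = CMD_DCC_OK;
		cmd_fail = CMD_DCC_FAILURE;
	} else {
		cmd_ok = CMD_OEM_OK;
		cmd_fail = CMD_OEM_FAILURE;
	}

	/* Bits the driver knows how to handle are cleared as they are served. */
	event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
		   DRV_STATUS_OEM_DISABLE_ENABLE_PF |
		   DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
		   DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);

	/* Anything left over is unsupported, so the MCP is told it failed. */
	return event ? cmd_fail : cmd_ok;
}

int main(void)
{
	printf("OEM bandwidth event acked OK: %d\n",
	       oem_event_ack(DRV_STATUS_OEM_BANDWIDTH_ALLOCATION) == CMD_OEM_OK);
	return 0;
}
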
/* must be called under the spq lock */
......@@ -4167,9 +4239,12 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
func_mf_config[BP_ABS_FUNC(bp)].config);
val = SHMEM_RD(bp,
func_mb[BP_FW_MB_IDX(bp)].drv_status);
if (val & DRV_STATUS_DCC_EVENT_MASK)
bnx2x_dcc_event(bp,
(val & DRV_STATUS_DCC_EVENT_MASK));
if (val & (DRV_STATUS_DCC_EVENT_MASK |
DRV_STATUS_OEM_EVENT_MASK))
bnx2x_oem_event(bp,
(val & (DRV_STATUS_DCC_EVENT_MASK |
DRV_STATUS_OEM_EVENT_MASK)));
if (val & DRV_STATUS_SET_MF_BW)
bnx2x_set_mf_bw(bp);
......@@ -4195,6 +4270,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
val & DRV_STATUS_AFEX_EVENT_MASK);
if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
bnx2x_handle_eee_event(bp);
if (val & DRV_STATUS_OEM_UPDATE_SVID)
bnx2x_handle_update_svid_cmd(bp);
if (bp->link_vars.periodic_flags &
PERIODIC_FLAGS_LINK_EVENT) {
/* sync with link */
......@@ -7930,8 +8009,11 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
if (IS_MF(bp)) {
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
bp->mf_ov);
}
}
bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
......@@ -8323,13 +8405,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
if (is_zero_ether_addr(bp->dev->dev_addr) &&
(IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
"Ignoring Zero MAC for STORAGE SD mode\n");
return 0;
}
if (IS_PF(bp)) {
unsigned long ramrod_flags = 0;
......@@ -11355,14 +11430,13 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
dev_info.port_hw_config[port].
fcoe_wwn_node_name_lower);
} else if (!IS_MF_SD(bp)) {
/*
* Read the WWN info only if the FCoE feature is enabled for
/* Read the WWN info only if the FCoE feature is enabled for
* this function.
*/
if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
bnx2x_get_ext_wwn_info(bp, func);
} else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
} else {
if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
bnx2x_get_ext_wwn_info(bp, func);
}
......@@ -11401,7 +11475,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
* In non SD mode features configuration comes from struct
* func_ext_config.
*/
if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
if (!IS_MF_SD(bp)) {
u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
val2 = MF_CFG_RD(bp, func_ext_config[func].
......@@ -11520,7 +11594,7 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
if (!is_valid_ether_addr(bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
"bad Ethernet MAC address configuration: %pM\n"
"change it manually before bringing up the appropriate network interface\n",
......@@ -11550,11 +11624,27 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
return cfg;
}
static void validate_set_si_mode(struct bnx2x *bp)
{
u8 func = BP_ABS_FUNC(bp);
u32 val;
val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
/* check for legal mac (upper bytes) */
if (val != 0xffff) {
bp->mf_mode = MULTI_FUNCTION_SI;
bp->mf_config[BP_VN(bp)] =
MF_CFG_RD(bp, func_mf_config[func].config);
} else
BNX2X_DEV_INFO("illegal MAC address for SI\n");
}
static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
int /*abs*/func = BP_ABS_FUNC(bp);
int vn;
u32 val = 0;
u32 val = 0, val2 = 0;
int rc = 0;
bnx2x_get_common_hwinfo(bp);
......@@ -11634,6 +11724,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_ov = 0;
bp->mf_mode = 0;
bp->mf_sub_mode = 0;
vn = BP_VN(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
......@@ -11663,15 +11754,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
switch (val) {
case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
val = MF_CFG_RD(bp, func_mf_config[func].
mac_upper);
/* check for legal mac (upper bytes)*/
if (val != 0xffff) {
bp->mf_mode = MULTI_FUNCTION_SI;
bp->mf_config[vn] = MF_CFG_RD(bp,
func_mf_config[func].config);
} else
BNX2X_DEV_INFO("illegal MAC address for SI\n");
validate_set_si_mode(bp);
break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
if ((!CHIP_IS_E1x(bp)) &&
......@@ -11699,9 +11782,33 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
} else
BNX2X_DEV_INFO("illegal OV for SD\n");
break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
bp->mf_mode = MULTI_FUNCTION_SD;
bp->mf_sub_mode = SUB_MF_MODE_UFP;
bp->mf_config[vn] =
MF_CFG_RD(bp,
func_mf_config[func].config);
break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
bp->mf_config[vn] = 0;
break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
val2 = SHMEM_RD(bp,
dev_info.shared_hw_config.config_3);
val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
switch (val2) {
case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
validate_set_si_mode(bp);
bp->mf_sub_mode =
SUB_MF_MODE_NPAR1_DOT_5;
break;
default:
/* Unknown configuration */
bp->mf_config[vn] = 0;
BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
val);
}
break;
default:
/* Unknown configuration: reset mf_config */
bp->mf_config[vn] = 0;
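
To summarize the new cases in this switch, here is a compact stand-alone model (editor's illustration, not the driver code) of how the forced-SF-mode field and config_3 select the new modes: UFP maps to switch-dependent MF with the UFP sub-mode, while the extended mode consults config_3 and, for NPAR1.5, ends up in switch-independent MF with the NPAR1.5 sub-mode (the real code additionally validates the configured MAC via validate_set_si_mode before committing to SI). The constants are copied from the bnx2x_hsi.h hunks in this series; the function and type names are illustrative.

#include <stdio.h>

/* From the shared_feat_cfg hunk in this series. */
#define SF_MODE_UFP_MODE		0x00000600
#define SF_MODE_EXTENDED_MODE		0x00000700
/* From the shared_hw_cfg config_3 hunk. */
#define EXT_MF_MODE_MASK		0x00000F00
#define EXT_MF_MODE_NPAR1_DOT_5		0x00000000

enum mf_mode { MF_OFF, MF_SD, MF_SI };
enum sub_mode { SUB_UNKNOWN, SUB_UFP, SUB_NPAR1_DOT_5 };

struct mf_pick { enum mf_mode mode; enum sub_mode sub; };

static struct mf_pick pick_mode(unsigned int forced_sf_mode, unsigned int config_3)
{
	struct mf_pick p = { MF_OFF, SUB_UNKNOWN };

	switch (forced_sf_mode) {
	case SF_MODE_UFP_MODE:
		/* UFP: switch-dependent MF qualified by the UFP sub-mode. */
		p.mode = MF_SD;
		p.sub = SUB_UFP;
		break;
	case SF_MODE_EXTENDED_MODE:
		/* Extended modes are further qualified by config_3. */
		if ((config_3 & EXT_MF_MODE_MASK) == EXT_MF_MODE_NPAR1_DOT_5) {
			p.mode = MF_SI;
			p.sub = SUB_NPAR1_DOT_5;
		}
		break;
	default:
		/* All other cases are unchanged by this series. */
		break;
	}
	return p;
}

int main(void)
{
	struct mf_pick p = pick_mode(SF_MODE_UFP_MODE, 0);

	printf("UFP forced mode -> mf_mode=%d mf_sub_mode=%d\n", p.mode, p.sub);
	return 0;
}
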
......@@ -11722,6 +11829,11 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
func, bp->mf_ov, bp->mf_ov);
} else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) {
dev_err(&bp->pdev->dev,
"Unexpected - no valid MF OV for func %d in UFP mode\n",
func);
bp->path_has_ovlan = true;
} else {
dev_err(&bp->pdev->dev,
"No valid MF OV for func %d, aborting\n",
......@@ -11970,7 +12082,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
bp->disable_tpa = disable_tpa;
bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
/* Reduce memory usage in kdump environment by disabling TPA */
bp->disable_tpa |= is_kdump_kernel();
......@@ -11990,7 +12102,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->mrrs = mrrs;
bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
if (IS_VF(bp))
bp->rx_ring_size = MAX_RX_AVAIL;
......@@ -12310,7 +12422,7 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
bp->rx_mode = rx_mode;
/* handle ISCSI SD mode */
if (IS_MF_ISCSI_SD(bp))
if (IS_MF_ISCSI_ONLY(bp))
bp->rx_mode = BNX2X_RX_MODE_NONE;
/* Schedule the rx_mode command */
......@@ -12417,7 +12529,7 @@ static int bnx2x_validate_addr(struct net_device *dev)
if (IS_VF(bp))
bnx2x_sample_bulletin(bp);
if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
if (!is_valid_ether_addr(dev->dev_addr)) {
BNX2X_ERR("Non-valid Ethernet address\n");
return -EADDRNOTAVAIL;
}
......
......@@ -5673,8 +5673,23 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
rdata->gre_tunnel_type = start_params->gre_tunnel_type;
rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
rdata->vxlan_dst_port = cpu_to_le16(4789);
rdata->sd_vlan_eth_type = cpu_to_le16(0x8100);
rdata->sd_accept_mf_clss_fail = start_params->class_fail;
if (start_params->class_fail_ethtype) {
rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
rdata->sd_accept_mf_clss_fail_ethtype =
cpu_to_le16(start_params->class_fail_ethtype);
}
rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri;
rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val;
if (start_params->sd_vlan_eth_type)
rdata->sd_vlan_eth_type =
cpu_to_le16(start_params->sd_vlan_eth_type);
else
rdata->sd_vlan_eth_type =
cpu_to_le16(0x8100);
rdata->no_added_tags = start_params->no_added_tags;
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
......@@ -5708,6 +5723,30 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
&switch_update_params->changes);
}
if (test_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
&switch_update_params->changes)) {
rdata->sd_vlan_tag_change_flg = 1;
rdata->sd_vlan_tag =
cpu_to_le16(switch_update_params->vlan);
}
if (test_bit(BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
&switch_update_params->changes)) {
rdata->sd_vlan_eth_type_change_flg = 1;
rdata->sd_vlan_eth_type =
cpu_to_le16(switch_update_params->vlan_eth_type);
}
if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
&switch_update_params->changes)) {
rdata->sd_vlan_force_pri_change_flg = 1;
if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
&switch_update_params->changes))
rdata->sd_vlan_force_pri_flg = 1;
rdata->sd_vlan_force_pri_flg =
switch_update_params->vlan_force_prio;
}
if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
&switch_update_params->changes)) {
rdata->update_tunn_cfg_flg = 1;
......
......@@ -1098,6 +1098,10 @@ struct bnx2x_queue_sp_obj {
enum {
BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
......@@ -1178,10 +1182,29 @@ struct bnx2x_func_start_params {
* capabilities
*/
u8 inner_gre_rss_en;
/* Allows accepting of packets failing MF classification, possibly
* only matching a given ethertype
*/
u8 class_fail;
u16 class_fail_ethtype;
/* Override priority of output packets */
u8 sd_vlan_force_pri;
u8 sd_vlan_force_pri_val;
/* Replace vlan's ethertype */
u16 sd_vlan_eth_type;
/* Prevent inner vlans from being added by FW */
u8 no_added_tags;
};
struct bnx2x_func_switch_update_params {
unsigned long changes; /* BNX2X_F_UPDATE_XX bits */
u16 vlan;
u16 vlan_eth_type;
u8 vlan_force_prio;
u8 tunnel_mode;
u8 gre_tunnel_type;
};
......