Commit 91226790 authored by Dmitry Kravkov, committed by David S. Miller

bnx2x: use FW 7.8.17

Update appropriate HSI files and adapt driver accordingly.
Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Yuval Mintz <yuvalmin@broadcom.com>
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 82594f8f
...@@ -729,7 +729,7 @@ struct bnx2x_fastpath { ...@@ -729,7 +729,7 @@ struct bnx2x_fastpath {
#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \ #define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
skb->csum_offset)) skb->csum_offset))
#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff) #define pbd_tcp_flags(tcp_hdr) (ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff)
#define XMIT_PLAIN 0 #define XMIT_PLAIN 0
#define XMIT_CSUM_V4 0x1 #define XMIT_CSUM_V4 0x1
......
...@@ -3086,11 +3086,11 @@ int bnx2x_poll(struct napi_struct *napi, int budget) ...@@ -3086,11 +3086,11 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
* to ease the pain of our fellow microcode engineers * to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs * we use one mapping for both BDs
*/ */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp, static u16 bnx2x_tx_split(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata, struct bnx2x_fp_txdata *txdata,
struct sw_tx_bd *tx_buf, struct sw_tx_bd *tx_buf,
struct eth_tx_start_bd **tx_bd, u16 hlen, struct eth_tx_start_bd **tx_bd, u16 hlen,
u16 bd_prod, int nbd) u16 bd_prod)
{ {
struct eth_tx_start_bd *h_tx_bd = *tx_bd; struct eth_tx_start_bd *h_tx_bd = *tx_bd;
struct eth_tx_bd *d_tx_bd; struct eth_tx_bd *d_tx_bd;
...@@ -3098,11 +3098,10 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp, ...@@ -3098,11 +3098,10 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
int old_len = le16_to_cpu(h_tx_bd->nbytes); int old_len = le16_to_cpu(h_tx_bd->nbytes);
/* first fix first BD */ /* first fix first BD */
h_tx_bd->nbd = cpu_to_le16(nbd);
h_tx_bd->nbytes = cpu_to_le16(hlen); h_tx_bd->nbytes = cpu_to_le16(hlen);
DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n", DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd); h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
/* now get a new data BD /* now get a new data BD
* (after the pbd) and fill it */ * (after the pbd) and fill it */
...@@ -3131,7 +3130,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp, ...@@ -3131,7 +3130,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32))) #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16))) #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{ {
__sum16 tsum = (__force __sum16) csum; __sum16 tsum = (__force __sum16) csum;
...@@ -3146,7 +3145,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) ...@@ -3146,7 +3145,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
return bswab16(tsum); return bswab16(tsum);
} }
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{ {
u32 rc; u32 rc;
...@@ -3254,8 +3253,8 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, ...@@ -3254,8 +3253,8 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
} }
#endif #endif
static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
u32 xmit_type) u32 xmit_type)
{ {
*parsing_data |= (skb_shinfo(skb)->gso_size << *parsing_data |= (skb_shinfo(skb)->gso_size <<
ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
...@@ -3272,13 +3271,13 @@ static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, ...@@ -3272,13 +3271,13 @@ static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
* @pbd: parse BD * @pbd: parse BD
* @xmit_type: xmit flags * @xmit_type: xmit flags
*/ */
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, static void bnx2x_set_pbd_gso(struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd, struct eth_tx_parse_bd_e1x *pbd,
u32 xmit_type) u32 xmit_type)
{ {
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq); pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
pbd->tcp_flags = pbd_tcp_flags(skb); pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
if (xmit_type & XMIT_GSO_V4) { if (xmit_type & XMIT_GSO_V4) {
pbd->ip_id = bswab16(ip_hdr(skb)->id); pbd->ip_id = bswab16(ip_hdr(skb)->id);
...@@ -3305,15 +3304,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, ...@@ -3305,15 +3304,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* @parsing_data: data to be updated * @parsing_data: data to be updated
* @xmit_type: xmit flags * @xmit_type: xmit flags
* *
* 57712 related * 57712/578xx related
*/ */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type) u32 *parsing_data, u32 xmit_type)
{ {
*parsing_data |= *parsing_data |=
((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
if (xmit_type & XMIT_CSUM_TCP) { if (xmit_type & XMIT_CSUM_TCP) {
*parsing_data |= ((tcp_hdrlen(skb) / 4) << *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
...@@ -3328,17 +3327,14 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, ...@@ -3328,17 +3327,14 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data; return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
} }
static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) struct eth_tx_start_bd *tx_start_bd,
u32 xmit_type)
{ {
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
if (xmit_type & XMIT_CSUM_V4) if (xmit_type & XMIT_CSUM_V6)
tx_start_bd->bd_flags.as_bitfield |= tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
ETH_TX_BD_FLAGS_IP_CSUM;
else
tx_start_bd->bd_flags.as_bitfield |=
ETH_TX_BD_FLAGS_IPV6;
if (!(xmit_type & XMIT_CSUM_TCP)) if (!(xmit_type & XMIT_CSUM_TCP))
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
...@@ -3352,9 +3348,9 @@ static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, ...@@ -3352,9 +3348,9 @@ static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
* @pbd: parse BD to be updated * @pbd: parse BD to be updated
* @xmit_type: xmit flags * @xmit_type: xmit flags
*/ */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd, struct eth_tx_parse_bd_e1x *pbd,
u32 xmit_type) u32 xmit_type)
{ {
u8 hlen = (skb_network_header(skb) - skb->data) >> 1; u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
...@@ -3482,7 +3478,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3482,7 +3478,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mac_type = MULTICAST_ADDRESS; mac_type = MULTICAST_ADDRESS;
} }
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* First, check if we need to linearize the skb (due to FW /* First, check if we need to linearize the skb (due to FW
restrictions). No need to check fragmentation if page size > 8K restrictions). No need to check fragmentation if page size > 8K
(there will be no violation to FW restrictions) */ (there will be no violation to FW restrictions) */
...@@ -3530,12 +3526,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3530,12 +3526,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
first_bd = tx_start_bd; first_bd = tx_start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
SET_FLAG(tx_start_bd->general_data,
ETH_TX_START_BD_PARSE_NBDS,
0);
/* header nbd */ /* header nbd: indirectly zero other flags! */
SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
/* remember the first BD of the packet */ /* remember the first BD of the packet */
tx_buf->first_bd = txdata->tx_bd_prod; tx_buf->first_bd = txdata->tx_bd_prod;
...@@ -3555,19 +3548,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3555,19 +3548,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* when transmitting in a vf, start bd must hold the ethertype /* when transmitting in a vf, start bd must hold the ethertype
* for fw to enforce it * for fw to enforce it
*/ */
#ifndef BNX2X_STOP_ON_ERROR if (IS_VF(bp))
if (IS_VF(bp)) {
#endif
tx_start_bd->vlan_or_ethertype = tx_start_bd->vlan_or_ethertype =
cpu_to_le16(ntohs(eth->h_proto)); cpu_to_le16(ntohs(eth->h_proto));
#ifndef BNX2X_STOP_ON_ERROR else
} else {
/* used by FW for packet accounting */ /* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
}
#endif
} }
nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
/* turn on parsing and get a BD */ /* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
...@@ -3579,21 +3569,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3579,21 +3569,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
/* Set PBD in checksum offload case */ /* Set PBD in checksum offload case */
if (xmit_type & XMIT_CSUM) if (xmit_type & XMIT_CSUM)
/* Set PBD in checksum offload case w/o encapsulation */
hlen = bnx2x_set_pbd_csum_e2(bp, skb, hlen = bnx2x_set_pbd_csum_e2(bp, skb,
&pbd_e2_parsing_data, &pbd_e2_parsing_data,
xmit_type); xmit_type);
if (IS_MF_SI(bp) || IS_VF(bp)) { /* Add the macs to the parsing BD this is a vf */
/* fill in the MAC addresses in the PBD - for local if (IS_VF(bp)) {
* switching /* override GRE parameters in BD */
*/ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, &pbd_e2->data.mac_addr.src_mid,
&pbd_e2->src_mac_addr_mid, &pbd_e2->data.mac_addr.src_lo,
&pbd_e2->src_mac_addr_lo,
eth->h_source); eth->h_source);
bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
&pbd_e2->dst_mac_addr_mid, bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
&pbd_e2->dst_mac_addr_lo, &pbd_e2->data.mac_addr.dst_mid,
&pbd_e2->data.mac_addr.dst_lo,
eth->h_dest); eth->h_dest);
} }
...@@ -3615,14 +3606,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3615,14 +3606,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Setup the data pointer of the first BD of the packet */ /* Setup the data pointer of the first BD of the packet */
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
pkt_size = tx_start_bd->nbytes; pkt_size = tx_start_bd->nbytes;
DP(NETIF_MSG_TX_QUEUED, DP(NETIF_MSG_TX_QUEUED,
"first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n", "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), le16_to_cpu(tx_start_bd->nbytes),
tx_start_bd->bd_flags.as_bitfield, tx_start_bd->bd_flags.as_bitfield,
le16_to_cpu(tx_start_bd->vlan_or_ethertype)); le16_to_cpu(tx_start_bd->vlan_or_ethertype));
...@@ -3635,10 +3625,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3635,10 +3625,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
if (unlikely(skb_headlen(skb) > hlen)) if (unlikely(skb_headlen(skb) > hlen)) {
nbd++;
bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
&tx_start_bd, hlen, &tx_start_bd, hlen,
bd_prod, ++nbd); bd_prod);
}
if (!CHIP_IS_E1x(bp)) if (!CHIP_IS_E1x(bp))
bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
xmit_type); xmit_type);
...@@ -3728,9 +3720,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3728,9 +3720,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pbd_e2) if (pbd_e2)
DP(NETIF_MSG_TX_QUEUED, DP(NETIF_MSG_TX_QUEUED,
"PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n", "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid, pbd_e2,
pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi, pbd_e2->data.mac_addr.dst_hi,
pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo, pbd_e2->data.mac_addr.dst_mid,
pbd_e2->data.mac_addr.dst_lo,
pbd_e2->data.mac_addr.src_hi,
pbd_e2->data.mac_addr.src_mid,
pbd_e2->data.mac_addr.src_lo,
pbd_e2->parsing_data); pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
......
...@@ -30,31 +30,31 @@ ...@@ -30,31 +30,31 @@
* IRO[138].m2) + ((sbId) * IRO[138].m3)) * IRO[138].m2) + ((sbId) * IRO[138].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[157].base) #define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
(IRO[316].base + ((pfId) * IRO[316].m1))
#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[317].base + ((pfId) * IRO[317].m1)) (IRO[317].base + ((pfId) * IRO[317].m1))
#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[318].base + ((pfId) * IRO[318].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2)) (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
(IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2)) (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
(IRO[315].base + ((pfId) * IRO[315].m1)) (IRO[316].base + ((pfId) * IRO[316].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[307].base + ((pfId) * IRO[307].m1)) (IRO[308].base + ((pfId) * IRO[308].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[306].base + ((pfId) * IRO[306].m1)) (IRO[307].base + ((pfId) * IRO[307].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[305].base + ((pfId) * IRO[305].m1)) (IRO[306].base + ((pfId) * IRO[306].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[151].base + ((funcId) * IRO[151].m1)) (IRO[151].base + ((funcId) * IRO[151].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
...@@ -114,7 +114,7 @@ ...@@ -114,7 +114,7 @@
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[268].base + ((pfId) * IRO[268].m1)) (IRO[268].base + ((pfId) * IRO[268].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
(IRO[277].base + ((pfId) * IRO[277].m1)) (IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[264].base + ((pfId) * IRO[264].m1)) (IRO[264].base + ((pfId) * IRO[264].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
...@@ -136,35 +136,32 @@ ...@@ -136,35 +136,32 @@
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) #define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[176].base + ((assertListEntry) * IRO[176].m1)) (IRO[176].base + ((assertListEntry) * IRO[176].m1))
#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
(IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
IRO[205].m2))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[183].base + ((portId) * IRO[183].m1)) (IRO[183].base + ((portId) * IRO[183].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
(IRO[318].base + ((pfId) * IRO[318].m1)) (IRO[319].base + ((pfId) * IRO[319].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \ #define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[178].base + ((funcId) * IRO[178].m1)) (IRO[178].base + ((funcId) * IRO[178].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
(IRO[282].base + ((pfId) * IRO[282].m1))
#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[283].base + ((pfId) * IRO[283].m1)) (IRO[283].base + ((pfId) * IRO[283].m1))
#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[284].base + ((pfId) * IRO[284].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
(IRO[287].base + ((pfId) * IRO[287].m1)) (IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
(IRO[284].base + ((pfId) * IRO[284].m1)) (IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[280].base + ((pfId) * IRO[280].m1)) (IRO[281].base + ((pfId) * IRO[281].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[279].base + ((pfId) * IRO[279].m1)) (IRO[280].base + ((pfId) * IRO[280].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[278].base + ((pfId) * IRO[278].m1)) (IRO[279].base + ((pfId) * IRO[279].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
(IRO[281].base + ((pfId) * IRO[281].m1)) (IRO[282].base + ((pfId) * IRO[282].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
(IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[286].base + ((pfId) * IRO[286].m1)) (IRO[286].base + ((pfId) * IRO[286].m1))
#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[182].base + ((pfId) * IRO[182].m1)) (IRO[182].base + ((pfId) * IRO[182].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
...@@ -190,39 +187,39 @@ ...@@ -190,39 +187,39 @@
#define XSTORM_FUNC_EN_OFFSET(funcId) \ #define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1)) (IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
(IRO[295].base + ((pfId) * IRO[295].m1)) (IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
(IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[299].base + ((pfId) * IRO[299].m1)) (IRO[299].base + ((pfId) * IRO[299].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ #define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[300].base + ((pfId) * IRO[300].m1)) (IRO[300].base + ((pfId) * IRO[300].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ #define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[301].base + ((pfId) * IRO[301].m1)) (IRO[301].base + ((pfId) * IRO[301].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ #define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[302].base + ((pfId) * IRO[302].m1)) (IRO[302].base + ((pfId) * IRO[302].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ #define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[303].base + ((pfId) * IRO[303].m1)) (IRO[303].base + ((pfId) * IRO[303].m1))
#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ #define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[304].base + ((pfId) * IRO[304].m1)) (IRO[304].base + ((pfId) * IRO[304].m1))
#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
(IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[294].base + ((pfId) * IRO[294].m1)) (IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[293].base + ((pfId) * IRO[293].m1)) (IRO[294].base + ((pfId) * IRO[294].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[292].base + ((pfId) * IRO[292].m1)) (IRO[293].base + ((pfId) * IRO[293].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
(IRO[297].base + ((pfId) * IRO[297].m1)) (IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
(IRO[296].base + ((pfId) * IRO[296].m1)) (IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
(IRO[291].base + ((pfId) * IRO[291].m1)) (IRO[292].base + ((pfId) * IRO[292].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[290].base + ((pfId) * IRO[290].m1)) (IRO[291].base + ((pfId) * IRO[291].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
(IRO[289].base + ((pfId) * IRO[289].m1)) (IRO[290].base + ((pfId) * IRO[290].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
(IRO[288].base + ((pfId) * IRO[288].m1)) (IRO[289].base + ((pfId) * IRO[289].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1)) (IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
......
...@@ -2953,14 +2953,15 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp, ...@@ -2953,14 +2953,15 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_ACTIVE, &flags); __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
/* tx only connections collect statistics (on the same index as the /* tx only connections collect statistics (on the same index as the
* parent connection). The statistics are zeroed when the parent * parent connection). The statistics are zeroed when the parent
* connection is initialized. * connection is initialized.
*/ */
__set_bit(BNX2X_Q_FLG_STATS, &flags); __set_bit(BNX2X_Q_FLG_STATS, &flags);
if (zero_stats) if (zero_stats)
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
__set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
#ifdef BNX2X_STOP_ON_ERROR #ifdef BNX2X_STOP_ON_ERROR
__set_bit(BNX2X_Q_FLG_TX_SEC, &flags); __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
......
...@@ -476,7 +476,8 @@ static int bnx2x_check_mac_add(struct bnx2x *bp, ...@@ -476,7 +476,8 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
/* Check if a requested MAC already exists */ /* Check if a requested MAC already exists */
list_for_each_entry(pos, &o->head, link) list_for_each_entry(pos, &o->head, link)
if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
(data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return -EEXIST; return -EEXIST;
return 0; return 0;
...@@ -509,7 +510,9 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, ...@@ -509,7 +510,9 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
list_for_each_entry(pos, &o->head, link) list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
ETH_ALEN))) ETH_ALEN)) &&
(data->vlan_mac.is_inner_mac ==
pos->u.vlan_mac.is_inner_mac))
return -EEXIST; return -EEXIST;
return 0; return 0;
...@@ -527,7 +530,8 @@ static struct bnx2x_vlan_mac_registry_elem * ...@@ -527,7 +530,8 @@ static struct bnx2x_vlan_mac_registry_elem *
DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac); DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
list_for_each_entry(pos, &o->head, link) list_for_each_entry(pos, &o->head, link)
if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
(data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return pos; return pos;
return NULL; return NULL;
...@@ -562,7 +566,9 @@ static struct bnx2x_vlan_mac_registry_elem * ...@@ -562,7 +566,9 @@ static struct bnx2x_vlan_mac_registry_elem *
list_for_each_entry(pos, &o->head, link) list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
ETH_ALEN))) ETH_ALEN)) &&
(data->vlan_mac.is_inner_mac ==
pos->u.vlan_mac.is_inner_mac))
return pos; return pos;
return NULL; return NULL;
...@@ -759,6 +765,8 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, ...@@ -759,6 +765,8 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid, &rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac); &rule_entry->mac.mac_lsb, mac);
rule_entry->mac.inner_mac =
cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */ /* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) { if (cmd == BNX2X_VLAN_MAC_MOVE) {
...@@ -775,6 +783,9 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, ...@@ -775,6 +783,9 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid, &rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac); &rule_entry->mac.mac_lsb, mac);
rule_entry->mac.inner_mac =
cpu_to_le16(elem->cmd_data.vlan_mac.
u.mac.is_inner_mac);
} }
/* Set the ramrod data header */ /* Set the ramrod data header */
...@@ -963,7 +974,8 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, ...@@ -963,7 +974,8 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid, &rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac); &rule_entry->pair.mac_lsb, mac);
rule_entry->pair.inner_mac =
cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */ /* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) { if (cmd == BNX2X_VLAN_MAC_MOVE) {
rule_entry++; rule_entry++;
...@@ -980,6 +992,9 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, ...@@ -980,6 +992,9 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid, &rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac); &rule_entry->pair.mac_lsb, mac);
rule_entry->pair.inner_mac =
cpu_to_le16(elem->cmd_data.vlan_mac.u.
vlan_mac.is_inner_mac);
} }
/* Set the ramrod data header */ /* Set the ramrod data header */
...@@ -4417,6 +4432,10 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, ...@@ -4417,6 +4432,10 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
tx_data->force_default_pri_flg = tx_data->force_default_pri_flg =
test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
tx_data->tunnel_non_lso_pcsum_location =
test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
PCSUM_ON_BD;
tx_data->tx_status_block_id = params->fw_sb_id; tx_data->tx_status_block_id = params->fw_sb_id;
tx_data->tx_sb_index_number = params->sb_cq_index; tx_data->tx_sb_index_number = params->sb_cq_index;
tx_data->tss_leading_client_id = params->tss_leading_cl_id; tx_data->tss_leading_client_id = params->tss_leading_cl_id;
......
...@@ -100,6 +100,7 @@ struct bnx2x_raw_obj { ...@@ -100,6 +100,7 @@ struct bnx2x_raw_obj {
/************************* VLAN-MAC commands related parameters ***************/ /************************* VLAN-MAC commands related parameters ***************/
struct bnx2x_mac_ramrod_data { struct bnx2x_mac_ramrod_data {
u8 mac[ETH_ALEN]; u8 mac[ETH_ALEN];
u8 is_inner_mac;
}; };
struct bnx2x_vlan_ramrod_data { struct bnx2x_vlan_ramrod_data {
...@@ -108,6 +109,7 @@ struct bnx2x_vlan_ramrod_data { ...@@ -108,6 +109,7 @@ struct bnx2x_vlan_ramrod_data {
struct bnx2x_vlan_mac_ramrod_data { struct bnx2x_vlan_mac_ramrod_data {
u8 mac[ETH_ALEN]; u8 mac[ETH_ALEN];
u8 is_inner_mac;
u16 vlan; u16 vlan;
}; };
...@@ -825,7 +827,8 @@ enum { ...@@ -825,7 +827,8 @@ enum {
BNX2X_Q_FLG_TX_SEC, BNX2X_Q_FLG_TX_SEC,
BNX2X_Q_FLG_ANTI_SPOOF, BNX2X_Q_FLG_ANTI_SPOOF,
BNX2X_Q_FLG_SILENT_VLAN_REM, BNX2X_Q_FLG_SILENT_VLAN_REM,
BNX2X_Q_FLG_FORCE_DEFAULT_PRI BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
BNX2X_Q_FLG_PCSUM_ON_PKT
}; };
/* Queue type options: queue type may be a combination of below. */ /* Queue type options: queue type may be a combination of below. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment