Commit 1bc7fe64 authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2016-04-25

This series contains updates to ixgbe and ixgbevf.

Emil provides several patches, starting with the consolidation of the
logic behind configuring spoof checking.  He fixed an issue that caused
link problems on backplane devices, because x550em_a/x devices did not
have a default value for mac->ops.setup_link.  He also refactored the
ethtool stats to bring the logic closer to how ixgbe handles stats and
to set up per-queue stats for ixgbevf.
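
In rough outline, the consolidated flow (condensed from the
ixgbe_sriov.c hunk below; locals and bounds checks omitted) makes the
spoofchk ndo the one place that drives every anti-spoofing knob:

	int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				      bool setting)
	{
		...
		adapter->vfinfo[vf].spoofchk_enabled = setting;

		/* one entry point now configures MAC, VLAN and, where the
		 * MAC supports it, Ethertype anti-spoofing for the VF pool
		 */
		hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
		hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
		if (hw->mac.ops.set_ethertype_anti_spoofing)
			hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);

		return 0;
	}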

Mark adds a check of a new register to wait for previous register writes
to complete before issuing a register read, which is needed when slower
links are in use.  He also fixed the flow control setup for x550em_a,
which was using the incorrect fc_setup function.
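
Condensed from the ixgbe_read_reg() hunk below, the wait is a bounded
poll of the new busy register ahead of the actual read:

	/* flush outstanding register writes on slow (10/100 Mb) links
	 * before trusting the value returned by the next register read
	 */
	for (i = 0; i < 200; ++i) {
		value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
		if (likely(!value))
			break;		/* all previous writes completed */
		udelay(5);
	}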

Don added a workaround for crosstalk on empty SFP+ cages, since on some
systems that crosstalk could lead to link flap on those empty cages.
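
The workaround (as in the watchdog hunk below) treats the SDP2 pin as a
module-presence indication and discards a link-up report whenever the
cage reads empty:

	if (adapter->need_crosstalk_fix) {
		u32 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
				    IXGBE_ESDP_SDP2;

		/* an SFP link indication with no module present is
		 * assumed to be crosstalk, not a real link
		 */
		if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full)
			link_up = false;
	}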

Jake converts ixgbe and ixgbevf to use the BIT() macro.
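
For reference, BIT() and BIT_ULL() come from include/linux/bitops.h and
expand to unsigned shifts, which avoids the undefined behavior of
shifting a signed 1 into the sign bit:

	#define BIT(nr)			(1UL << (nr))
	#define BIT_ULL(nr)		(1ULL << (nr))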

Alex Duyck adds support for partial GSO segmentation in the case of
tunnels for ixgbe and ixgbevf.  He then preps for Hyper-V by moving the
API negotiation into mac_ops.
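
With NETIF_F_GSO_PARTIAL the stack pre-adjusts everything in the headers
that does not vary from segment to segment, so the device only has to
replicate the outer headers while segmenting; the driver opts in by
advertising the tunnel GSO types as partial features (from the probe
hunk below):

	netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL |
			    IXGBE_GSO_PARTIAL_FEATURES;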

Arnd Bergmann provides a fix for the ARM compile warnings in linux-next
by converting an overly long udelay() to msleep().
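
The conversion itself is mechanical; a hypothetical hunk of that shape
(the real call site and delay value are in Arnd's patch, not reproduced
here) would look like:

	-	udelay(10000);	/* long busy-wait; ARM rejects big udelay() at build time */
	+	msleep(10);	/* sleeps instead, fine in non-atomic context */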
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e7157f28 d4f90d9d
@@ -143,14 +143,11 @@ struct vf_data_storage {
 	unsigned char vf_mac_addresses[ETH_ALEN];
 	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
 	u16 num_vf_mc_hashes;
-	u16 default_vf_vlan_id;
-	u16 vlans_enabled;
 	bool clear_to_send;
 	bool pf_set_mac;
 	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
 	u16 pf_qos;
 	u16 tx_rate;
-	u16 vlan_count;
 	u8 spoofchk_enabled;
 	bool rss_query_enabled;
 	u8 trusted;
@@ -173,7 +170,7 @@ struct vf_macvlans {
 };

 #define IXGBE_MAX_TXD_PWR	14
-#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
+#define IXGBE_MAX_DATA_PER_TXD	(1u << IXGBE_MAX_TXD_PWR)

 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
@@ -623,44 +620,44 @@ struct ixgbe_adapter {
 	 * thus the additional *_CAPABLE flags.
 	 */
 	u32 flags;
-#define IXGBE_FLAG_MSI_ENABLED			(u32)(1 << 1)
-#define IXGBE_FLAG_MSIX_ENABLED			(u32)(1 << 3)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE		(u32)(1 << 4)
-#define IXGBE_FLAG_RX_PS_CAPABLE		(u32)(1 << 5)
-#define IXGBE_FLAG_RX_PS_ENABLED		(u32)(1 << 6)
-#define IXGBE_FLAG_DCA_ENABLED			(u32)(1 << 8)
-#define IXGBE_FLAG_DCA_CAPABLE			(u32)(1 << 9)
-#define IXGBE_FLAG_IMIR_ENABLED			(u32)(1 << 10)
-#define IXGBE_FLAG_MQ_CAPABLE			(u32)(1 << 11)
-#define IXGBE_FLAG_DCB_ENABLED			(u32)(1 << 12)
-#define IXGBE_FLAG_VMDQ_CAPABLE			(u32)(1 << 13)
-#define IXGBE_FLAG_VMDQ_ENABLED			(u32)(1 << 14)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE		(u32)(1 << 15)
-#define IXGBE_FLAG_NEED_LINK_UPDATE		(u32)(1 << 16)
-#define IXGBE_FLAG_NEED_LINK_CONFIG		(u32)(1 << 17)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE		(u32)(1 << 18)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		(u32)(1 << 19)
-#define IXGBE_FLAG_FCOE_CAPABLE			(u32)(1 << 20)
-#define IXGBE_FLAG_FCOE_ENABLED			(u32)(1 << 21)
-#define IXGBE_FLAG_SRIOV_CAPABLE		(u32)(1 << 22)
-#define IXGBE_FLAG_SRIOV_ENABLED		(u32)(1 << 23)
+#define IXGBE_FLAG_MSI_ENABLED			BIT(1)
+#define IXGBE_FLAG_MSIX_ENABLED			BIT(3)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE		BIT(4)
+#define IXGBE_FLAG_RX_PS_CAPABLE		BIT(5)
+#define IXGBE_FLAG_RX_PS_ENABLED		BIT(6)
+#define IXGBE_FLAG_DCA_ENABLED			BIT(8)
+#define IXGBE_FLAG_DCA_CAPABLE			BIT(9)
+#define IXGBE_FLAG_IMIR_ENABLED			BIT(10)
+#define IXGBE_FLAG_MQ_CAPABLE			BIT(11)
+#define IXGBE_FLAG_DCB_ENABLED			BIT(12)
+#define IXGBE_FLAG_VMDQ_CAPABLE			BIT(13)
+#define IXGBE_FLAG_VMDQ_ENABLED			BIT(14)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE		BIT(15)
+#define IXGBE_FLAG_NEED_LINK_UPDATE		BIT(16)
+#define IXGBE_FLAG_NEED_LINK_CONFIG		BIT(17)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE		BIT(18)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		BIT(19)
+#define IXGBE_FLAG_FCOE_CAPABLE			BIT(20)
+#define IXGBE_FLAG_FCOE_ENABLED			BIT(21)
+#define IXGBE_FLAG_SRIOV_CAPABLE		BIT(22)
+#define IXGBE_FLAG_SRIOV_ENABLED		BIT(23)
 #define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE	BIT(24)
 #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
 #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)

 	u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE			(u32)(1 << 0)
-#define IXGBE_FLAG2_RSC_ENABLED			(u32)(1 << 1)
-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		(u32)(1 << 2)
-#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		(u32)(1 << 3)
-#define IXGBE_FLAG2_SEARCH_FOR_SFP		(u32)(1 << 4)
-#define IXGBE_FLAG2_SFP_NEEDS_RESET		(u32)(1 << 5)
-#define IXGBE_FLAG2_RESET_REQUESTED		(u32)(1 << 6)
-#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	(u32)(1 << 7)
-#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		(u32)(1 << 8)
-#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		(u32)(1 << 9)
-#define IXGBE_FLAG2_PTP_PPS_ENABLED		(u32)(1 << 10)
-#define IXGBE_FLAG2_PHY_INTERRUPT		(u32)(1 << 11)
+#define IXGBE_FLAG2_RSC_CAPABLE			BIT(0)
+#define IXGBE_FLAG2_RSC_ENABLED			BIT(1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		BIT(2)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		BIT(3)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP		BIT(4)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET		BIT(5)
+#define IXGBE_FLAG2_RESET_REQUESTED		BIT(6)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	BIT(7)
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		BIT(8)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		BIT(9)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED		BIT(10)
+#define IXGBE_FLAG2_PHY_INTERRUPT		BIT(11)
 #define IXGBE_FLAG2_VXLAN_REREG_NEEDED		BIT(12)
 #define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
@@ -806,6 +803,8 @@ struct ixgbe_adapter {
 #define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
 	u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)];
+
+	bool need_crosstalk_fix;
 };

 static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -828,7 +827,7 @@ struct ixgbe_fdir_filter {
 	struct hlist_node fdir_node;
 	union ixgbe_atr_input filter;
 	u16 sw_idx;
-	u16 action;
+	u64 action;
 };

 enum ixgbe_state_t {
...
@@ -792,7 +792,7 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 	}

 	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
-	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+	gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6));
 	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

 	/*
@@ -914,10 +914,10 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
 	if (vlan_on)
 		/* Turn on this VLAN id */
-		bits |= (1 << bitindex);
+		bits |= BIT(bitindex);
 	else
 		/* Turn off this VLAN id */
-		bits &= ~(1 << bitindex);
+		bits &= ~BIT(bitindex);
 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

 	return 0;
...
@@ -1296,17 +1296,17 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
 do { \
 	u32 n = (_n); \
-	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+	if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \
 		common_hash ^= lo_hash_dword >> n; \
-	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
 		bucket_hash ^= lo_hash_dword >> n; \
-	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \
 		sig_hash ^= lo_hash_dword << (16 - n); \
-	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+	if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \
 		common_hash ^= hi_hash_dword >> n; \
-	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
 		bucket_hash ^= hi_hash_dword >> n; \
-	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \
 		sig_hash ^= hi_hash_dword << (16 - n); \
 } while (0)
@@ -1440,9 +1440,9 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
 do { \
 	u32 n = (_n); \
-	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+	if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
 		bucket_hash ^= lo_hash_dword >> n; \
-	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+	if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
 		bucket_hash ^= hi_hash_dword >> n; \
 } while (0)
...
@@ -825,8 +825,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
 		 */
 		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
 				    IXGBE_EEC_SIZE_SHIFT);
-		eeprom->word_size = 1 << (eeprom_size +
-					  IXGBE_EEPROM_WORD_SIZE_SHIFT);
+		eeprom->word_size = BIT(eeprom_size +
+					IXGBE_EEPROM_WORD_SIZE_SHIFT);
 	}

 	if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -1502,7 +1502,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
 	 * one bit at a time.  Determine the starting bit based on count
 	 */
-	mask = 0x01 << (count - 1);
+	mask = BIT(count - 1);

 	for (i = 0; i < count; i++) {
 		/*
@@ -1991,7 +1991,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
 	 */
 	vector_reg = (vector >> 5) & 0x7F;
 	vector_bit = vector & 0x1F;
-	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+	hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
 }

 /**
@@ -2921,10 +2921,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 			mpsar_hi = 0;
 		}
 	} else if (vmdq < 32) {
-		mpsar_lo &= ~(1 << vmdq);
+		mpsar_lo &= ~BIT(vmdq);
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
 	} else {
-		mpsar_hi &= ~(1 << (vmdq - 32));
+		mpsar_hi &= ~BIT(vmdq - 32);
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
 	}
@@ -2953,11 +2953,11 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 	if (vmdq < 32) {
 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-		mpsar |= 1 << vmdq;
+		mpsar |= BIT(vmdq);
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
 	} else {
 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-		mpsar |= 1 << (vmdq - 32);
+		mpsar |= BIT(vmdq - 32);
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
 	}

 	return 0;
@@ -2978,11 +2978,11 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
 	u32 rar = hw->mac.san_mac_rar_index;

 	if (vmdq < 32) {
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
 	} else {
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
 	}

 	return 0;
@@ -3082,7 +3082,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 	 *    bits[4-0]:  which bit in the register
 	 */
 	regidx = vlan / 32;
-	vfta_delta = 1 << (vlan % 32);
+	vfta_delta = BIT(vlan % 32);
 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));

 	/* vfta_delta represents the difference between the current value
@@ -3113,12 +3113,12 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));

 	/* set the pool bit */
-	bits |= 1 << (vind % 32);
+	bits |= BIT(vind % 32);
 	if (vlan_on)
 		goto vlvf_update;

 	/* clear the pool bit */
-	bits ^= 1 << (vind % 32);
+	bits ^= BIT(vind % 32);

 	if (!bits &&
 	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
@@ -3310,43 +3310,25 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
 /**
  * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
  * @hw: pointer to hardware structure
- * @enable: enable or disable switch for anti-spoofing
- * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ * @enable: enable or disable switch for MAC anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
  *
  **/
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
 {
-	int j;
-	int pf_target_reg = pf >> 3;
-	int pf_target_shift = pf % 8;
-	u32 pfvfspoof = 0;
+	int vf_target_reg = vf >> 3;
+	int vf_target_shift = vf % 8;
+	u32 pfvfspoof;

 	if (hw->mac.type == ixgbe_mac_82598EB)
 		return;

+	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
 	if (enable)
-		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
-
-	/*
-	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
-	 * MAC anti-spoof enables in each register array element.
-	 */
-	for (j = 0; j < pf_target_reg; j++)
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
-	/*
-	 * The PF should be allowed to spoof so that it can support
-	 * emulation mode NICs.  Do not set the bits assigned to the PF
-	 */
-	pfvfspoof &= (1 << pf_target_shift) - 1;
-	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
-	/*
-	 * Remaining pools belong to the PF so they do not need to have
-	 * anti-spoofing enabled.
-	 */
-	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
+		pfvfspoof |= BIT(vf_target_shift);
+	else
+		pfvfspoof &= ~BIT(vf_target_shift);
+	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }

 /**
@@ -3367,9 +3349,9 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
 	if (enable)
-		pfvfspoof |= (1 << vf_target_shift);
+		pfvfspoof |= BIT(vf_target_shift);
 	else
-		pfvfspoof &= ~(1 << vf_target_shift);
+		pfvfspoof &= ~BIT(vf_target_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
...
@@ -106,7 +106,7 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
...
@@ -186,7 +186,7 @@ void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
 	for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
 		if (tc_config[tc].dcb_pfc != pfc_disabled)
-			*pfc_en |= 1 << tc;
+			*pfc_en |= BIT(tc);
 	}
 }
@@ -232,7 +232,7 @@ void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
 u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
 {
 	struct tc_configuration *tc_config = &cfg->tc_config[0];
-	u8 prio_mask = 1 << up;
+	u8 prio_mask = BIT(up);
 	u8 tc = cfg->num_tcs.pg_tcs;

 	/* If tc is 0 then DCB is likely not enabled or supported */
...
@@ -210,7 +210,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 	/* Configure PFC Tx thresholds per TC */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		if (!(pfc_en & (1 << i))) {
+		if (!(pfc_en & BIT(i))) {
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
 			continue;
...
@@ -248,7 +248,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 		int enabled = 0;

 		for (j = 0; j < MAX_USER_PRIORITY; j++) {
-			if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
+			if ((prio_tc[j] == i) && (pfc_en & BIT(j))) {
 				enabled = 1;
 				break;
 			}
...
@@ -62,7 +62,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
 	};
 	u8 up = dcb_getapp(adapter->netdev, &app);

-	if (up && !(up & (1 << adapter->fcoe.up)))
+	if (up && !(up & BIT(adapter->fcoe.up)))
 		changes |= BIT_APP_UPCHG;
 #endif
@@ -657,7 +657,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
 	    app->protocol == ETH_P_FCOE) {
 		u8 app_mask = dcb_ieee_getapp_mask(dev, app);

-		if (app_mask & (1 << adapter->fcoe.up))
+		if (app_mask & BIT(adapter->fcoe.up))
 			return 0;

 		adapter->fcoe.up = app->priority;
@@ -700,7 +700,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
 	    app->protocol == ETH_P_FCOE) {
 		u8 app_mask = dcb_ieee_getapp_mask(dev, app);

-		if (app_mask & (1 << adapter->fcoe.up))
+		if (app_mask & BIT(adapter->fcoe.up))
 			return 0;

 		adapter->fcoe.up = app_mask ?
...
@@ -1586,7 +1586,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 	/* Test each interrupt */
 	for (; i < 10; i++) {
 		/* Interrupt to test */
-		mask = 1 << i;
+		mask = BIT(i);

 		if (!shared_int) {
 			/*
@@ -3014,14 +3014,14 @@ static int ixgbe_get_ts_info(struct net_device *dev,
 			info->phc_index = -1;

 		info->tx_types =
-			(1 << HWTSTAMP_TX_OFF) |
-			(1 << HWTSTAMP_TX_ON);
+			BIT(HWTSTAMP_TX_OFF) |
+			BIT(HWTSTAMP_TX_ON);

 		info->rx_filters =
-			(1 << HWTSTAMP_FILTER_NONE) |
-			(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+			BIT(HWTSTAMP_FILTER_NONE) |
+			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
 		break;
 	default:
 		return ethtool_op_get_ts_info(dev, info);
...
@@ -371,6 +371,27 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
 	if (ixgbe_removed(reg_addr))
 		return IXGBE_FAILED_READ_REG;
+	if (unlikely(hw->phy.nw_mng_if_sel &
+		     IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) {
+		struct ixgbe_adapter *adapter;
+		int i;
+
+		for (i = 0; i < 200; ++i) {
+			value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
+			if (likely(!value))
+				goto writes_completed;
+			if (value == IXGBE_FAILED_READ_REG) {
+				ixgbe_remove_adapter(hw);
+				return IXGBE_FAILED_READ_REG;
+			}
+			udelay(5);
+		}
+
+		adapter = hw->back;
+		e_warn(hw, "register writes incomplete %08x\n", value);
+	}
+
+writes_completed:
 	value = readl(reg_addr + reg);
 	if (unlikely(value == IXGBE_FAILED_READ_REG))
 		ixgbe_check_remove(hw, reg);
@@ -2224,7 +2245,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 	/* Populate MSIX to EITR Select */
 	if (adapter->num_vfs > 32) {
-		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+		u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
 	}
@@ -2863,7 +2884,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 	if (adapter->rx_itr_setting & 1)
 		ixgbe_set_itr(q_vector);
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
-		ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+		ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));

 	return 0;
 }
@@ -3156,15 +3177,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 	 * currently 40.
 	 */
 	if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
-		txdctl |= (1 << 16);	/* WTHRESH = 1 */
+		txdctl |= 1u << 16;	/* WTHRESH = 1 */
 	else
-		txdctl |= (8 << 16);	/* WTHRESH = 8 */
+		txdctl |= 8u << 16;	/* WTHRESH = 8 */

 	/*
 	 * Setting PTHRESH to 32 both improves performance
 	 * and avoids a TX hang with DFP enabled
 	 */
-	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
+	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
 		   32;		/* PTHRESH = 32 */

 	/* reinitialize flowdirector state */
@@ -3716,9 +3737,9 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
 		return;

 	if (rss_i > 3)
-		psrtype |= 2 << 29;
+		psrtype |= 2u << 29;
 	else if (rss_i > 1)
-		psrtype |= 1 << 29;
+		psrtype |= 1u << 29;

 	for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
@@ -3745,9 +3766,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
 	reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;

 	/* Enable only the PF's pool for Tx/Rx */
-	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
-	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
 	if (adapter->bridge_mode == BRIDGE_MODE_VEB)
 		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
@@ -3776,34 +3797,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);

-	/* Enable MAC Anti-Spoofing */
-	hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
-					  adapter->num_vfs);
-
-	/* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
-	 * calling set_ethertype_anti_spoofing for each VF in loop below
-	 */
-	if (hw->mac.ops.set_ethertype_anti_spoofing) {
-		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
-				(IXGBE_ETQF_FILTER_EN |
-				 IXGBE_ETQF_TX_ANTISPOOF |
-				 IXGBE_ETH_P_LLDP));
-		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
-				(IXGBE_ETQF_FILTER_EN |
-				 IXGBE_ETQF_TX_ANTISPOOF |
-				 ETH_P_PAUSE));
-	}
-
-	/* For VFs that have spoof checking turned off */
 	for (i = 0; i < adapter->num_vfs; i++) {
-		if (!adapter->vfinfo[i].spoofchk_enabled)
-			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
-
-		/* enable ethertype anti spoofing if hw supports it */
-		if (hw->mac.ops.set_ethertype_anti_spoofing)
-			hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
+		/* configure spoof checking */
+		ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
+					  adapter->vfinfo[i].spoofchk_enabled);

 		/* Enable/Disable RSS query feature */
 		ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
@@ -3997,7 +3994,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
 	 * entry other than the PF.
 	 */
 	word = idx * 2 + (VMDQ_P(0) / 32);
-	bits = ~(1 << (VMDQ_P(0)) % 32);
+	bits = ~BIT(VMDQ_P(0) % 32);
 	bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));

 	/* Disable the filter so this falls into the default pool. */
@@ -4132,7 +4129,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
 		u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
 		u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);

-		vlvfb |= 1 << (VMDQ_P(0) % 32);
+		vlvfb |= BIT(VMDQ_P(0) % 32);
 		IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
 	}
@@ -4162,7 +4159,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
 		if (vlvf) {
 			/* record VLAN ID in VFTA */
-			vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+			vfta[(vid - vid_start) / 32] |= BIT(vid % 32);

 			/* if PF is part of this then continue */
 			if (test_bit(vid, adapter->active_vlans))
@@ -4171,7 +4168,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
 		/* remove PF from the pool */
 		word = i * 2 + VMDQ_P(0) / 32;
-		bits = ~(1 << (VMDQ_P(0) % 32));
+		bits = ~BIT(VMDQ_P(0) % 32);
 		bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
 	}
@@ -4865,9 +4862,9 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
 		return;

 	if (rss_i > 3)
-		psrtype |= 2 << 29;
+		psrtype |= 2u << 29;
 	else if (rss_i > 1)
-		psrtype |= 1 << 29;
+		psrtype |= 1u << 29;

 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
 }
@@ -4931,7 +4928,7 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
 	/* shutdown specific queue receive and wait for dma to settle */
 	ixgbe_disable_rx_queue(adapter, rx_ring);
 	usleep_range(10000, 20000);
-	ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
+	ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
 	ixgbe_clean_rx_ring(rx_ring);
 	rx_ring->l2_accel_priv = NULL;
 }
@@ -5575,6 +5572,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned int rss, fdir;
 	u32 fwsm;
+	u16 device_caps;
 #ifdef CONFIG_IXGBE_DCB
 	int j;
 	struct tc_configuration *tc;
@@ -5740,6 +5738,22 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
 	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

+	/* Cache bit indicating need for crosstalk fix */
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
+		hw->mac.ops.get_device_caps(hw, &device_caps);
+		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+			adapter->need_crosstalk_fix = false;
+		else
+			adapter->need_crosstalk_fix = true;
+		break;
+	default:
+		adapter->need_crosstalk_fix = false;
+		break;
+	}
+
 	/* set default work limits */
 	adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
@@ -6631,7 +6645,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
 		for (i = 0; i < adapter->num_q_vectors; i++) {
 			struct ixgbe_q_vector *qv = adapter->q_vector[i];
 			if (qv->rx.ring || qv->tx.ring)
-				eics |= ((u64)1 << i);
+				eics |= BIT_ULL(i);
 		}
 	}
@@ -6662,6 +6676,18 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
 		link_up = true;
 	}

+	/* If Crosstalk fix enabled do the sanity check of making sure
+	 * the SFP+ cage is empty.
+	 */
+	if (adapter->need_crosstalk_fix) {
+		u32 sfp_cage_full;
+
+		sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+				IXGBE_ESDP_SDP2;
+		if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full)
+			link_up = false;
+	}
+
 	if (adapter->ixgbe_ieee_pfc)
 		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
@@ -7008,6 +7034,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	s32 err;

+	/* If crosstalk fix enabled verify the SFP+ cage is full */
+	if (adapter->need_crosstalk_fix) {
+		u32 sfp_cage_full;
+
+		sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+				IXGBE_ESDP_SDP2;
+		if (!sfp_cage_full)
+			return;
+	}
+
 	/* not searching for SFP so there is nothing to do here */
 	if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
 	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
@@ -7220,9 +7256,18 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 		     struct ixgbe_tx_buffer *first,
 		     u8 *hdr_len)
 {
+	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
 	struct sk_buff *skb = first->skb;
-	u32 vlan_macip_lens, type_tucmd;
-	u32 mss_l4len_idx, l4len;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
 	int err;

 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7235,46 +7280,52 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	if (err < 0)
 		return err;

+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_checksum_start(skb);
+
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

-	if (first->protocol == htons(ETH_P_IP)) {
-		struct iphdr *iph = ip_hdr(skb);
-		iph->tot_len = 0;
-		iph->check = 0;
-		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-							 iph->daddr, 0,
-							 IPPROTO_TCP,
-							 0);
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+						  csum_unfold(l4.tcp->check)));
 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+		ip.v4->tot_len = 0;
 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
 				   IXGBE_TX_FLAGS_CSUM |
 				   IXGBE_TX_FLAGS_IPV4;
-	} else if (skb_is_gso_v6(skb)) {
-		ipv6_hdr(skb)->payload_len = 0;
-		tcp_hdr(skb)->check =
-		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-				     &ipv6_hdr(skb)->daddr,
-				     0, IPPROTO_TCP, 0);
+	} else {
+		ip.v6->payload_len = 0;
 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
 				   IXGBE_TX_FLAGS_CSUM;
 	}

-	/* compute header lengths */
-	l4len = tcp_hdrlen(skb);
-	*hdr_len = skb_transport_offset(skb) + l4len;
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+
+	/* compute length of segmentation header */
+	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+	/* remove payload length from inner checksum */
+	paylen = skb->len - l4_offset;
+	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));

 	/* update gso size and bytecount with header size */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
 	first->bytecount += (first->gso_segs - 1) * *hdr_len;

 	/* mss_l4len_id: use 0 as index for TSO */
-	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-	vlan_macip_lens = skb_network_header_len(skb);
-	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens = l4.hdr - ip.hdr;
+	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
@@ -8862,17 +8913,36 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 	kfree(fwd_adapter);
 }

-#define IXGBE_MAX_TUNNEL_HDR_LEN 80
+#define IXGBE_MAX_MAC_HDR_LEN		127
+#define IXGBE_MAX_NETWORK_HDR_LEN	511
+
 static netdev_features_t
 ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 		     netdev_features_t features)
 {
-	if (!skb->encapsulation)
-		return features;
+	unsigned int network_hdr_len, mac_hdr_len;

 	/* Make certain the headers can be described by a context descriptor */
-	if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
-		     IXGBE_MAX_TUNNEL_HDR_LEN))
-		return features & ~NETIF_F_CSUM_MASK;
+	mac_hdr_len = skb_network_header(skb) - skb->data;
+	if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+	if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	/* We can only support IPV4 TSO in tunnels if we can mangle the
+	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 */
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+		features &= ~NETIF_F_TSO;

 	return features;
 }
@@ -9156,7 +9226,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_ioremap;
 	}

 	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
-	if (!(eec & (1 << 8)))
+	if (!(eec & BIT(8)))
 		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

 	/* PHY */
@@ -9239,31 +9309,44 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			   NETIF_F_TSO6 |
 			   NETIF_F_RXHASH |
 			   NETIF_F_RXCSUM |
-			   NETIF_F_HW_CSUM |
-			   NETIF_F_HW_VLAN_CTAG_TX |
-			   NETIF_F_HW_VLAN_CTAG_RX |
-			   NETIF_F_HW_VLAN_CTAG_FILTER;
+			   NETIF_F_HW_CSUM;
+
+#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+				    NETIF_F_GSO_GRE_CSUM | \
+				    NETIF_F_GSO_IPIP | \
+				    NETIF_F_GSO_SIT | \
+				    NETIF_F_GSO_UDP_TUNNEL | \
+				    NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+	netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
+	netdev->features |= NETIF_F_GSO_PARTIAL |
+			    IXGBE_GSO_PARTIAL_FEATURES;

 	if (hw->mac.type >= ixgbe_mac_82599EB)
 		netdev->features |= NETIF_F_SCTP_CRC;

 	/* copy netdev features into list of user selectable features */
-	netdev->hw_features |= netdev->features;
-	netdev->hw_features |= NETIF_F_RXALL |
+	netdev->hw_features |= netdev->features |
+			       NETIF_F_HW_VLAN_CTAG_RX |
+			       NETIF_F_HW_VLAN_CTAG_TX |
+			       NETIF_F_RXALL |
 			       NETIF_F_HW_L2FW_DOFFLOAD;

 	if (hw->mac.type >= ixgbe_mac_82599EB)
 		netdev->hw_features |= NETIF_F_NTUPLE |
 				       NETIF_F_HW_TC;

-	netdev->vlan_features |= NETIF_F_SG |
-				 NETIF_F_TSO |
-				 NETIF_F_TSO6 |
-				 NETIF_F_HW_CSUM |
-				 NETIF_F_SCTP_CRC;
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	/* set this bit last since it cannot be part of vlan_features */
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_TX;
+
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+	netdev->hw_enc_features |= netdev->vlan_features;

 	netdev->mpls_features |= NETIF_F_HW_CSUM;
-	netdev->hw_enc_features |= NETIF_F_HW_CSUM;

 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->priv_flags |= IFF_SUPP_NOFCS;
@@ -9294,10 +9377,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 				   NETIF_F_FCOE_MTU;
 	}
 #endif /* IXGBE_FCOE */
-	if (pci_using_dac) {
-		netdev->features |= NETIF_F_HIGHDMA;
-		netdev->vlan_features |= NETIF_F_HIGHDMA;
-	}

 	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
 		netdev->hw_features |= NETIF_F_LRO;
...
@@ -314,8 +314,8 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
 		break;
 	}

-	if (vflre & (1 << vf_shift)) {
-		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+	if (vflre & BIT(vf_shift)) {
+		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift));
 		hw->mbx.stats.rsts++;
 		return 0;
 	}
...
@@ -107,7 +107,7 @@
 #define IXGBE_PE		0xE0	/* Port expander addr */
 #define IXGBE_PE_OUTPUT		1	/* Output reg offset */
 #define IXGBE_PE_CONFIG		3	/* Config reg offset */
-#define IXGBE_PE_BIT1		(1 << 1)
+#define IXGBE_PE_BIT1		BIT(1)

 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE	0x400
...
@@ -396,7 +396,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
 		if (incval > 0x00FFFFFFULL)
 			e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
 		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
-				(1 << IXGBE_INCPER_SHIFT_82599) |
+				BIT(IXGBE_INCPER_SHIFT_82599) |
 				((u32)incval & 0x00FFFFFFUL));
 		break;
 	default:
@@ -1114,7 +1114,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 		incval >>= IXGBE_INCVAL_SHIFT_82599;
 		cc.shift -= IXGBE_INCVAL_SHIFT_82599;
 		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
-				(1 << IXGBE_INCPER_SHIFT_82599) | incval);
+				BIT(IXGBE_INCPER_SHIFT_82599) | incval);
 		break;
 	default:
 		/* other devices aren't supported */
...
...@@ -406,7 +406,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, ...@@ -406,7 +406,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
mta_reg |= (1 << vector_bit); mta_reg |= BIT(vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
} }
vmolr |= IXGBE_VMOLR_ROMPE; vmolr |= IXGBE_VMOLR_ROMPE;
...@@ -433,7 +433,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) ...@@ -433,7 +433,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
mta_reg |= (1 << vector_bit); mta_reg |= BIT(vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
} }
...@@ -536,9 +536,9 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) ...@@ -536,9 +536,9 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* enable or disable receive depending on error */ /* enable or disable receive depending on error */
vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
if (err) if (err)
vfre &= ~(1 << vf_shift); vfre &= ~BIT(vf_shift);
else else
vfre |= 1 << vf_shift; vfre |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre); IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
if (err) { if (err) {
...@@ -592,8 +592,8 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) ...@@ -592,8 +592,8 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
u32 vlvfb_mask, pool_mask, i; u32 vlvfb_mask, pool_mask, i;
/* create mask for VF and other pools */ /* create mask for VF and other pools */
pool_mask = ~(1 << (VMDQ_P(0) % 32)); pool_mask = ~BIT(VMDQ_P(0) % 32);
vlvfb_mask = 1 << (vf % 32); vlvfb_mask = BIT(vf % 32);
/* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
for (i = IXGBE_VLVF_ENTRIES; i--;) { for (i = IXGBE_VLVF_ENTRIES; i--;) {
...@@ -629,7 +629,7 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) ...@@ -629,7 +629,7 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
goto update_vlvfb; goto update_vlvfb;
vid = vlvf & VLAN_VID_MASK; vid = vlvf & VLAN_VID_MASK;
mask = 1 << (vid % 32); mask = BIT(vid % 32);
/* clear bit from VFTA */ /* clear bit from VFTA */
vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32)); vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
...@@ -813,7 +813,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) ...@@ -813,7 +813,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* enable transmit for vf */ /* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
reg |= 1 << vf_shift; reg |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
/* force drop enable for all VF Rx queues */ /* force drop enable for all VF Rx queues */
...@@ -821,7 +821,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) ...@@ -821,7 +821,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* enable receive for vf */ /* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
reg |= 1 << vf_shift; reg |= BIT(vf_shift);
/* /*
* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
* For more info take a look at ixgbe_set_vf_lpe * For more info take a look at ixgbe_set_vf_lpe
...@@ -837,7 +837,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) ...@@ -837,7 +837,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
#endif /* CONFIG_FCOE */ #endif /* CONFIG_FCOE */
if (pf_max_frame > ETH_FRAME_LEN) if (pf_max_frame > ETH_FRAME_LEN)
reg &= ~(1 << vf_shift); reg &= ~BIT(vf_shift);
} }
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
...@@ -846,7 +846,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) ...@@ -846,7 +846,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* Enable counting of spoofed packets in the SSVPC register */ /* Enable counting of spoofed packets in the SSVPC register */
reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
reg |= (1 << vf_shift); reg |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
/* /*
...@@ -908,8 +908,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, ...@@ -908,8 +908,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
u8 tcs = netdev_get_num_tc(adapter->netdev); u8 tcs = netdev_get_num_tc(adapter->netdev);
struct ixgbe_hw *hw = &adapter->hw;
int err;
 	if (adapter->vfinfo[vf].pf_vlan || tcs) {
 		e_warn(drv,
@@ -923,19 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
 	if (!vid && !add)
 		return 0;
 
-	err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
-	if (err)
-		return err;
-
-	if (adapter->vfinfo[vf].spoofchk_enabled)
-		hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-
-	if (add)
-		adapter->vfinfo[vf].vlan_count++;
-	else if (adapter->vfinfo[vf].vlan_count)
-		adapter->vfinfo[vf].vlan_count--;
-
-	return 0;
+	return ixgbe_set_vf_vlan(adapter, add, vid, vf);
 }
 
 static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
@@ -964,8 +950,11 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
 	 * If the VF is allowed to set MAC filters then turn off
 	 * anti-spoofing to avoid false positives.
 	 */
-	if (adapter->vfinfo[vf].spoofchk_enabled)
-		ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+	if (adapter->vfinfo[vf].spoofchk_enabled) {
+		struct ixgbe_hw *hw = &adapter->hw;
+
+		hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
+	}
 
 	err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
@@ -1321,9 +1310,6 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
 	ixgbe_set_vmvir(adapter, vlan, qos, vf);
 	ixgbe_set_vmolr(hw, vf, false);
-	if (adapter->vfinfo[vf].spoofchk_enabled)
-		hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-	adapter->vfinfo[vf].vlan_count++;
 
 	/* enable hide vlan on X550 */
 	if (hw->mac.type >= ixgbe_mac_X550)
@@ -1356,9 +1342,6 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
 	ixgbe_set_vf_vlan(adapter, true, 0, vf);
 	ixgbe_clear_vmvir(adapter, vf);
 	ixgbe_set_vmolr(hw, vf, true);
-	hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
-	if (adapter->vfinfo[vf].vlan_count)
-		adapter->vfinfo[vf].vlan_count--;
 
 	/* disable hide VLAN on X550 */
 	if (hw->mac.type >= ixgbe_mac_X550)
@@ -1525,27 +1508,34 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
 int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	int vf_target_reg = vf >> 3;
-	int vf_target_shift = vf % 8;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 regval;
 
 	if (vf >= adapter->num_vfs)
 		return -EINVAL;
 
 	adapter->vfinfo[vf].spoofchk_enabled = setting;
 
-	regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
-	regval &= ~(1 << vf_target_shift);
-	regval |= (setting << vf_target_shift);
-	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
-
-	if (adapter->vfinfo[vf].vlan_count) {
-		vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
-		regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
-		regval &= ~(1 << vf_target_shift);
-		regval |= (setting << vf_target_shift);
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
+	/* configure MAC spoofing */
+	hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
+
+	/* configure VLAN spoofing */
+	hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
+
+	/* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
+	 * calling set_ethertype_anti_spoofing for each VF in loop below
+	 */
+	if (hw->mac.ops.set_ethertype_anti_spoofing) {
+		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
+				(IXGBE_ETQF_FILTER_EN |
+				 IXGBE_ETQF_TX_ANTISPOOF |
+				 IXGBE_ETH_P_LLDP));
+		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
+				(IXGBE_ETQF_FILTER_EN |
+				 IXGBE_ETQF_TX_ANTISPOOF |
+				 ETH_P_PAUSE));
+
+		hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
 	}
 
 	return 0;
...
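The consolidation above replaces four open-coded PFVFSPOOF read/modify/write sequences with the mac_ops helpers and drops the vlan_count bookkeeping that used to gate the VLAN anti-spoof bit. A minimal, compilable userspace sketch of the register arithmetic those helpers hide; it assumes the 82599/X540 layout of eight VFs per 32-bit PFVFSPOOF register, MAC anti-spoof enables in the low byte and VLAN anti-spoof enables one byte up, and the helper name is illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Assumed PFVFSPOOF layout: eight VFs per 32-bit register, MAC anti-spoof
 * enable bits in the low byte, VLAN anti-spoof enable bits one byte up.
 */
#define SPOOF_MACAS_SHIFT  0
#define SPOOF_VLANAS_SHIFT 8

/* Model of the bit math hidden behind set_mac_anti_spoofing() /
 * set_vlan_anti_spoofing(): pick this VF's bit and set or clear it.
 */
static uint32_t set_vf_spoof_bit(uint32_t regval, int vf, int shift_base,
				 int enable)
{
	int bit = (vf % 8) + shift_base;

	if (enable)
		regval |= 1u << bit;
	else
		regval &= ~(1u << bit);
	return regval;
}

int main(void)
{
	uint32_t reg = 0;

	/* VF 10 lives in PFVFSPOOF register 10 / 8 = 1, bit position 10 % 8 = 2 */
	reg = set_vf_spoof_bit(reg, 10, SPOOF_MACAS_SHIFT, 1);
	reg = set_vf_spoof_bit(reg, 10, SPOOF_VLANAS_SHIFT, 1);
	printf("PFVFSPOOF[1] = 0x%08x\n", (unsigned int)reg); /* prints 0x00000404 */
	return 0;
}

Centralizing this math behind mac_ops is what lets ixgbe_ndo_set_vf_spoofchk() stay hardware-agnostic while X550 parts layer the extra Ethertype anti-spoof filters on top.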
@@ -697,16 +697,16 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
 #define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
 #define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
-#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
-#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
-#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_VALID BIT(0) /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE (3u << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX BIT(7) /* 0: Initiator, 1: Target */
 #define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
 #define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
 #define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
 #define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
 #define IXGBE_FCBUFF_OFFSET_SHIFT 16
-#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
-#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
+#define IXGBE_FCDMARW_WE BIT(14) /* Write enable */
+#define IXGBE_FCDMARW_RE BIT(15) /* Read enable */
 #define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
 #define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
 #define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
@@ -723,23 +723,23 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_FCFLT 0x05108 /* FC FLT Context */
 #define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
 #define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
-#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
-#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
+#define IXGBE_FCFLT_VALID BIT(0) /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST BIT(1) /* Filter First */
 #define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
 #define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
-#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
-#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
-#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
+#define IXGBE_FCFLTRW_RVALDT BIT(13) /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE BIT(14) /* Write Enable */
+#define IXGBE_FCFLTRW_RE BIT(15) /* Read Enable */
 /* FCoE Receive Control */
 #define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
-#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
-#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
-#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
-#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
-#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
-#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
-#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
-#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOELLI BIT(0) /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD BIT(1) /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH BIT(2) /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH BIT(3) /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH BIT(4) /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH BIT(5) /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC BIT(6) /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO BIT(7) /* FC CRC Byte Ordering */
 #define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
 #define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
 /* FCoE Redirection */
@@ -1131,6 +1131,7 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_XPCSS 0x04290
 #define IXGBE_MFLCN 0x04294
 #define IXGBE_SERDESC 0x04298
+#define IXGBE_MAC_SGMII_BUSY 0x04298
 #define IXGBE_MACS 0x0429C
 #define IXGBE_AUTOC 0x042A0
 #define IXGBE_LINKS 0x042A4
@@ -1255,20 +1256,20 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
 #define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
 #define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
 #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
 #define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
 #define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
 #define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
 
 /* MSCA Bit Masks */
@@ -1747,7 +1748,7 @@ enum {
 #define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */
 #define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
 #define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
-#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_ENABLE BIT(26) /* bit 26 */
 #define IXGBE_ETQF_POOL_SHIFT 20
 
 #define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
@@ -1873,20 +1874,20 @@ enum {
 #define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
 #define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
 #define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
-#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_XAUI (0u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4 (1u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4 (2u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
 #define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
 #define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
 #define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
-#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_KR (0u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (1u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (2u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
 #define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000
 #define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
@@ -2123,6 +2124,7 @@ enum {
 #define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
 #define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
 #define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR BIT(7)
 #define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
 #define IXGBE_FW_LESM_STATE_1 0x1
 #define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
@@ -2838,15 +2840,15 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
 #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
 #define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
-#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
-#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
-#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
-#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */
-#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */
-#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
-#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
-#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
-#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_SOF (BIT(2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC (BIT(3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE (BIT(4) << 10) /* Orientation: End */
+#define IXGBE_ADVTXD_FCOEF_ORIS (BIT(5) << 10) /* Orientation: Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N (0u << 10) /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T (1u << 10) /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI (2u << 10) /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A (3u << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */
 #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
 #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
@@ -3581,7 +3583,7 @@ struct ixgbe_info {
 #define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
 #define IXGBE_FUSES0_300MHZ BIT(5)
-#define IXGBE_FUSES0_REV_MASK (3 << 6)
+#define IXGBE_FUSES0_REV_MASK (3u << 6)
 
 #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
 #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
@@ -3595,25 +3597,25 @@ struct ixgbe_info {
 #define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
 #define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
 
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11)
 
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (7u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4u << 8)
 #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12)
 #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
-
-#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28)
-#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ BIT(14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC BIT(15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX BIT(16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31)
+
+#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE BIT(28)
+#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE BIT(29)
 #define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0)
 #define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1)
@@ -3621,28 +3623,28 @@ struct ixgbe_info {
 #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12)
 #define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19)
 
-#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN BIT(6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN BIT(15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN BIT(16)
 
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4)
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL BIT(4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS BIT(2)
 
-#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16)
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (3u << 16)
 
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN BIT(1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN BIT(2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31)
 
 #define IXGBE_KX4_LINK_CNTL_1 0x4C
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31)
 
 #define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
 #define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
@@ -3658,7 +3660,7 @@ struct ixgbe_info {
 #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
 #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7
 #define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
-#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
 #define IXGBE_SB_IOSF_TARGET_KR_PHY 0
 #define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
 #define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
...
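Most of the header churn above swaps open-coded (1 << n) masks for the kernel's BIT(n) and gives multi-bit fields an unsigned u suffix. This is not purely cosmetic: left-shifting a signed 1 into bit 31 is undefined behavior in C, and on common compilers the result sign-extends when widened to 64 bits, which is exactly the situation for masks like IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART. A standalone sketch of the difference (BIT() redefined here for userspace; in the kernel it comes from include/linux/bitops.h as 1UL << n):

#include <stdio.h>

/* Userspace stand-in for the kernel's BIT() from include/linux/bitops.h */
#define BIT(n) (1UL << (n))

int main(void)
{
	/* (1 << 31) shifts into the sign bit of int: undefined behavior in C,
	 * and on a typical LP64 compiler the value sign-extends when widened.
	 */
	unsigned long from_signed = (unsigned long)(1 << 31);
	unsigned long from_bit = BIT(31);

	printf("(1 << 31) widened: %#lx\n", from_signed); /* 0xffffffff80000000 */
	printf("BIT(31):           %#lx\n", from_bit);    /* 0x80000000 */
	return 0;
}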
@@ -214,8 +214,8 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
 		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
 				    IXGBE_EEC_SIZE_SHIFT);
-		eeprom->word_size = 1 << (eeprom_size +
-					  IXGBE_EEPROM_WORD_SIZE_SHIFT);
+		eeprom->word_size = BIT(eeprom_size +
+					IXGBE_EEPROM_WORD_SIZE_SHIFT);
 
 		hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
 		       eeprom->type, eeprom->word_size);
...
@@ -335,8 +335,8 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
 		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
 				    IXGBE_EEC_SIZE_SHIFT);
-		eeprom->word_size = 1 << (eeprom_size +
-					  IXGBE_EEPROM_WORD_SIZE_SHIFT);
+		eeprom->word_size = BIT(eeprom_size +
+					IXGBE_EEPROM_WORD_SIZE_SHIFT);
 
 		hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
 		       eeprom->type, eeprom->word_size);
@@ -2646,9 +2646,9 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
 	if (enable)
-		pfvfspoof |= (1 << vf_target_shift);
+		pfvfspoof |= BIT(vf_target_shift);
 	else
-		pfvfspoof &= ~(1 << vf_target_shift);
+		pfvfspoof &= ~BIT(vf_target_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
@@ -2765,7 +2765,7 @@ static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
 		ixgbe_release_swfw_sync_X540(hw, hmask);
 		if (status != IXGBE_ERR_TOKEN_RETRY)
 			return status;
-		udelay(FW_PHY_TOKEN_DELAY * 1000);
+		msleep(FW_PHY_TOKEN_DELAY);
 	}
 
 	return status;
@@ -2908,7 +2908,7 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
 	.get_media_type = &ixgbe_get_media_type_X550em,
 	.get_san_mac_addr = NULL,
 	.get_wwn_prefix = NULL,
-	.setup_link = NULL, /* defined later */
+	.setup_link = &ixgbe_setup_mac_link_X540,
 	.get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
 	.get_bus_info = &ixgbe_get_bus_info_X550em,
 	.setup_sfp = ixgbe_setup_sfp_modules_X550em,
@@ -2932,7 +2932,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
 	.setup_sfp = ixgbe_setup_sfp_modules_X550em,
 	.acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
 	.release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
-	.setup_fc = ixgbe_setup_fc_generic,
+	.setup_fc = ixgbe_setup_fc_x550em,
 	.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
 	.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
 };
...
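Two of the hunks above are behavioral fixes rather than cleanups: x550em_a/x parts get a real default for mac->ops.setup_link (previously NULL, which broke link setup on backplane devices), and x550em_a now uses the x550em flow-control setup instead of the generic one. The udelay-to-msleep change is Arnd's ARM warning fix: udelay() busy-waits and cannot safely take millisecond-scale arguments on every architecture, while msleep() sleeps, which is the right primitive in process context. A hedged sketch of that rule of thumb (illustrative helper, not driver code; see the kernel's timers-howto documentation):

#include <linux/delay.h>

/* Illustrative only: picking a delay primitive by scale, per timers-howto.
 * Busy-wait for sub-millisecond delays; sleep for anything longer when we
 * are allowed to, as in the token-retry loop above.
 */
static void example_token_wait(unsigned int delay_us)
{
	if (delay_us < 1000)
		udelay(delay_us);	 /* spins the CPU */
	else
		msleep(delay_us / 1000); /* yields the CPU */
}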
@@ -74,7 +74,7 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_RXDCTL_RLPML_EN 0x00008000
 
 /* DCA Control */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
 
 /* PSRTYPE bit definitions */
 #define IXGBE_PSRTYPE_TCPHDR 0x00000010
@@ -296,16 +296,16 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
 #define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
 
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
-
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
 
 #endif /* _IXGBEVF_DEFINES_H_ */
@@ -42,65 +42,54 @@
 #define IXGBE_ALL_RAR_ENTRIES 16
 
+enum {NETDEV_STATS, IXGBEVF_STATS};
+
 struct ixgbe_stats {
 	char stat_string[ETH_GSTRING_LEN];
-	struct {
-		int sizeof_stat;
-		int stat_offset;
-		int base_stat_offset;
-		int saved_reset_offset;
-	};
+	int type;
+	int sizeof_stat;
+	int stat_offset;
 };
 
-#define IXGBEVF_STAT(m, b, r) { \
-	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
-	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
-	.base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
-	.saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+#define IXGBEVF_STAT(_name, _stat) { \
+	.stat_string = _name, \
+	.type = IXGBEVF_STATS, \
+	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
+	.stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
 }
 
-#define IXGBEVF_ZSTAT(m) { \
-	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
-	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
-	.base_stat_offset = -1, \
-	.saved_reset_offset = -1 \
+#define IXGBEVF_NETDEV_STAT(_net_stat) { \
+	.stat_string = #_net_stat, \
+	.type = NETDEV_STATS, \
+	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
 }
 
-static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
-	{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
-				    stats.saved_reset_vfgprc)},
-	{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
-				    stats.saved_reset_vfgptc)},
-	{"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
-				  stats.saved_reset_vfgorc)},
-	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
-				  stats.saved_reset_vfgotc)},
-	{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
-	{"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
-	{"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
-	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
-				   stats.saved_reset_vfmprc)},
-	{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
-#ifdef BP_EXTENDED_STATS
-	{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
-	{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
-	{"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
-	{"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
-	{"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
-	{"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
-#endif
+static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
+	IXGBEVF_NETDEV_STAT(rx_packets),
+	IXGBEVF_NETDEV_STAT(tx_packets),
+	IXGBEVF_NETDEV_STAT(rx_bytes),
+	IXGBEVF_NETDEV_STAT(tx_bytes),
+	IXGBEVF_STAT("tx_busy", tx_busy),
+	IXGBEVF_STAT("tx_restart_queue", restart_queue),
+	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
+	IXGBEVF_NETDEV_STAT(multicast),
+	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
 };
 
-#define IXGBE_QUEUE_STATS_LEN 0
-#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBEVF_QUEUE_STATS_LEN ( \
+	(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
+	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+	 (sizeof(struct ixgbe_stats) / sizeof(u64)))
+#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
 
-#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
 
 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register test (offline)",
 	"Link test (on/offline)"
 };
 
-#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
 
 static int ixgbevf_get_settings(struct net_device *netdev,
 				struct ethtool_cmd *ecmd)
@@ -177,7 +166,8 @@ static void ixgbevf_get_regs(struct net_device *netdev,
 	memset(p, 0, regs_len);
 
-	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+	/* generate a number suitable for ethtool's register version */
+	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
 
 	/* General Registers */
 	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
@@ -392,13 +382,13 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 	return err;
 }
 
-static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
 {
 	switch (stringset) {
 	case ETH_SS_TEST:
-		return IXGBE_TEST_LEN;
+		return IXGBEVF_TEST_LEN;
 	case ETH_SS_STATS:
-		return IXGBE_GLOBAL_STATS_LEN;
+		return IXGBEVF_STATS_LEN;
 	default:
 		return -EINVAL;
 	}
@@ -408,70 +398,138 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
 				      struct ethtool_stats *stats, u64 *data)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-	char *base = (char *)adapter;
-	int i;
-#ifdef BP_EXTENDED_STATS
-	u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
-	    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rx_yields += adapter->rx_ring[i]->stats.yields;
-		rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
-		rx_yields += adapter->rx_ring[i]->stats.yields;
-	}
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		tx_yields += adapter->tx_ring[i]->stats.yields;
-		tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
-		tx_yields += adapter->tx_ring[i]->stats.yields;
-	}
-
-	adapter->bp_rx_yields = rx_yields;
-	adapter->bp_rx_cleaned = rx_cleaned;
-	adapter->bp_rx_missed = rx_missed;
-
-	adapter->bp_tx_yields = tx_yields;
-	adapter->bp_tx_cleaned = tx_cleaned;
-	adapter->bp_tx_missed = tx_missed;
-#endif
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *net_stats;
+	unsigned int start;
+	struct ixgbevf_ring *ring;
+	int i, j;
+	char *p;
 
 	ixgbevf_update_stats(adapter);
-	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-		char *p = base + ixgbe_gstrings_stats[i].stat_offset;
-		char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
-		char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
-
-		if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
-			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
-				data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
-			else
-				data[i] = *(u64 *)p;
-		} else {
-			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
-				data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
-			else
-				data[i] = *(u32 *)p;
-		}
+	net_stats = dev_get_stats(netdev, &temp);
+	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+		switch (ixgbevf_gstrings_stats[i].type) {
+		case NETDEV_STATS:
+			p = (char *)net_stats +
+					ixgbevf_gstrings_stats[i].stat_offset;
+			break;
+		case IXGBEVF_STATS:
+			p = (char *)adapter +
+					ixgbevf_gstrings_stats[i].stat_offset;
+			break;
+		default:
+			data[i] = 0;
+			continue;
+		}
+
+		data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
+			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
+
+	/* populate Tx queue data */
+	for (j = 0; j < adapter->num_tx_queues; j++) {
+		ring = adapter->tx_ring[j];
+		if (!ring) {
+			data[i++] = 0;
+			data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+#endif
+			continue;
+		}
+
+		do {
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			data[i] = ring->stats.packets;
+			data[i + 1] = ring->stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+		i += 2;
+#ifdef BP_EXTENDED_STATS
+		data[i] = ring->stats.yields;
+		data[i + 1] = ring->stats.misses;
+		data[i + 2] = ring->stats.cleaned;
+		i += 3;
+#endif
+	}
+
+	/* populate Rx queue data */
+	for (j = 0; j < adapter->num_rx_queues; j++) {
+		ring = adapter->rx_ring[j];
+		if (!ring) {
+			data[i++] = 0;
+			data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+#endif
+			continue;
+		}
+
+		do {
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			data[i] = ring->stats.packets;
+			data[i + 1] = ring->stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+		i += 2;
+#ifdef BP_EXTENDED_STATS
+		data[i] = ring->stats.yields;
+		data[i + 1] = ring->stats.misses;
+		data[i + 2] = ring->stats.cleaned;
+		i += 3;
+#endif
+	}
 }
 
 static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
 				u8 *data)
 {
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	char *p = (char *)data;
 	int i;
 
 	switch (stringset) {
 	case ETH_SS_TEST:
 		memcpy(data, *ixgbe_gstrings_test,
-		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+		       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:
-		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+		for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
+
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			sprintf(p, "tx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "tx_queue_%u_bp_napi_yield", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bp_misses", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bp_cleaned", i);
+			p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+		}
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			sprintf(p, "rx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "rx_queue_%u_bp_poll_yield", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bp_misses", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bp_cleaned", i);
+			p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+		}
 		break;
 	}
 }
...
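After this refactor, ixgbevf's ethtool stats follow the ixgbe layout: the data[] array filled by ixgbevf_get_ethtool_stats() and the names emitted by ixgbevf_get_strings() must agree entry for entry, with the global stats first, then packets/bytes for each Tx queue, then for each Rx queue (plus three busy-poll counters per queue when BP_EXTENDED_STATS is set). A small userspace sketch of that ordering contract, with illustrative queue counts (the driver reads them from the adapter at runtime):

#include <stdio.h>

/* Illustrative counts only; not taken from the driver */
#define NUM_GLOBAL_STATS 9	/* ARRAY_SIZE(ixgbevf_gstrings_stats) */
#define NUM_TX_QUEUES 2
#define NUM_RX_QUEUES 2

int main(void)
{
	int i, q, idx = 0;

	/* global stats come first and map 1:1 onto the string table */
	for (i = 0; i < NUM_GLOBAL_STATS; i++)
		printf("data[%d] <-> global stat %d\n", idx++, i);
	/* then two entries per Tx queue, in queue order */
	for (q = 0; q < NUM_TX_QUEUES; q++) {
		printf("data[%d] <-> tx_queue_%d_packets\n", idx++, q);
		printf("data[%d] <-> tx_queue_%d_bytes\n", idx++, q);
	}
	/* then two entries per Rx queue */
	for (q = 0; q < NUM_RX_QUEUES; q++) {
		printf("data[%d] <-> rx_queue_%d_packets\n", idx++, q);
		printf("data[%d] <-> rx_queue_%d_bytes\n", idx++, q);
	}
	return 0;
}

The per-ring counters are read under the u64_stats sequence counter (the fetch_begin/fetch_retry loop in the hunk), which replaces the old scheme of copying busy-poll totals into adapter-wide bp_* fields.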
@@ -166,10 +166,10 @@ struct ixgbevf_ring {
 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
-#define IXGBE_TX_FLAGS_CSUM (u32)(1)
-#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
-#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
-#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_CSUM BIT(0)
+#define IXGBE_TX_FLAGS_VLAN BIT(1)
+#define IXGBE_TX_FLAGS_TSO BIT(2)
+#define IXGBE_TX_FLAGS_IPV4 BIT(3)
 #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -422,16 +422,6 @@ struct ixgbevf_adapter {
 	unsigned int tx_ring_count;
 	unsigned int rx_ring_count;
 
-#ifdef BP_EXTENDED_STATS
-	u64 bp_rx_yields;
-	u64 bp_rx_cleaned;
-	u64 bp_rx_missed;
-
-	u64 bp_tx_yields;
-	u64 bp_tx_cleaned;
-	u64 bp_tx_missed;
-#endif
-
 	u8 __iomem *io_addr; /* Mainly for iounmap use */
 	u32 link_speed;
 	bool link_up;
...
@@ -1056,7 +1056,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
 	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
 		ixgbevf_irq_enable_queues(adapter,
-					  1 << q_vector->v_idx);
+					  BIT(q_vector->v_idx));
 
 	return 0;
 }
@@ -1158,14 +1158,14 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
 		}
 
 		/* add q_vector eims value to global eims_enable_mask */
-		adapter->eims_enable_mask |= 1 << v_idx;
+		adapter->eims_enable_mask |= BIT(v_idx);
 
 		ixgbevf_write_eitr(q_vector);
 	}
 
 	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
 	/* setup eims_other and add value to global eims_enable_mask */
-	adapter->eims_other = 1 << v_idx;
+	adapter->eims_other = BIT(v_idx);
 	adapter->eims_enable_mask |= adapter->eims_other;
 }
@@ -1589,8 +1589,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
 	txdctl |= (8 << 16);	/* WTHRESH = 8 */
 
 	/* Setting PTHRESH to 32 both improves performance */
-	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
+	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
 		  32;		/* PTHRESH = 32 */
 
 	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
@@ -1646,7 +1646,7 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
 		      IXGBE_PSRTYPE_L2HDR;
 
 	if (adapter->num_rx_queues > 1)
-		psrtype |= 1 << 29;
+		psrtype |= BIT(29);
 
 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
 }
@@ -2056,7 +2056,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 	spin_lock_bh(&adapter->mbx_lock);
 
 	while (api[idx] != ixgbe_mbox_api_unknown) {
-		err = ixgbevf_negotiate_api_version(hw, api[idx]);
+		err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
 		if (!err)
 			break;
 		idx++;
@@ -2797,7 +2797,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
 		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
 
 		if (qv->rx.ring || qv->tx.ring)
-			eics |= 1 << i;
+			eics |= BIT(i);
 	}
 
 	/* Cause software interrupt to ensure rings are cleaned */
@@ -3272,9 +3272,18 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 		       struct ixgbevf_tx_buffer *first,
 		       u8 *hdr_len)
 {
+	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
 	struct sk_buff *skb = first->skb;
-	u32 vlan_macip_lens, type_tucmd;
-	u32 mss_l4len_idx, l4len;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
 	int err;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3287,49 +3296,53 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 	if (err < 0)
 		return err;
 
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_checksum_start(skb);
+
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-	if (first->protocol == htons(ETH_P_IP)) {
-		struct iphdr *iph = ip_hdr(skb);
-
-		iph->tot_len = 0;
-		iph->check = 0;
-		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-							 iph->daddr, 0,
-							 IPPROTO_TCP,
-							 0);
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+						  csum_unfold(l4.tcp->check)));
 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+		ip.v4->tot_len = 0;
 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
 				   IXGBE_TX_FLAGS_CSUM |
 				   IXGBE_TX_FLAGS_IPV4;
-	} else if (skb_is_gso_v6(skb)) {
-		ipv6_hdr(skb)->payload_len = 0;
-		tcp_hdr(skb)->check =
-		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-				     &ipv6_hdr(skb)->daddr,
-				     0, IPPROTO_TCP, 0);
+	} else {
+		ip.v6->payload_len = 0;
 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
 				   IXGBE_TX_FLAGS_CSUM;
 	}
 
-	/* compute header lengths */
-	l4len = tcp_hdrlen(skb);
-	*hdr_len += l4len;
-	*hdr_len = skb_transport_offset(skb) + l4len;
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+
+	/* compute length of segmentation header */
+	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+	/* remove payload length from inner checksum */
+	paylen = skb->len - l4_offset;
+	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
 
-	/* update GSO size and bytecount with header size */
+	/* update gso size and bytecount with header size */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
 	/* mss_l4len_id: use 1 as index for TSO */
-	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
-	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+	mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
 
 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-	vlan_macip_lens = skb_network_header_len(skb);
-	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens = l4.hdr - ip.hdr;
+	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
@@ -3422,7 +3435,7 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
 	/* use index 1 context for TSO/FSO/FCOE */
 	if (tx_flags & IXGBE_TX_FLAGS_TSO)
-		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+		olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
 
 	/* Check Context must be set if Tx switch is enabled, which it
 	 * always is for case where virtual functions are running
@@ -3870,6 +3883,40 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
 	return stats;
 }
 
+#define IXGBEVF_MAX_MAC_HDR_LEN 127
+#define IXGBEVF_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t
+ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
+		       netdev_features_t features)
+{
+	unsigned int network_hdr_len, mac_hdr_len;
+
+	/* Make certain the headers can be described by a context descriptor */
+	mac_hdr_len = skb_network_header(skb) - skb->data;
+	if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+	if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	/* We can only support IPV4 TSO in tunnels if we can mangle the
+	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 */
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+		features &= ~NETIF_F_TSO;
+
+	return features;
+}
+
 static const struct net_device_ops ixgbevf_netdev_ops = {
 	.ndo_open = ixgbevf_open,
 	.ndo_stop = ixgbevf_close,
@@ -3888,7 +3935,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = ixgbevf_netpoll,
 #endif
-	.ndo_features_check = passthru_features_check,
+	.ndo_features_check = ixgbevf_features_check,
 };
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3999,23 +4046,31 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			      NETIF_F_HW_CSUM |
 			      NETIF_F_SCTP_CRC;
 
-	netdev->features = netdev->hw_features |
-			   NETIF_F_HW_VLAN_CTAG_TX |
-			   NETIF_F_HW_VLAN_CTAG_RX |
-			   NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+				      NETIF_F_GSO_GRE_CSUM | \
+				      NETIF_F_GSO_IPIP | \
+				      NETIF_F_GSO_SIT | \
+				      NETIF_F_GSO_UDP_TUNNEL | \
+				      NETIF_F_GSO_UDP_TUNNEL_CSUM)
 
-	netdev->vlan_features |= NETIF_F_SG |
-				 NETIF_F_TSO |
-				 NETIF_F_TSO6 |
-				 NETIF_F_HW_CSUM |
-				 NETIF_F_SCTP_CRC;
+	netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
+	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+			       IXGBEVF_GSO_PARTIAL_FEATURES;
 
-	netdev->mpls_features |= NETIF_F_HW_CSUM;
-	netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+	netdev->features = netdev->hw_features;
 
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+	netdev->mpls_features |= NETIF_F_HW_CSUM;
+	netdev->hw_enc_features |= netdev->vlan_features;
+
+	/* set this bit last since it cannot be part of vlan_features */
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_TX;
+
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 
 	if (IXGBE_REMOVED(hw->hw_addr)) {
...
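The ixgbevf_tso() rewrite above computes header sizes from raw offsets (l4.hdr, ip.hdr, skb->data) instead of the IPv4/IPv6-specific helpers, which is what lets the same code handle tunneled frames under GSO partial; csum_replace_by_diff() then backs the payload length out of the checksum the stack seeded, since the hardware re-adds the correct per-segment length. The arithmetic is easy to check for the simple case. A worked example for an untunneled IPv4/TCP frame with a 14-byte Ethernet header, 20-byte IP header, and 20-byte TCP header (doff = 5); all values illustrative, not driver code:

#include <assert.h>

int main(void)
{
	/* illustrative frame: 14B Ethernet + 20B IPv4 + 20B TCP, 1514B total */
	unsigned int eth_len = 14, ip_len = 20, tcp_doff = 5, skb_len = 1514;

	unsigned int l4_offset = eth_len + ip_len;       /* l4.hdr - skb->data */
	unsigned int hdr_len = tcp_doff * 4 + l4_offset; /* *hdr_len in the driver */
	unsigned int paylen = skb_len - l4_offset;       /* TCP header + payload */

	assert(l4_offset == 34);
	assert(hdr_len == 54);  /* same as the old skb_transport_offset + tcp_hdrlen */
	assert(paylen == 1480); /* the amount backed out of l4.tcp->check */
	return 0;
}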
@@ -670,11 +670,11 @@ void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
 }
 
 /**
- * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
  * @hw: pointer to the HW structure
  * @api: integer containing requested API version
  **/
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 {
 	int err;
 	u32 msg[3];
@@ -769,6 +769,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
 	.stop_adapter = ixgbevf_stop_hw_vf,
 	.setup_link = ixgbevf_setup_mac_link_vf,
 	.check_link = ixgbevf_check_mac_link_vf,
+	.negotiate_api_version = ixgbevf_negotiate_api_version_vf,
 	.set_rar = ixgbevf_set_rar_vf,
 	.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
 	.update_xcast_mode = ixgbevf_update_xcast_mode,
...
@@ -51,6 +51,7 @@ struct ixgbe_mac_operations {
 	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
 	s32 (*stop_adapter)(struct ixgbe_hw *);
 	s32 (*get_bus_info)(struct ixgbe_hw *);
+	s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
 
 	/* Link */
 	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
@@ -208,7 +209,6 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
 #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
 
 void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 		       unsigned int *default_tc);
 int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues);
...
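Moving API negotiation into mac_ops (the last two hunks) means ixgbevf_negotiate_api() no longer calls the PF-mailbox implementation directly: it goes through hw->mac.ops.negotiate_api_version, so a future Hyper-V backend can supply its own implementation without touching the caller. A minimal userspace model of that indirection (all names and the version number illustrative):

#include <stdio.h>

struct hw;

/* minimal stand-in for struct ixgbe_mac_operations */
struct mac_operations {
	int (*negotiate_api_version)(struct hw *hw, int api);
};

struct hw {
	struct mac_operations mac_ops;
};

/* stand-in for the PF-mailbox path, ixgbevf_negotiate_api_version_vf() */
static int negotiate_api_version_vf(struct hw *hw, int api)
{
	(void)hw;
	printf("negotiating mailbox API version %d with the PF\n", api);
	return 0;
}

int main(void)
{
	struct hw hw = {
		.mac_ops = { .negotiate_api_version = negotiate_api_version_vf },
	};

	/* callers see only the ops table, as ixgbevf_negotiate_api() now does */
	return hw.mac_ops.negotiate_api_version(&hw, 12);
}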