Commit 1bc7fe64 authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2016-04-25

This series contains updates to ixgbe and ixgbevf.

Emil provides several patches, starting with the consolidation of the
logic behind configuring spoof checking.  Fixed an issue that was
breaking link for backplane devices, since x550em_a/x devices did not
have a default value for mac->ops.setup_link.  Refactored the ethtool
stats to bring the logic closer to how ixgbe handles stats and set up
per-queue stats for ixgbevf.
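
The consolidated spoof-check helpers (visible in the diff below) all
share the same PFVFSPOOF bit math: eight VFs per 32-bit register, one
enable bit per VF.  A minimal userspace sketch of that pattern (the
array and function names here are stand-ins, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    static uint32_t pfvfspoof[8]; /* stand-in for the register array */

    static void set_mac_anti_spoofing(int vf, int enable)
    {
            int vf_target_reg = vf >> 3;  /* 8 VFs per 32-bit register */
            int vf_target_shift = vf % 8; /* this VF's bit within it */

            if (enable)
                    pfvfspoof[vf_target_reg] |= BIT(vf_target_shift);
            else
                    pfvfspoof[vf_target_reg] &= ~BIT(vf_target_shift);
    }

    int main(void)
    {
            set_mac_anti_spoofing(11, 1); /* VF 11 -> register 1, bit 3 */
            printf("reg[1] = 0x%08x\n", (unsigned)pfvfspoof[1]); /* 0x00000008 */
            return 0;
    }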

Mark adds a register write flush to make sure previous register writes
complete before issuing a register read, which is needed when slower
links are in use.  Fixed the flow control setup for x550em_a, which
was using the incorrect fc_setup function.
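
The usual shape of such a flush, sketched with a hypothetical register
block (struct dev_regs and its fields are illustrative, not the ixgbe
register map): PCI writes are posted, so a dummy read back forces them
to complete before a dependent read is issued.

    #include <stdint.h>

    /* Hypothetical register block, for illustration only. */
    struct dev_regs {
            volatile uint32_t ctrl;
            volatile uint32_t status;
            volatile uint32_t result;
    };

    /* Reading any device register flushes previously posted writes. */
    static inline void reg_write_flush(struct dev_regs *regs)
    {
            (void)regs->status;
    }

    uint32_t read_after_writes(struct dev_regs *regs, uint32_t cmd)
    {
            regs->ctrl = cmd;       /* posted write */
            reg_write_flush(regs);  /* wait for it to land... */
            return regs->result;    /* ...before the dependent read */
    }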

Don added a workaround for crosstalk on empty SFP+ cages, since on
some systems that crosstalk could lead to link flap on the empty
cages.
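
A rough sketch of the idea, with stub helpers standing in for the
driver's module-presence and link checks (everything named here is
illustrative; the real workaround gates link-up reporting on SFP+
module presence, flagged by the new need_crosstalk_fix adapter field):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs; the driver would query presence pins and MAC link status. */
    static bool sfp_cage_populated(void) { return false; }
    static bool mac_reports_link_up(void) { return true; }

    static bool need_crosstalk_fix = true; /* set per-design from NVM */

    static bool link_is_trustworthy(void)
    {
            /* An empty cage can pick up enough crosstalk to look like
             * link; only believe link-up if a module is present. */
            if (need_crosstalk_fix && !sfp_cage_populated())
                    return false;
            return mac_reports_link_up();
    }

    int main(void)
    {
            printf("link %s\n", link_is_trustworthy() ? "up" : "down");
            return 0;
    }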

Jake converts ixgbe and ixgbevf to use the BIT() macro.
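
For reference, BIT() is defined in include/linux/bitops.h as an
unsigned-long shift, so the conversion drops the ad-hoc
(u32)(1 << n) casts; a small standalone example:

    #include <stdio.h>

    /* BIT() as in include/linux/bitops.h; UL keeps the shift unsigned. */
    #define BIT(nr) (1UL << (nr))

    #define IXGBE_FLAG_MSI_ENABLED  BIT(1)  /* was (u32)(1 << 1) */
    #define IXGBE_FLAG_MSIX_ENABLED BIT(3)  /* was (u32)(1 << 3) */

    int main(void)
    {
            unsigned long flags = 0;

            flags |= IXGBE_FLAG_MSIX_ENABLED;  /* set a flag */
            flags &= ~IXGBE_FLAG_MSI_ENABLED;  /* clear another */
            printf("flags = 0x%08lx\n", flags); /* 0x00000008 */
            return 0;
    }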

Alex Duyck adds support for partial GSO segmentation in the case of
tunnels for ixgbe and ixgbevf.  Then preps for HyperV by moving the API
negotiation into mac_ops.
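
Moving negotiation behind an ops table matters because a different
backend (such as Hyper-V, which has no PF mailbox) can swap in its own
routine without touching callers; a compressed sketch (all names
illustrative, not the driver's types):

    #include <stdio.h>

    struct hw;

    struct mac_ops {
            int (*negotiate_api_version)(struct hw *hw, int api);
    };

    struct hw {
            struct mac_ops ops;
    };

    static int negotiate_via_mailbox(struct hw *hw, int api)
    {
            (void)hw;
            printf("mailbox negotiation, api %d\n", api);
            return 0;
    }

    static int negotiate_hyperv(struct hw *hw, int api)
    {
            (void)hw;
            /* no PF mailbox; pretend only the base API is supported */
            return api == 0 ? 0 : -1;
    }

    int main(void)
    {
            struct hw hw = {
                    .ops = { .negotiate_api_version = negotiate_via_mailbox },
            };

            hw.ops.negotiate_api_version(&hw, 12); /* caller is backend-agnostic */
            hw.ops.negotiate_api_version = negotiate_hyperv;
            return hw.ops.negotiate_api_version(&hw, 0);
    }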

Arnd Bergmann provides a fix for ARM compile warnings in linux-next
by converting a udelay() to msleep().
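
udelay() busy-waits in microseconds, and multiplying a millisecond
constant by 1000 (as in the old code below) produces a delay large
enough to trip ARM's build-time check for oversized constant udelay()
arguments, while msleep() simply sleeps for the same interval.  A
userspace analogue (the constant's value is illustrative):

    #include <time.h>

    #define FW_PHY_TOKEN_DELAY 100 /* ms; value illustrative */

    static void msleep_like(unsigned int ms)
    {
            struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

            nanosleep(&ts, NULL); /* sleep, don't spin */
    }

    int main(void)
    {
            msleep_like(FW_PHY_TOKEN_DELAY);
            return 0;
    }
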
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e7157f28 d4f90d9d
@@ -143,14 +143,11 @@ struct vf_data_storage {
 	unsigned char vf_mac_addresses[ETH_ALEN];
 	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
 	u16 num_vf_mc_hashes;
-	u16 default_vf_vlan_id;
-	u16 vlans_enabled;
 	bool clear_to_send;
 	bool pf_set_mac;
 	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
 	u16 pf_qos;
 	u16 tx_rate;
-	u16 vlan_count;
 	u8 spoofchk_enabled;
 	bool rss_query_enabled;
 	u8 trusted;
@@ -173,7 +170,7 @@ struct vf_macvlans {
 };
 #define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+#define IXGBE_MAX_DATA_PER_TXD (1u << IXGBE_MAX_TXD_PWR)
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
@@ -623,44 +620,44 @@ struct ixgbe_adapter {
 	 * thus the additional *_CAPABLE flags.
 	 */
 	u32 flags;
-#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6)
-#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
-#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11)
-#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12)
-#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13)
-#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16)
-#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21)
-#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22)
-#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23)
+#define IXGBE_FLAG_MSI_ENABLED BIT(1)
+#define IXGBE_FLAG_MSIX_ENABLED BIT(3)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE BIT(4)
+#define IXGBE_FLAG_RX_PS_CAPABLE BIT(5)
+#define IXGBE_FLAG_RX_PS_ENABLED BIT(6)
+#define IXGBE_FLAG_DCA_ENABLED BIT(8)
+#define IXGBE_FLAG_DCA_CAPABLE BIT(9)
+#define IXGBE_FLAG_IMIR_ENABLED BIT(10)
+#define IXGBE_FLAG_MQ_CAPABLE BIT(11)
+#define IXGBE_FLAG_DCB_ENABLED BIT(12)
+#define IXGBE_FLAG_VMDQ_CAPABLE BIT(13)
+#define IXGBE_FLAG_VMDQ_ENABLED BIT(14)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE BIT(15)
+#define IXGBE_FLAG_NEED_LINK_UPDATE BIT(16)
+#define IXGBE_FLAG_NEED_LINK_CONFIG BIT(17)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE BIT(18)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(19)
+#define IXGBE_FLAG_FCOE_CAPABLE BIT(20)
+#define IXGBE_FLAG_FCOE_ENABLED BIT(21)
+#define IXGBE_FLAG_SRIOV_CAPABLE BIT(22)
+#define IXGBE_FLAG_SRIOV_ENABLED BIT(23)
 #define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24)
 #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25)
 #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26)
 	u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0)
-#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
-#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
-#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4)
-#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
-#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
-#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
-#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
-#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
-#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
+#define IXGBE_FLAG2_RSC_CAPABLE BIT(0)
+#define IXGBE_FLAG2_RSC_ENABLED BIT(1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(2)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5)
+#define IXGBE_FLAG2_RESET_REQUESTED BIT(6)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7)
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
+#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
 #define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
 #define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
@@ -806,6 +803,8 @@ struct ixgbe_adapter {
 #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
 	u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)];
+	bool need_crosstalk_fix;
 };
 static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -828,7 +827,7 @@ struct ixgbe_fdir_filter {
 	struct hlist_node fdir_node;
 	union ixgbe_atr_input filter;
 	u16 sw_idx;
-	u16 action;
+	u64 action;
 };
 enum ixgbe_state_t {
...
@@ -792,7 +792,7 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 	}
 	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
-	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+	gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6));
 	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
 	/*
@@ -914,10 +914,10 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
 	if (vlan_on)
 		/* Turn on this VLAN id */
-		bits |= (1 << bitindex);
+		bits |= BIT(bitindex);
 	else
 		/* Turn off this VLAN id */
-		bits &= ~(1 << bitindex);
+		bits &= ~BIT(bitindex);
 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
 	return 0;
...
@@ -1296,17 +1296,17 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
 do { \
 	u32 n = (_n); \
-	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+	if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \
 		common_hash ^= lo_hash_dword >> n; \
-	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
 		bucket_hash ^= lo_hash_dword >> n; \
-	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \
 		sig_hash ^= lo_hash_dword << (16 - n); \
-	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+	if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \
 		common_hash ^= hi_hash_dword >> n; \
-	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
 		bucket_hash ^= hi_hash_dword >> n; \
-	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \
 		sig_hash ^= hi_hash_dword << (16 - n); \
 } while (0)
@@ -1440,9 +1440,9 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
 do { \
 	u32 n = (_n); \
-	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+	if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
 		bucket_hash ^= lo_hash_dword >> n; \
-	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+	if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
 		bucket_hash ^= hi_hash_dword >> n; \
 } while (0)
...
@@ -825,8 +825,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
 		 */
 		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
 				    IXGBE_EEC_SIZE_SHIFT);
-		eeprom->word_size = 1 << (eeprom_size +
-					  IXGBE_EEPROM_WORD_SIZE_SHIFT);
+		eeprom->word_size = BIT(eeprom_size +
+					IXGBE_EEPROM_WORD_SIZE_SHIFT);
 	}
 	if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -1502,7 +1502,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
 	 * one bit at a time. Determine the starting bit based on count
 	 */
-	mask = 0x01 << (count - 1);
+	mask = BIT(count - 1);
 	for (i = 0; i < count; i++) {
 		/*
@@ -1991,7 +1991,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
 	 */
 	vector_reg = (vector >> 5) & 0x7F;
 	vector_bit = vector & 0x1F;
-	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+	hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
 }
 /**
@@ -2921,10 +2921,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 			mpsar_hi = 0;
 		}
 	} else if (vmdq < 32) {
-		mpsar_lo &= ~(1 << vmdq);
+		mpsar_lo &= ~BIT(vmdq);
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
 	} else {
-		mpsar_hi &= ~(1 << (vmdq - 32));
+		mpsar_hi &= ~BIT(vmdq - 32);
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
 	}
@@ -2953,11 +2953,11 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 	if (vmdq < 32) {
 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-		mpsar |= 1 << vmdq;
+		mpsar |= BIT(vmdq);
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
 	} else {
 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-		mpsar |= 1 << (vmdq - 32);
+		mpsar |= BIT(vmdq - 32);
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
 	}
 	return 0;
@@ -2978,11 +2978,11 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
 	u32 rar = hw->mac.san_mac_rar_index;
 	if (vmdq < 32) {
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
 	} else {
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
 	}
 	return 0;
@@ -3082,7 +3082,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 	 * bits[4-0]: which bit in the register
 	 */
 	regidx = vlan / 32;
-	vfta_delta = 1 << (vlan % 32);
+	vfta_delta = BIT(vlan % 32);
 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
 	/* vfta_delta represents the difference between the current value
@@ -3113,12 +3113,12 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
 	/* set the pool bit */
-	bits |= 1 << (vind % 32);
+	bits |= BIT(vind % 32);
 	if (vlan_on)
 		goto vlvf_update;
 	/* clear the pool bit */
-	bits ^= 1 << (vind % 32);
+	bits ^= BIT(vind % 32);
 	if (!bits &&
 	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
@@ -3310,43 +3310,25 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
 /**
  * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
  * @hw: pointer to hardware structure
- * @enable: enable or disable switch for anti-spoofing
- * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ * @enable: enable or disable switch for MAC anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
  *
  **/
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
 {
-	int j;
-	int pf_target_reg = pf >> 3;
-	int pf_target_shift = pf % 8;
-	u32 pfvfspoof = 0;
+	int vf_target_reg = vf >> 3;
+	int vf_target_shift = vf % 8;
+	u32 pfvfspoof;
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		return;
+	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
 	if (enable)
-		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
-	/*
-	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
-	 * MAC anti-spoof enables in each register array element.
-	 */
-	for (j = 0; j < pf_target_reg; j++)
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-	/*
-	 * The PF should be allowed to spoof so that it can support
-	 * emulation mode NICs. Do not set the bits assigned to the PF
-	 */
-	pfvfspoof &= (1 << pf_target_shift) - 1;
-	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-	/*
-	 * Remaining pools belong to the PF so they do not need to have
-	 * anti-spoofing enabled.
-	 */
-	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
+		pfvfspoof |= BIT(vf_target_shift);
+	else
+		pfvfspoof &= ~BIT(vf_target_shift);
+	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
 /**
@@ -3367,9 +3349,9 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
 	if (enable)
-		pfvfspoof |= (1 << vf_target_shift);
+		pfvfspoof |= BIT(vf_target_shift);
 	else
-		pfvfspoof &= ~(1 << vf_target_shift);
+		pfvfspoof &= ~BIT(vf_target_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
...
@@ -106,7 +106,7 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
...
@@ -186,7 +186,7 @@ void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
 	for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
 		if (tc_config[tc].dcb_pfc != pfc_disabled)
-			*pfc_en |= 1 << tc;
+			*pfc_en |= BIT(tc);
 	}
 }
@@ -232,7 +232,7 @@ void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
 u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
 {
 	struct tc_configuration *tc_config = &cfg->tc_config[0];
-	u8 prio_mask = 1 << up;
+	u8 prio_mask = BIT(up);
 	u8 tc = cfg->num_tcs.pg_tcs;
 	/* If tc is 0 then DCB is likely not enabled or supported */
...
@@ -210,7 +210,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 	/* Configure PFC Tx thresholds per TC */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		if (!(pfc_en & (1 << i))) {
+		if (!(pfc_en & BIT(i))) {
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
 			continue;
...
@@ -248,7 +248,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 		int enabled = 0;
 		for (j = 0; j < MAX_USER_PRIORITY; j++) {
-			if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
+			if ((prio_tc[j] == i) && (pfc_en & BIT(j))) {
 				enabled = 1;
 				break;
 			}
...
@@ -62,7 +62,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
 	};
 	u8 up = dcb_getapp(adapter->netdev, &app);
-	if (up && !(up & (1 << adapter->fcoe.up)))
+	if (up && !(up & BIT(adapter->fcoe.up)))
 		changes |= BIT_APP_UPCHG;
 #endif
@@ -657,7 +657,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
 	    app->protocol == ETH_P_FCOE) {
 		u8 app_mask = dcb_ieee_getapp_mask(dev, app);
-		if (app_mask & (1 << adapter->fcoe.up))
+		if (app_mask & BIT(adapter->fcoe.up))
 			return 0;
 		adapter->fcoe.up = app->priority;
@@ -700,7 +700,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
 	    app->protocol == ETH_P_FCOE) {
 		u8 app_mask = dcb_ieee_getapp_mask(dev, app);
-		if (app_mask & (1 << adapter->fcoe.up))
+		if (app_mask & BIT(adapter->fcoe.up))
 			return 0;
 		adapter->fcoe.up = app_mask ?
...
@@ -1586,7 +1586,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 	/* Test each interrupt */
 	for (; i < 10; i++) {
 		/* Interrupt to test */
-		mask = 1 << i;
+		mask = BIT(i);
 		if (!shared_int) {
 			/*
@@ -3014,14 +3014,14 @@ static int ixgbe_get_ts_info(struct net_device *dev,
 		info->phc_index = -1;
 		info->tx_types =
-			(1 << HWTSTAMP_TX_OFF) |
-			(1 << HWTSTAMP_TX_ON);
+			BIT(HWTSTAMP_TX_OFF) |
+			BIT(HWTSTAMP_TX_ON);
 		info->rx_filters =
-			(1 << HWTSTAMP_FILTER_NONE) |
-			(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+			BIT(HWTSTAMP_FILTER_NONE) |
+			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
 		break;
 	default:
 		return ethtool_op_get_ts_info(dev, info);
...
@@ -314,8 +314,8 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
 		break;
 	}
-	if (vflre & (1 << vf_shift)) {
-		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+	if (vflre & BIT(vf_shift)) {
+		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift));
 		hw->mbx.stats.rsts++;
 		return 0;
 	}
...
@@ -107,7 +107,7 @@
 #define IXGBE_PE 0xE0 /* Port expander addr */
 #define IXGBE_PE_OUTPUT 1 /* Output reg offset */
 #define IXGBE_PE_CONFIG 3 /* Config reg offset */
-#define IXGBE_PE_BIT1 (1 << 1)
+#define IXGBE_PE_BIT1 BIT(1)
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE 0x400
...
@@ -396,7 +396,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
 		if (incval > 0x00FFFFFFULL)
 			e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
 		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
-				(1 << IXGBE_INCPER_SHIFT_82599) |
+				BIT(IXGBE_INCPER_SHIFT_82599) |
 				((u32)incval & 0x00FFFFFFUL));
 		break;
 	default:
@@ -1114,7 +1114,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 		incval >>= IXGBE_INCVAL_SHIFT_82599;
 		cc.shift -= IXGBE_INCVAL_SHIFT_82599;
 		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
-				(1 << IXGBE_INCPER_SHIFT_82599) | incval);
+				BIT(IXGBE_INCPER_SHIFT_82599) | incval);
 		break;
 	default:
 		/* other devices aren't supported */
...
@@ -406,7 +406,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
 		vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
 		vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
 		mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
-		mta_reg |= (1 << vector_bit);
+		mta_reg |= BIT(vector_bit);
 		IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
 	}
 	vmolr |= IXGBE_VMOLR_ROMPE;
@@ -433,7 +433,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
 			vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
 			vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
 			mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
-			mta_reg |= (1 << vector_bit);
+			mta_reg |= BIT(vector_bit);
 			IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
 		}
@@ -536,9 +536,9 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
 	/* enable or disable receive depending on error */
 	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
 	if (err)
-		vfre &= ~(1 << vf_shift);
+		vfre &= ~BIT(vf_shift);
 	else
-		vfre |= 1 << vf_shift;
+		vfre |= BIT(vf_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
 	if (err) {
@@ -592,8 +592,8 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
 	u32 vlvfb_mask, pool_mask, i;
 	/* create mask for VF and other pools */
-	pool_mask = ~(1 << (VMDQ_P(0) % 32));
-	vlvfb_mask = 1 << (vf % 32);
+	pool_mask = ~BIT(VMDQ_P(0) % 32);
+	vlvfb_mask = BIT(vf % 32);
 	/* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
 	for (i = IXGBE_VLVF_ENTRIES; i--;) {
@@ -629,7 +629,7 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
 			goto update_vlvfb;
 		vid = vlvf & VLAN_VID_MASK;
-		mask = 1 << (vid % 32);
+		mask = BIT(vid % 32);
 		/* clear bit from VFTA */
 		vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
@@ -813,7 +813,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 	/* enable transmit for vf */
 	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
-	reg |= 1 << vf_shift;
+	reg |= BIT(vf_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
 	/* force drop enable for all VF Rx queues */
@@ -821,7 +821,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 	/* enable receive for vf */
 	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
-	reg |= 1 << vf_shift;
+	reg |= BIT(vf_shift);
 	/*
 	 * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
 	 * For more info take a look at ixgbe_set_vf_lpe
@@ -837,7 +837,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 #endif /* CONFIG_FCOE */
 		if (pf_max_frame > ETH_FRAME_LEN)
-			reg &= ~(1 << vf_shift);
+			reg &= ~BIT(vf_shift);
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
@@ -846,7 +846,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 	/* Enable counting of spoofed packets in the SSVPC register */
 	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
-	reg |= (1 << vf_shift);
+	reg |= BIT(vf_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
 	/*
@@ -908,8 +908,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
 	u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
 	u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
 	u8 tcs = netdev_get_num_tc(adapter->netdev);
-	struct ixgbe_hw *hw = &adapter->hw;
-	int err;
 	if (adapter->vfinfo[vf].pf_vlan || tcs) {
 		e_warn(drv,
@@ -923,19 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
 	if (!vid && !add)
 		return 0;
-	err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
-	if (err)
-		return err;
-	if (adapter->vfinfo[vf].spoofchk_enabled)
-		hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-	if (add)
-		adapter->vfinfo[vf].vlan_count++;
-	else if (adapter->vfinfo[vf].vlan_count)
-		adapter->vfinfo[vf].vlan_count--;
-	return 0;
+	return ixgbe_set_vf_vlan(adapter, add, vid, vf);
 }
 static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
@@ -964,8 +950,11 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
 		 * If the VF is allowed to set MAC filters then turn off
 		 * anti-spoofing to avoid false positives.
 		 */
-		if (adapter->vfinfo[vf].spoofchk_enabled)
-			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+		if (adapter->vfinfo[vf].spoofchk_enabled) {
+			struct ixgbe_hw *hw = &adapter->hw;
+			hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
+		}
 	}
 	err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
@@ -1321,9 +1310,6 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
 	ixgbe_set_vmvir(adapter, vlan, qos, vf);
 	ixgbe_set_vmolr(hw, vf, false);
-	if (adapter->vfinfo[vf].spoofchk_enabled)
-		hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-	adapter->vfinfo[vf].vlan_count++;
 	/* enable hide vlan on X550 */
 	if (hw->mac.type >= ixgbe_mac_X550)
@@ -1356,9 +1342,6 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
 	ixgbe_set_vf_vlan(adapter, true, 0, vf);
 	ixgbe_clear_vmvir(adapter, vf);
 	ixgbe_set_vmolr(hw, vf, true);
-	hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
-	if (adapter->vfinfo[vf].vlan_count)
-		adapter->vfinfo[vf].vlan_count--;
 	/* disable hide VLAN on X550 */
 	if (hw->mac.type >= ixgbe_mac_X550)
@@ -1525,27 +1508,34 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
 int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	int vf_target_reg = vf >> 3;
-	int vf_target_shift = vf % 8;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 regval;
 	if (vf >= adapter->num_vfs)
 		return -EINVAL;
 	adapter->vfinfo[vf].spoofchk_enabled = setting;
-	regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
-	regval &= ~(1 << vf_target_shift);
-	regval |= (setting << vf_target_shift);
-	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
-	if (adapter->vfinfo[vf].vlan_count) {
-		vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
-		regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
-		regval &= ~(1 << vf_target_shift);
-		regval |= (setting << vf_target_shift);
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
+	/* configure MAC spoofing */
+	hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
+	/* configure VLAN spoofing */
+	hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
+	/* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
+	 * calling set_ethertype_anti_spoofing for each VF in loop below
+	 */
+	if (hw->mac.ops.set_ethertype_anti_spoofing) {
+		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
+				(IXGBE_ETQF_FILTER_EN |
+				 IXGBE_ETQF_TX_ANTISPOOF |
+				 IXGBE_ETH_P_LLDP));
+		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
+				(IXGBE_ETQF_FILTER_EN |
+				 IXGBE_ETQF_TX_ANTISPOOF |
+				 ETH_P_PAUSE));
+		hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
 	}
 	return 0;
...
@@ -214,8 +214,8 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
 		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
 				    IXGBE_EEC_SIZE_SHIFT);
-		eeprom->word_size = 1 << (eeprom_size +
-					  IXGBE_EEPROM_WORD_SIZE_SHIFT);
+		eeprom->word_size = BIT(eeprom_size +
+					IXGBE_EEPROM_WORD_SIZE_SHIFT);
 		hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
 		       eeprom->type, eeprom->word_size);
...
@@ -335,8 +335,8 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
 		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
 				    IXGBE_EEC_SIZE_SHIFT);
-		eeprom->word_size = 1 << (eeprom_size +
-					  IXGBE_EEPROM_WORD_SIZE_SHIFT);
+		eeprom->word_size = BIT(eeprom_size +
+					IXGBE_EEPROM_WORD_SIZE_SHIFT);
 		hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
 		       eeprom->type, eeprom->word_size);
@@ -2646,9 +2646,9 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
 	if (enable)
-		pfvfspoof |= (1 << vf_target_shift);
+		pfvfspoof |= BIT(vf_target_shift);
 	else
-		pfvfspoof &= ~(1 << vf_target_shift);
+		pfvfspoof &= ~BIT(vf_target_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
@@ -2765,7 +2765,7 @@ static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
 		ixgbe_release_swfw_sync_X540(hw, hmask);
 		if (status != IXGBE_ERR_TOKEN_RETRY)
 			return status;
-		udelay(FW_PHY_TOKEN_DELAY * 1000);
+		msleep(FW_PHY_TOKEN_DELAY);
 	}
 	return status;
@@ -2908,7 +2908,7 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
 	.get_media_type = &ixgbe_get_media_type_X550em,
 	.get_san_mac_addr = NULL,
 	.get_wwn_prefix = NULL,
-	.setup_link = NULL, /* defined later */
+	.setup_link = &ixgbe_setup_mac_link_X540,
 	.get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
 	.get_bus_info = &ixgbe_get_bus_info_X550em,
 	.setup_sfp = ixgbe_setup_sfp_modules_X550em,
@@ -2932,7 +2932,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
 	.setup_sfp = ixgbe_setup_sfp_modules_X550em,
 	.acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
 	.release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
-	.setup_fc = ixgbe_setup_fc_generic,
+	.setup_fc = ixgbe_setup_fc_x550em,
 	.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
 	.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
 };
...
@@ -74,7 +74,7 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_RXDCTL_RLPML_EN 0x00008000
 /* DCA Control */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
 /* PSRTYPE bit definitions */
 #define IXGBE_PSRTYPE_TCPHDR 0x00000010
@@ -296,16 +296,16 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
 #define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
 #endif /* _IXGBEVF_DEFINES_H_ */
@@ -42,65 +42,54 @@
 #define IXGBE_ALL_RAR_ENTRIES 16
+enum {NETDEV_STATS, IXGBEVF_STATS};
 struct ixgbe_stats {
 	char stat_string[ETH_GSTRING_LEN];
-	struct {
-		int sizeof_stat;
-		int stat_offset;
-		int base_stat_offset;
-		int saved_reset_offset;
-	};
+	int type;
+	int sizeof_stat;
+	int stat_offset;
 };
-#define IXGBEVF_STAT(m, b, r) { \
-	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
-	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
-	.base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
-	.saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+#define IXGBEVF_STAT(_name, _stat) { \
+	.stat_string = _name, \
+	.type = IXGBEVF_STATS, \
+	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
+	.stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
 }
-#define IXGBEVF_ZSTAT(m) { \
-	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
-	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
-	.base_stat_offset = -1, \
-	.saved_reset_offset = -1 \
+#define IXGBEVF_NETDEV_STAT(_net_stat) { \
+	.stat_string = #_net_stat, \
+	.type = NETDEV_STATS, \
+	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
 }
-static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
-	{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
-				    stats.saved_reset_vfgprc)},
-	{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
-				    stats.saved_reset_vfgptc)},
-	{"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
-				  stats.saved_reset_vfgorc)},
-	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
-				  stats.saved_reset_vfgotc)},
-	{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
-	{"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
-	{"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
-	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
-				   stats.saved_reset_vfmprc)},
-	{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
-#ifdef BP_EXTENDED_STATS
-	{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
-	{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
-	{"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
-	{"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
-	{"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
-	{"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
-#endif
+static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
+	IXGBEVF_NETDEV_STAT(rx_packets),
+	IXGBEVF_NETDEV_STAT(tx_packets),
+	IXGBEVF_NETDEV_STAT(rx_bytes),
+	IXGBEVF_NETDEV_STAT(tx_bytes),
+	IXGBEVF_STAT("tx_busy", tx_busy),
+	IXGBEVF_STAT("tx_restart_queue", restart_queue),
+	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
+	IXGBEVF_NETDEV_STAT(multicast),
+	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
 };
-#define IXGBE_QUEUE_STATS_LEN 0
-#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
-#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#define IXGBEVF_QUEUE_STATS_LEN ( \
+	(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
+	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+	 (sizeof(struct ixgbe_stats) / sizeof(u64)))
+#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
+#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register test (offline)",
 	"Link test (on/offline)"
 };
-#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
 static int ixgbevf_get_settings(struct net_device *netdev,
 				struct ethtool_cmd *ecmd)
@@ -177,7 +166,8 @@ static void ixgbevf_get_regs(struct net_device *netdev,
 	memset(p, 0, regs_len);
-	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+	/* generate a number suitable for ethtool's register version */
+	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
 	/* General Registers */
 	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
@@ -392,13 +382,13 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 	return err;
 }
-static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
 {
 	switch (stringset) {
 	case ETH_SS_TEST:
-		return IXGBE_TEST_LEN;
+		return IXGBEVF_TEST_LEN;
 	case ETH_SS_STATS:
-		return IXGBE_GLOBAL_STATS_LEN;
+		return IXGBEVF_STATS_LEN;
 	default:
 		return -EINVAL;
 	}
@@ -408,70 +398,138 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
 				      struct ethtool_stats *stats, u64 *data)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-	char *base = (char *)adapter;
-	int i;
-#ifdef BP_EXTENDED_STATS
-	u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
-	    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rx_yields += adapter->rx_ring[i]->stats.yields;
-		rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
-		rx_yields += adapter->rx_ring[i]->stats.yields;
-	}
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *net_stats;
+	unsigned int start;
+	struct ixgbevf_ring *ring;
+	int i, j;
+	char *p;
+	ixgbevf_update_stats(adapter);
+	net_stats = dev_get_stats(netdev, &temp);
+	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+		switch (ixgbevf_gstrings_stats[i].type) {
+		case NETDEV_STATS:
+			p = (char *)net_stats +
+			    ixgbevf_gstrings_stats[i].stat_offset;
+			break;
+		case IXGBEVF_STATS:
+			p = (char *)adapter +
+			    ixgbevf_gstrings_stats[i].stat_offset;
+			break;
+		default:
+			data[i] = 0;
+			continue;
+		}
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		tx_yields += adapter->tx_ring[i]->stats.yields;
-		tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
-		tx_yields += adapter->tx_ring[i]->stats.yields;
+		data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
+			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
-	adapter->bp_rx_yields = rx_yields;
-	adapter->bp_rx_cleaned = rx_cleaned;
-	adapter->bp_rx_missed = rx_missed;
+	/* populate Tx queue data */
+	for (j = 0; j < adapter->num_tx_queues; j++) {
+		ring = adapter->tx_ring[j];
+		if (!ring) {
+			data[i++] = 0;
+			data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+#endif
+			continue;
+		}
-	adapter->bp_tx_yields = tx_yields;
-	adapter->bp_tx_cleaned = tx_cleaned;
-	adapter->bp_tx_missed = tx_missed;
+		do {
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			data[i] = ring->stats.packets;
+			data[i + 1] = ring->stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+		i += 2;
+#ifdef BP_EXTENDED_STATS
+		data[i] = ring->stats.yields;
+		data[i + 1] = ring->stats.misses;
+		data[i + 2] = ring->stats.cleaned;
+		i += 3;
 #endif
+	}
-	ixgbevf_update_stats(adapter);
-	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-		char *p = base + ixgbe_gstrings_stats[i].stat_offset;
-		char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
-		char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
-		if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
-			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
-				data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
-			else
-				data[i] = *(u64 *)p;
-		} else {
-			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
-				data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
-			else
-				data[i] = *(u32 *)p;
+	/* populate Rx queue data */
+	for (j = 0; j < adapter->num_rx_queues; j++) {
+		ring = adapter->rx_ring[j];
+		if (!ring) {
+			data[i++] = 0;
+			data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+#endif
+			continue;
 		}
+		do {
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			data[i] = ring->stats.packets;
+			data[i + 1] = ring->stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+		i += 2;
+#ifdef BP_EXTENDED_STATS
+		data[i] = ring->stats.yields;
+		data[i + 1] = ring->stats.misses;
+		data[i + 2] = ring->stats.cleaned;
+		i += 3;
+#endif
 	}
 }
 static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
 				u8 *data)
 {
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	char *p = (char *)data;
 	int i;
 	switch (stringset) {
 	case ETH_SS_TEST:
 		memcpy(data, *ixgbe_gstrings_test,
-		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+		       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:
-		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+		for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			sprintf(p, "tx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "tx_queue_%u_bp_napi_yield", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bp_misses", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bp_cleaned", i);
+			p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+		}
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			sprintf(p, "rx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "rx_queue_%u_bp_poll_yield", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bp_misses", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bp_cleaned", i);
+			p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+		}
 		break;
 	}
 }
...
@@ -166,10 +166,10 @@ struct ixgbevf_ring {
 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-#define IXGBE_TX_FLAGS_CSUM (u32)(1)
-#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
-#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
-#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_CSUM BIT(0)
+#define IXGBE_TX_FLAGS_VLAN BIT(1)
+#define IXGBE_TX_FLAGS_TSO BIT(2)
+#define IXGBE_TX_FLAGS_IPV4 BIT(3)
 #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -422,16 +422,6 @@ struct ixgbevf_adapter {
 	unsigned int tx_ring_count;
 	unsigned int rx_ring_count;
-#ifdef BP_EXTENDED_STATS
-	u64 bp_rx_yields;
-	u64 bp_rx_cleaned;
-	u64 bp_rx_missed;
-	u64 bp_tx_yields;
-	u64 bp_tx_cleaned;
-	u64 bp_tx_missed;
-#endif
 	u8 __iomem *io_addr; /* Mainly for iounmap use */
 	u32 link_speed;
 	bool link_up;
...
@@ -1056,7 +1056,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
 	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
 		ixgbevf_irq_enable_queues(adapter,
-					  1 << q_vector->v_idx);
+					  BIT(q_vector->v_idx));
 	return 0;
 }
@@ -1158,14 +1158,14 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
 		}
 		/* add q_vector eims value to global eims_enable_mask */
-		adapter->eims_enable_mask |= 1 << v_idx;
+		adapter->eims_enable_mask |= BIT(v_idx);
 		ixgbevf_write_eitr(q_vector);
 	}
 	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
 	/* setup eims_other and add value to global eims_enable_mask */
-	adapter->eims_other = 1 << v_idx;
+	adapter->eims_other = BIT(v_idx);
 	adapter->eims_enable_mask |= adapter->eims_other;
 }
@@ -1589,8 +1589,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
 		txdctl |= (8 << 16);    /* WTHRESH = 8 */
 		/* Setting PTHRESH to 32 both improves performance */
-		txdctl |= (1 << 8) |    /* HTHRESH = 1 */
+		txdctl |= (1u << 8) |   /* HTHRESH = 1 */
 			  32;           /* PTHRESH = 32 */
 	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
@@ -1646,7 +1646,7 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
 		      IXGBE_PSRTYPE_L2HDR;
 	if (adapter->num_rx_queues > 1)
-		psrtype |= 1 << 29;
+		psrtype |= BIT(29);
 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
 }
@@ -2056,7 +2056,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 	spin_lock_bh(&adapter->mbx_lock);
 	while (api[idx] != ixgbe_mbox_api_unknown) {
-		err = ixgbevf_negotiate_api_version(hw, api[idx]);
+		err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
 		if (!err)
 			break;
 		idx++;
@@ -2797,7 +2797,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
 		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
 		if (qv->rx.ring || qv->tx.ring)
-			eics |= 1 << i;
+			eics |= BIT(i);
 	}
 	/* Cause software interrupt to ensure rings are cleaned */
@@ -3272,9 +3272,18 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 		       struct ixgbevf_tx_buffer *first,
 		       u8 *hdr_len)
 {
+	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
 	struct sk_buff *skb = first->skb;
-	u32 vlan_macip_lens, type_tucmd;
-	u32 mss_l4len_idx, l4len;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
 	int err;
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3287,49 +3296,53 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 	if (err < 0)
 		return err;
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_checksum_start(skb);
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
-	if (first->protocol == htons(ETH_P_IP)) {
-		struct iphdr *iph = ip_hdr(skb);
-		iph->tot_len = 0;
-		iph->check = 0;
-		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-							 iph->daddr, 0,
-							 IPPROTO_TCP,
-							 0);
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+						  csum_unfold(l4.tcp->check)));
 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+		ip.v4->tot_len = 0;
 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
 				   IXGBE_TX_FLAGS_CSUM |
 				   IXGBE_TX_FLAGS_IPV4;
-	} else if (skb_is_gso_v6(skb)) {
-		ipv6_hdr(skb)->payload_len = 0;
-		tcp_hdr(skb)->check =
-		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-				     &ipv6_hdr(skb)->daddr,
-				     0, IPPROTO_TCP, 0);
+	} else {
+		ip.v6->payload_len = 0;
 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
 				   IXGBE_TX_FLAGS_CSUM;
 	}
-	/* compute header lengths */
-	l4len = tcp_hdrlen(skb);
-	*hdr_len += l4len;
-	*hdr_len = skb_transport_offset(skb) + l4len;
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+	/* compute length of segmentation header */
+	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
-	/* update GSO size and bytecount with header size */
+	/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs; first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len; first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* mss_l4len_id: use 1 as index for TSO */ /* mss_l4len_id: use 1 as index for TSO */
mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_network_header_len(skb); vlan_macip_lens = l4.hdr - ip.hdr;
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
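Annotator's note: the rewritten TSO path no longer recomputes the pseudo-header checksum from addresses; it takes the checksum the stack already seeded in l4.tcp->check and cancels the payload length out of it via csum_replace_by_diff(). In one's-complement arithmetic, subtracting b from a folded sum a is ~(~a + b). A standalone userspace sketch of that identity, with made-up example values:

    #include <stdio.h>
    #include <stdint.h>

    /* Fold a 32-bit one's-complement accumulator down to 16 bits. */
    static uint16_t csum_fold16(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    int main(void)
    {
        /* Assume a pseudo-header sum covering 0x1234 (addresses etc.)
         * plus a payload length of 1500 (0x05dc); both values made up.
         */
        uint16_t check = csum_fold16(0x1234 + 0x05dc);  /* 0x1810 */
        uint32_t paylen = 1500;

        /* Remove paylen, as csum_replace_by_diff() does in the kernel:
         * a - b == ~(~a + b) in one's-complement arithmetic.
         */
        check = ~csum_fold16((uint16_t)~check + csum_fold16(paylen));

        printf("adjusted check = 0x%04x\n", check);     /* 0x1234 */
        return 0;
    }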
...@@ -3422,7 +3435,7 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, ...@@ -3422,7 +3435,7 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
/* use index 1 context for TSO/FSO/FCOE */ /* use index 1 context for TSO/FSO/FCOE */
if (tx_flags & IXGBE_TX_FLAGS_TSO) if (tx_flags & IXGBE_TX_FLAGS_TSO)
olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT); olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
/* Check Context must be set if Tx switch is enabled, which it /* Check Context must be set if Tx switch is enabled, which it
* always is for case where virtual functions are running * always is for case where virtual functions are running
...@@ -3870,6 +3883,40 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, ...@@ -3870,6 +3883,40 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats; return stats;
} }
#define IXGBEVF_MAX_MAC_HDR_LEN 127
#define IXGBEVF_MAX_NETWORK_HDR_LEN 511
static netdev_features_t
ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
{
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
mac_hdr_len = skb_network_header(skb) - skb->data;
if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_TSO |
NETIF_F_TSO6);
network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
NETIF_F_TSO |
NETIF_F_TSO6);
/* We can only support IPV4 TSO in tunnels if we can mangle the
* inner IP ID field, so strip TSO if MANGLEID is not supported.
*/
if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
features &= ~NETIF_F_TSO;
return features;
}
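Annotator's note: the 127/511 limits correspond to the field widths in the advanced Tx context descriptor that the TSO path above fills in: vlan_macip_lens carries the network header length in a 9-bit field (max 511) with MACLEN in the 7-bit field above it (max 127), so longer headers simply cannot be described and the offloads must be stripped. A sketch of that packing, with the shift assumed to match the driver's IXGBE_ADVTXD_MACLEN_SHIFT:

    #include <stdio.h>

    #define ADVTXD_MACLEN_SHIFT 9  /* mirrors IXGBE_ADVTXD_MACLEN_SHIFT */

    int main(void)
    {
        unsigned int mac_hdr_len = 14;      /* Ethernet, no VLAN tag  */
        unsigned int network_hdr_len = 40;  /* e.g. basic IPv6 header */
        unsigned int vlan_macip_lens;

        /* IPLEN lives in bits 8:0 (max 511), MACLEN in bits 15:9
         * (max 127); anything larger would spill into the neighboring
         * field, which is why ixgbevf_features_check() drops the
         * offloads instead of risking a malformed descriptor.
         */
        vlan_macip_lens = network_hdr_len;
        vlan_macip_lens |= mac_hdr_len << ADVTXD_MACLEN_SHIFT;

        printf("vlan_macip_lens = 0x%04x\n", vlan_macip_lens); /* 0x1c28 */
        return 0;
    }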
static const struct net_device_ops ixgbevf_netdev_ops = { static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_open = ixgbevf_open, .ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close, .ndo_stop = ixgbevf_close,
...@@ -3888,7 +3935,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = { ...@@ -3888,7 +3935,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbevf_netpoll, .ndo_poll_controller = ixgbevf_netpoll,
#endif #endif
.ndo_features_check = passthru_features_check, .ndo_features_check = ixgbevf_features_check,
}; };
static void ixgbevf_assign_netdev_ops(struct net_device *dev) static void ixgbevf_assign_netdev_ops(struct net_device *dev)
...@@ -3999,23 +4046,31 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -3999,23 +4046,31 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_CSUM | NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC; NETIF_F_SCTP_CRC;
netdev->features = netdev->hw_features | #define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_GSO_GRE_CSUM | \
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GSO_IPIP | \
NETIF_F_HW_VLAN_CTAG_FILTER; NETIF_F_GSO_SIT | \
NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)
netdev->vlan_features |= NETIF_F_SG | netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
NETIF_F_TSO | netdev->hw_features |= NETIF_F_GSO_PARTIAL |
NETIF_F_TSO6 | IXGBEVF_GSO_PARTIAL_FEATURES;
NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC;
netdev->mpls_features |= NETIF_F_HW_CSUM; netdev->features = netdev->hw_features;
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
if (pci_using_dac) if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= netdev->vlan_features;
/* set this bit last since it cannot be part of vlan_features */
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX;
netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_UNICAST_FLT;
if (IXGBE_REMOVED(hw->hw_addr)) { if (IXGBE_REMOVED(hw->hw_addr)) {
......
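Annotator's note: the probe hunk above adds the tunnel GSO bits to hw_features alongside NETIF_F_GSO_PARTIAL, then derives the other feature sets from hw_features in a specific order: vlan_features is copied before the VLAN CTAG bits are added to features, since (per the comment in the hunk) a stacked VLAN device cannot inherit them. A compressed sketch of that ordering, using placeholder flag values rather than the real NETIF_F_* constants:

    #include <stdio.h>

    /* Placeholder bits standing in for NETIF_F_* flags. */
    #define F_TSO          0x1
    #define F_GSO_PARTIAL  0x2
    #define F_GSO_GRE      0x4
    #define F_VLAN_TX      0x8

    int main(void)
    {
        unsigned int hw_features = F_TSO;
        unsigned int gso_partial_features = F_GSO_GRE;
        unsigned int features, vlan_features;

        /* Tunnel GSO bits are advertised in hw_features too. */
        hw_features |= F_GSO_PARTIAL | gso_partial_features;
        features = hw_features;

        /* Derive vlan_features before adding the CTAG bits: a VLAN
         * device stacked on top cannot offload another VLAN layer.
         */
        vlan_features = features;
        features |= F_VLAN_TX;

        printf("features=0x%x vlan_features=0x%x\n",
               features, vlan_features);   /* 0xf and 0x7 */
        return 0;
    }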
...@@ -670,11 +670,11 @@ void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) ...@@ -670,11 +670,11 @@ void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
} }
/** /**
* ixgbevf_negotiate_api_version - Negotiate supported API version * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
* @api: integer containing requested API version * @api: integer containing requested API version
**/ **/
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api) static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{ {
int err; int err;
u32 msg[3]; u32 msg[3];
...@@ -769,6 +769,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { ...@@ -769,6 +769,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.stop_adapter = ixgbevf_stop_hw_vf, .stop_adapter = ixgbevf_stop_hw_vf,
.setup_link = ixgbevf_setup_mac_link_vf, .setup_link = ixgbevf_setup_mac_link_vf,
.check_link = ixgbevf_check_mac_link_vf, .check_link = ixgbevf_check_mac_link_vf,
.negotiate_api_version = ixgbevf_negotiate_api_version_vf,
.set_rar = ixgbevf_set_rar_vf, .set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode, .update_xcast_mode = ixgbevf_update_xcast_mode,
......
...@@ -51,6 +51,7 @@ struct ixgbe_mac_operations { ...@@ -51,6 +51,7 @@ struct ixgbe_mac_operations {
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*stop_adapter)(struct ixgbe_hw *); s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *); s32 (*get_bus_info)(struct ixgbe_hw *);
s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
/* Link */ /* Link */
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
...@@ -208,7 +209,6 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, ...@@ -208,7 +209,6 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc); unsigned int *default_tc);
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues); int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues);
......
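Annotator's note: the last two hunks retire the exported ixgbevf_negotiate_api_version() in favor of a negotiate_api_version member in ixgbe_mac_operations, so the caller in ixgbevf_negotiate_api() dispatches through hw->mac.ops instead of binding to the VF mailbox implementation at link time, and an alternate backend can install its own handler. A standalone sketch of that ops-table pattern; the names below only mirror the diff's shapes:

    #include <stdio.h>

    struct hw;

    /* Mirrors the shape of the new negotiate_api_version member. */
    struct mac_operations {
        int (*negotiate_api_version)(struct hw *hw, int api);
    };

    struct hw {
        struct mac_operations ops;
    };

    /* Stand-in for the mailbox-backed VF implementation. */
    static int negotiate_api_version_vf(struct hw *hw, int api)
    {
        printf("mailbox negotiation for API %d\n", api);
        return 0;
    }

    int main(void)
    {
        struct hw hw = {
            .ops = { .negotiate_api_version = negotiate_api_version_vf },
        };

        /* Callers no longer name the implementation directly. */
        return hw.ops.negotiate_api_version(&hw, 12);
    }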