Commit 41a1d04b authored by Jesse Brandeburg's avatar Jesse Brandeburg Committed by Jeff Kirsher

i40e: use BIT and BIT_ULL macros

Use macros for abstracting (1 << foo) to BIT(foo)
and (1ULL << foo64) to BIT_ULL(foo64) in order to match
better with kernel requirements.

NOTE: the adminq_cmd.h file was not modified on purpose because
of the dependency upon firmware for that file.

Change-ID: I73ee2e48c880d671948aad19bd53ca6b2ac558fc
Signed-off-by: default avatarJesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: default avatarCatherine Sullivan <catherine.sullivan@intel.com>
Tested-by: default avatarJim Young <james.m.young@intel.com>
Signed-off-by: default avatarJeff Kirsher <jeffrey.t.kirsher@intel.com>
parent f1c7e72e
...@@ -98,7 +98,7 @@ ...@@ -98,7 +98,7 @@
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9) #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9)
/* Ethtool Private Flags */ /* Ethtool Private Flags */
#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0) #define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0)
#define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
...@@ -289,35 +289,35 @@ struct i40e_pf { ...@@ -289,35 +289,35 @@ struct i40e_pf {
struct work_struct service_task; struct work_struct service_task;
u64 flags; u64 flags;
#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1) #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2) #define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3) #define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4) #define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4)
#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5) #define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5)
#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6) #define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7) #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8) #define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8)
#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9) #define I40E_FLAG_NEED_LINK_UPDATE BIT_ULL(9)
#ifdef I40E_FCOE #ifdef I40E_FCOE
#define I40E_FLAG_FCOE_ENABLED (u64)(1 << 11) #define I40E_FLAG_FCOE_ENABLED BIT_ULL(11)
#endif /* I40E_FCOE */ #endif /* I40E_FCOE */
#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12) #define I40E_FLAG_IN_NETPOLL BIT_ULL(12)
#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13) #define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14) #define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15) #define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17) #define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17)
#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18) #define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18)
#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19) #define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19)
#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20) #define I40E_FLAG_DCB_ENABLED BIT_ULL(20)
#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21) #define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21)
#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22) #define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22)
#define I40E_FLAG_PTP (u64)(1 << 25) #define I40E_FLAG_PTP BIT_ULL(25)
#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26) #define I40E_FLAG_MFP_ENABLED BIT_ULL(26)
#ifdef CONFIG_I40E_VXLAN #ifdef CONFIG_I40E_VXLAN
#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27) #define I40E_FLAG_VXLAN_FILTER_SYNC BIT_ULL(27)
#endif #endif
#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) #define I40E_FLAG_PORT_ID_VALID BIT_ULL(28)
#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) #define I40E_FLAG_DCB_CAPABLE BIT_ULL(29)
#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
/* tracks features that get auto disabled by errors */ /* tracks features that get auto disabled by errors */
...@@ -443,8 +443,8 @@ struct i40e_vsi { ...@@ -443,8 +443,8 @@ struct i40e_vsi {
u32 current_netdev_flags; u32 current_netdev_flags;
unsigned long state; unsigned long state;
#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0) #define I40E_VSI_FLAG_FILTER_CHANGED BIT(0)
#define I40E_VSI_FLAG_VEB_OWNER (1<<1) #define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags; unsigned long flags;
struct list_head mac_filter_list; struct list_head mac_filter_list;
......
...@@ -1393,9 +1393,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) ...@@ -1393,9 +1393,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
blink = false; blink = false;
if (blink) if (blink)
gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else else
gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
break; break;
......
...@@ -58,9 +58,9 @@ ...@@ -58,9 +58,9 @@
#define I40E_IEEE_ETS_MAXTC_SHIFT 0 #define I40E_IEEE_ETS_MAXTC_SHIFT 0
#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT) #define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
#define I40E_IEEE_ETS_CBS_SHIFT 6 #define I40E_IEEE_ETS_CBS_SHIFT 6
#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT) #define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT)
#define I40E_IEEE_ETS_WILLING_SHIFT 7 #define I40E_IEEE_ETS_WILLING_SHIFT 7
#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT) #define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT)
#define I40E_IEEE_ETS_PRIO_0_SHIFT 0 #define I40E_IEEE_ETS_PRIO_0_SHIFT 0
#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) #define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
#define I40E_IEEE_ETS_PRIO_1_SHIFT 4 #define I40E_IEEE_ETS_PRIO_1_SHIFT 4
...@@ -79,9 +79,9 @@ ...@@ -79,9 +79,9 @@
#define I40E_IEEE_PFC_CAP_SHIFT 0 #define I40E_IEEE_PFC_CAP_SHIFT 0
#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT) #define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
#define I40E_IEEE_PFC_MBC_SHIFT 6 #define I40E_IEEE_PFC_MBC_SHIFT 6
#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT) #define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT)
#define I40E_IEEE_PFC_WILLING_SHIFT 7 #define I40E_IEEE_PFC_WILLING_SHIFT 7
#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT) #define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT)
/* Defines for IEEE APP TLV */ /* Defines for IEEE APP TLV */
#define I40E_IEEE_APP_SEL_SHIFT 0 #define I40E_IEEE_APP_SEL_SHIFT 0
......
...@@ -187,7 +187,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi) ...@@ -187,7 +187,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
/* Set up all the App TLVs if DCBx is negotiated */ /* Set up all the App TLVs if DCBx is negotiated */
for (i = 0; i < dcbxcfg->numapps; i++) { for (i = 0; i < dcbxcfg->numapps; i++) {
prio = dcbxcfg->app[i].priority; prio = dcbxcfg->app[i].priority;
tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]); tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]);
/* Add APP only if the TC is enabled for this VSI */ /* Add APP only if the TC is enabled for this VSI */
if (tc_map & vsi->tc_config.enabled_tc) { if (tc_map & vsi->tc_config.enabled_tc) {
......
...@@ -964,7 +964,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) ...@@ -964,7 +964,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
pf->auto_disable_flags |= flag; pf->auto_disable_flags |= flag;
} }
dev_info(&pf->pdev->dev, "requesting a PF reset\n"); dev_info(&pf->pdev->dev, "requesting a PF reset\n");
i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
} }
#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4) #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
...@@ -1471,19 +1471,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp, ...@@ -1471,19 +1471,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} }
} else if (strncmp(cmd_buf, "pfr", 3) == 0) { } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n"); dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "corer", 5) == 0) { } else if (strncmp(cmd_buf, "corer", 5) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n"); dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED)); i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "globr", 5) == 0) { } else if (strncmp(cmd_buf, "globr", 5) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "empr", 4) == 0) { } else if (strncmp(cmd_buf, "empr", 4) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED)); i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "read", 4) == 0) { } else if (strncmp(cmd_buf, "read", 4) == 0) {
u32 address; u32 address;
......
...@@ -144,11 +144,8 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw) ...@@ -144,11 +144,8 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val); ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
if (!ret_code && if (!ret_code &&
((reg_val & I40E_SR_CONTROL_WORD_1_MASK) == ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
(0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) { BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
ret_code = i40e_validate_nvm_checksum(hw, NULL); return i40e_validate_nvm_checksum(hw, NULL);
} else { else
ret_code = I40E_ERR_DIAG_TEST_FAILED; return I40E_ERR_DIAG_TEST_FAILED;
}
return ret_code;
} }
...@@ -1017,7 +1017,7 @@ static int i40e_get_eeprom_len(struct net_device *netdev) ...@@ -1017,7 +1017,7 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
/* register returns value in power of 2, 64Kbyte chunks. */ /* register returns value in power of 2, 64Kbyte chunks. */
val = (64 * 1024) * (1 << val); val = (64 * 1024) * BIT(val);
return val; return val;
} }
...@@ -1470,11 +1470,11 @@ static int i40e_get_ts_info(struct net_device *dev, ...@@ -1470,11 +1470,11 @@ static int i40e_get_ts_info(struct net_device *dev,
else else
info->phc_index = -1; info->phc_index = -1;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT); BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0; return 0;
} }
...@@ -1590,7 +1590,7 @@ static void i40e_diag_test(struct net_device *netdev, ...@@ -1590,7 +1590,7 @@ static void i40e_diag_test(struct net_device *netdev,
/* indicate we're in test mode */ /* indicate we're in test mode */
dev_close(netdev); dev_close(netdev);
else else
i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
/* Link test performed before hardware reset /* Link test performed before hardware reset
* so autoneg doesn't interfere with test result * so autoneg doesn't interfere with test result
...@@ -1612,7 +1612,7 @@ static void i40e_diag_test(struct net_device *netdev, ...@@ -1612,7 +1612,7 @@ static void i40e_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED; eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, &pf->state); clear_bit(__I40E_TESTING, &pf->state);
i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
if (if_running) if (if_running)
dev_open(netdev); dev_open(netdev);
...@@ -1645,7 +1645,7 @@ static void i40e_get_wol(struct net_device *netdev, ...@@ -1645,7 +1645,7 @@ static void i40e_get_wol(struct net_device *netdev,
/* NVM bit on means WoL disabled for the port */ /* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) { if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
wol->supported = 0; wol->supported = 0;
wol->wolopts = 0; wol->wolopts = 0;
} else { } else {
...@@ -1678,7 +1678,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) ...@@ -1678,7 +1678,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
/* NVM bit on means WoL disabled for the port */ /* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
if (((1 << hw->port) & wol_nvm_bits)) if (BIT(hw->port) & wol_nvm_bits)
return -EOPNOTSUPP; return -EOPNOTSUPP;
/* only magic packet is supported */ /* only magic packet is supported */
...@@ -2024,10 +2024,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) ...@@ -2024,10 +2024,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V4_FLOW: case TCP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0: case 0:
hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break; break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3): case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -2036,10 +2036,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) ...@@ -2036,10 +2036,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V6_FLOW: case TCP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0: case 0:
hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break; break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3): case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -2048,12 +2048,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) ...@@ -2048,12 +2048,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V4_FLOW: case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0: case 0:
hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break; break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3): case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -2062,12 +2062,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) ...@@ -2062,12 +2062,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V6_FLOW: case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0: case 0:
hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break; break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3): case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -2080,7 +2080,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) ...@@ -2080,7 +2080,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) || if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3)) (nfc->data & RXH_L4_B_2_3))
return -EINVAL; return -EINVAL;
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
break; break;
case AH_ESP_V6_FLOW: case AH_ESP_V6_FLOW:
case AH_V6_FLOW: case AH_V6_FLOW:
...@@ -2089,15 +2089,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) ...@@ -2089,15 +2089,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) || if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3)) (nfc->data & RXH_L4_B_2_3))
return -EINVAL; return -EINVAL;
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
break; break;
case IPV4_FLOW: case IPV4_FLOW:
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4); BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
break; break;
case IPV6_FLOW: case IPV6_FLOW:
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
break; break;
default: default:
return -EINVAL; return -EINVAL;
......
...@@ -298,8 +298,8 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) ...@@ -298,8 +298,8 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
/* enable FCoE hash filter */ /* enable FCoE hash filter */
val = rd32(hw, I40E_PFQF_HENA(1)); val = rd32(hw, I40E_PFQF_HENA(1));
val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32); val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32); val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
val &= I40E_PFQF_HENA_PTYPE_ENA_MASK; val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
wr32(hw, I40E_PFQF_HENA(1), val); wr32(hw, I40E_PFQF_HENA(1), val);
...@@ -308,10 +308,10 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) ...@@ -308,10 +308,10 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
pf->num_fcoe_qps = I40E_DEFAULT_FCOE; pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
/* Reserve 4K DDP contexts and 20K filter size for FCoE */ /* Reserve 4K DDP contexts and 20K filter size for FCoE */
pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) * pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) *
I40E_DMA_CNTX_BASE_SIZE; I40E_DMA_CNTX_BASE_SIZE;
pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num + pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
(1 << I40E_HASH_FILTER_SIZE_16K) * BIT(I40E_HASH_FILTER_SIZE_16K) *
I40E_HASH_FILTER_BASE_SIZE; I40E_HASH_FILTER_BASE_SIZE;
/* FCoE object: max 16K filter buckets and 4K DMA contexts */ /* FCoE object: max 16K filter buckets and 4K DMA contexts */
...@@ -348,7 +348,7 @@ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) ...@@ -348,7 +348,7 @@ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app.protocolid == ETH_P_FCOE) { app.protocolid == ETH_P_FCOE) {
tc = dcbcfg->etscfg.prioritytable[app.priority]; tc = dcbcfg->etscfg.prioritytable[app.priority];
enabled_tc |= (1 << tc); enabled_tc |= BIT(tc);
break; break;
} }
} }
......
...@@ -59,9 +59,9 @@ ...@@ -59,9 +59,9 @@
(((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1) (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \ #define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \
(1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
#define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \ #define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \
(1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \ #define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \
I40E_RX_PROG_FCOE_ERROR_CONFLICT(e) I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
......
...@@ -127,8 +127,8 @@ struct i40e_hmc_info { ...@@ -127,8 +127,8 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \
...@@ -147,7 +147,7 @@ struct i40e_hmc_info { ...@@ -147,7 +147,7 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \
......
...@@ -129,7 +129,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, ...@@ -129,7 +129,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
obj->cnt = txq_num; obj->cnt = txq_num;
obj->base = 0; obj->base = 0;
size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ); size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
obj->size = (u64)1 << size_exp; obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */ /* validate values requested by driver don't exceed HMC capacity */
if (txq_num > obj->max_cnt) { if (txq_num > obj->max_cnt) {
...@@ -152,7 +152,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, ...@@ -152,7 +152,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size); hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
obj->base = i40e_align_l2obj_base(obj->base); obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ); size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
obj->size = (u64)1 << size_exp; obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */ /* validate values requested by driver don't exceed HMC capacity */
if (rxq_num > obj->max_cnt) { if (rxq_num > obj->max_cnt) {
...@@ -175,7 +175,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, ...@@ -175,7 +175,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size); hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
obj->base = i40e_align_l2obj_base(obj->base); obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ); size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
obj->size = (u64)1 << size_exp; obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */ /* validate values requested by driver don't exceed HMC capacity */
if (fcoe_cntx_num > obj->max_cnt) { if (fcoe_cntx_num > obj->max_cnt) {
...@@ -198,7 +198,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, ...@@ -198,7 +198,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size); hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
obj->base = i40e_align_l2obj_base(obj->base); obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ); size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
obj->size = (u64)1 << size_exp; obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */ /* validate values requested by driver don't exceed HMC capacity */
if (fcoe_filt_num > obj->max_cnt) { if (fcoe_filt_num > obj->max_cnt) {
...@@ -763,7 +763,7 @@ static void i40e_write_byte(u8 *hmc_bits, ...@@ -763,7 +763,7 @@ static void i40e_write_byte(u8 *hmc_bits,
/* prepare the bits and mask */ /* prepare the bits and mask */
shift_width = ce_info->lsb % 8; shift_width = ce_info->lsb % 8;
mask = ((u8)1 << ce_info->width) - 1; mask = BIT(ce_info->width) - 1;
src_byte = *from; src_byte = *from;
src_byte &= mask; src_byte &= mask;
...@@ -804,7 +804,7 @@ static void i40e_write_word(u8 *hmc_bits, ...@@ -804,7 +804,7 @@ static void i40e_write_word(u8 *hmc_bits,
/* prepare the bits and mask */ /* prepare the bits and mask */
shift_width = ce_info->lsb % 8; shift_width = ce_info->lsb % 8;
mask = ((u16)1 << ce_info->width) - 1; mask = BIT(ce_info->width) - 1;
/* don't swizzle the bits until after the mask because the mask bits /* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines * will be in a different bit position on big endian machines
...@@ -854,7 +854,7 @@ static void i40e_write_dword(u8 *hmc_bits, ...@@ -854,7 +854,7 @@ static void i40e_write_dword(u8 *hmc_bits,
* to 5 bits so the shift will do nothing * to 5 bits so the shift will do nothing
*/ */
if (ce_info->width < 32) if (ce_info->width < 32)
mask = ((u32)1 << ce_info->width) - 1; mask = BIT(ce_info->width) - 1;
else else
mask = ~(u32)0; mask = ~(u32)0;
...@@ -906,7 +906,7 @@ static void i40e_write_qword(u8 *hmc_bits, ...@@ -906,7 +906,7 @@ static void i40e_write_qword(u8 *hmc_bits,
* to 6 bits so the shift will do nothing * to 6 bits so the shift will do nothing
*/ */
if (ce_info->width < 64) if (ce_info->width < 64)
mask = ((u64)1 << ce_info->width) - 1; mask = BIT_ULL(ce_info->width) - 1;
else else
mask = ~(u64)0; mask = ~(u64)0;
......
This diff is collapsed.
...@@ -50,7 +50,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw) ...@@ -50,7 +50,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
I40E_GLNVM_GENS_SR_SIZE_SHIFT); I40E_GLNVM_GENS_SR_SIZE_SHIFT);
/* Switching to words (sr_size contains power of 2KB) */ /* Switching to words (sr_size contains power of 2KB) */
nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB; nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
/* Check if we are in the normal or blank NVM programming mode */ /* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, I40E_GLNVM_FLA); fla = rd32(hw, I40E_GLNVM_FLA);
...@@ -189,8 +189,8 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, ...@@ -189,8 +189,8 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
ret_code = i40e_poll_sr_srctl_done_bit(hw); ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) { if (!ret_code) {
/* Write the address and start reading */ /* Write the address and start reading */
sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
(1 << I40E_GLNVM_SRCTL_START_SHIFT); BIT(I40E_GLNVM_SRCTL_START_SHIFT);
wr32(hw, I40E_GLNVM_SRCTL, sr_reg); wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
/* Poll I40E_GLNVM_SRCTL until the done bit is set */ /* Poll I40E_GLNVM_SRCTL until the done bit is set */
......
...@@ -43,9 +43,8 @@ ...@@ -43,9 +43,8 @@
#define I40E_PTP_10GB_INCVAL 0x0333333333ULL #define I40E_PTP_10GB_INCVAL 0x0333333333ULL
#define I40E_PTP_1GB_INCVAL 0x2000000000ULL #define I40E_PTP_1GB_INCVAL 0x2000000000ULL
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \ #define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) #define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
/** /**
...@@ -357,7 +356,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) ...@@ -357,7 +356,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
if (!(prttsyn_stat & (1 << index))) if (!(prttsyn_stat & BIT(index)))
return; return;
lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index)); lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
......
...@@ -464,7 +464,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, ...@@ -464,7 +464,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) || if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
(I40E_DEBUG_FD & pf->hw.debug_mask)) (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
...@@ -509,8 +509,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, ...@@ -509,8 +509,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
dev_info(&pdev->dev, dev_info(&pdev->dev,
"FD filter programming failed due to incorrect filter parameters\n"); "FD filter programming failed due to incorrect filter parameters\n");
} }
} else if (error == } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
(0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask) if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
rx_desc->wb.qword0.hi_dword.fd_id); rx_desc->wb.qword0.hi_dword.fd_id);
...@@ -1363,7 +1362,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ...@@ -1363,7 +1362,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
return; return;
/* did the hardware decode the packet and checksum? */ /* did the hardware decode the packet and checksum? */
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return; return;
/* both known and outer_ip must be set for the below code to work */ /* both known and outer_ip must be set for the below code to work */
...@@ -1378,25 +1377,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ...@@ -1378,25 +1377,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
ipv6 = true; ipv6 = true;
if (ipv4 && if (ipv4 &&
(rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
(1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))) BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
goto checksum_fail; goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */ /* likely incorrect csum if alternate IP extension headers found */
if (ipv6 && if (ipv6 &&
rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
/* don't increment checksum err here, non-fatal err */ /* don't increment checksum err here, non-fatal err */
return; return;
/* there was some L4 error, count error and punt packet to the stack */ /* there was some L4 error, count error and punt packet to the stack */
if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)) if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
goto checksum_fail; goto checksum_fail;
/* handle packets that were not able to be checksummed due /* handle packets that were not able to be checksummed due
* to arrival speed, in this case the stack can compute * to arrival speed, in this case the stack can compute
* the csum. * the csum.
*/ */
if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT)) if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return; return;
/* If VXLAN traffic has an outer UDPv4 checksum we need to check /* If VXLAN traffic has an outer UDPv4 checksum we need to check
...@@ -1520,7 +1519,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1520,7 +1519,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT; I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break; break;
/* This memory barrier is needed to keep us from reading /* This memory barrier is needed to keep us from reading
...@@ -1561,8 +1560,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1561,8 +1560,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT; I40E_RXD_QW1_ERROR_SHIFT;
rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT); rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT; I40E_RXD_QW1_PTYPE_SHIFT;
...@@ -1614,7 +1613,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1614,7 +1613,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i); I40E_RX_INCREMENT(rx_ring, i);
if (unlikely( if (unlikely(
!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
struct i40e_rx_buffer *next_buffer; struct i40e_rx_buffer *next_buffer;
next_buffer = &rx_ring->rx_bi[i]; next_buffer = &rx_ring->rx_bi[i];
...@@ -1624,7 +1623,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1624,7 +1623,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
} }
/* ERR_MASK will only have valid bits if EOP set */ /* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
continue; continue;
} }
...@@ -1646,7 +1645,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1646,7 +1645,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0; : 0;
#ifdef I40E_FCOE #ifdef I40E_FCOE
...@@ -1707,7 +1706,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1707,7 +1706,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT; I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break; break;
/* This memory barrier is needed to keep us from reading /* This memory barrier is needed to keep us from reading
...@@ -1730,7 +1729,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1730,7 +1729,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT; I40E_RXD_QW1_ERROR_SHIFT;
rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT; I40E_RXD_QW1_PTYPE_SHIFT;
...@@ -1748,13 +1747,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1748,13 +1747,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i); I40E_RX_INCREMENT(rx_ring, i);
if (unlikely( if (unlikely(
!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
rx_ring->rx_stats.non_eop_descs++; rx_ring->rx_stats.non_eop_descs++;
continue; continue;
} }
/* ERR_MASK will only have valid bits if EOP set */ /* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
/* TODO: shouldn't we increment a counter indicating the /* TODO: shouldn't we increment a counter indicating the
* drop? * drop?
...@@ -1779,7 +1778,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1779,7 +1778,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0; : 0;
#ifdef I40E_FCOE #ifdef I40E_FCOE
......
...@@ -66,17 +66,17 @@ enum i40e_dyn_idx_t { ...@@ -66,17 +66,17 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */ /* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \ #define I40E_DEFAULT_RSS_HENA ( \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD)) BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
/* Supported Rx Buffer Sizes */ /* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */ #define I40E_RXBUFFER_512 512 /* Used for packet split */
...@@ -129,17 +129,17 @@ enum i40e_dyn_idx_t { ...@@ -129,17 +129,17 @@ enum i40e_dyn_idx_t {
#define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4 #define I40E_MIN_DESC_PENDING 4
#define I40E_TX_FLAGS_CSUM (u32)(1) #define I40E_TX_FLAGS_CSUM BIT(0)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) #define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2) #define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO (u32)(1 << 3) #define I40E_TX_FLAGS_TSO BIT(3)
#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4) #define I40E_TX_FLAGS_IPV4 BIT(4)
#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) #define I40E_TX_FLAGS_IPV6 BIT(5)
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) #define I40E_TX_FLAGS_FCCRC BIT(6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7) #define I40E_TX_FLAGS_FSO BIT(7)
#define I40E_TX_FLAGS_TSYN (u32)(1 << 8) #define I40E_TX_FLAGS_TSYN BIT(8)
#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) #define I40E_TX_FLAGS_FD_SB BIT(9)
#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10) #define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
......
...@@ -611,7 +611,7 @@ enum i40e_rx_desc_status_bits { ...@@ -611,7 +611,7 @@ enum i40e_rx_desc_status_bits {
}; };
#define I40E_RXD_QW1_STATUS_SHIFT 0 #define I40E_RXD_QW1_STATUS_SHIFT 0
#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \ #define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
<< I40E_RXD_QW1_STATUS_SHIFT) << I40E_RXD_QW1_STATUS_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
...@@ -619,8 +619,8 @@ enum i40e_rx_desc_status_bits { ...@@ -619,8 +619,8 @@ enum i40e_rx_desc_status_bits {
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \ #define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
enum i40e_rx_desc_fltstat_values { enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0, I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
...@@ -754,8 +754,7 @@ enum i40e_rx_ptype_payload_layer { ...@@ -754,8 +754,7 @@ enum i40e_rx_ptype_payload_layer {
I40E_RXD_QW1_LENGTH_HBUF_SHIFT) I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \ #define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
I40E_RXD_QW1_LENGTH_SPH_SHIFT)
enum i40e_rx_desc_ext_status_bits { enum i40e_rx_desc_ext_status_bits {
/* Note: These are predefined bit offsets */ /* Note: These are predefined bit offsets */
...@@ -931,12 +930,12 @@ enum i40e_tx_ctx_desc_eipt_offload { ...@@ -931,12 +930,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9 #define I40E_TXD_CTX_QW0_NATT_SHIFT 9
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \ #define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
...@@ -1001,8 +1000,8 @@ enum i40e_filter_program_desc_fd_status { ...@@ -1001,8 +1000,8 @@ enum i40e_filter_program_desc_fd_status {
}; };
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ #define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
...@@ -1020,8 +1019,7 @@ enum i40e_filter_program_desc_pcmd { ...@@ -1020,8 +1019,7 @@ enum i40e_filter_program_desc_pcmd {
#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) #define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \ #define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT) I40E_TXD_FLTR_QW1_CMD_SHIFT)
......
...@@ -277,16 +277,14 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, ...@@ -277,16 +277,14 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
} }
tempmap = vecmap->rxq_map; tempmap = vecmap->rxq_map;
for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 << linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id));
vsi_queue_id));
} }
tempmap = vecmap->txq_map; tempmap = vecmap->txq_map;
for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 << linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id vsi_queue_id + 1));
+ 1));
} }
next_q = find_first_bit(&linklistmap, next_q = find_first_bit(&linklistmap,
...@@ -332,7 +330,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, ...@@ -332,7 +330,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
reg = (vector_id) | reg = (vector_id) |
(qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
(pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
(1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
(itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
wr32(hw, reg_idx, reg); wr32(hw, reg_idx, reg);
} }
...@@ -897,7 +895,7 @@ void i40e_free_vfs(struct i40e_pf *pf) ...@@ -897,7 +895,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
for (vf_id = 0; vf_id < tmp; vf_id++) { for (vf_id = 0; vf_id < tmp; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
} }
} }
clear_bit(__I40E_VF_DISABLE, &pf->state); clear_bit(__I40E_VF_DISABLE, &pf->state);
...@@ -1983,9 +1981,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) ...@@ -1983,9 +1981,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* read GLGEN_VFLRSTAT register to find out the flr VFs */ /* read GLGEN_VFLRSTAT register to find out the flr VFs */
vf = &pf->vf[vf_id]; vf = &pf->vf[vf_id];
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
if (reg & (1 << bit_idx)) { if (reg & BIT(bit_idx)) {
/* clear the bit in GLGEN_VFLRSTAT */ /* clear the bit in GLGEN_VFLRSTAT */
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
if (!test_bit(__I40E_DOWN, &pf->state)) if (!test_bit(__I40E_DOWN, &pf->state))
i40e_reset_vf(vf, true); i40e_reset_vf(vf, true);
......
...@@ -127,8 +127,8 @@ struct i40e_hmc_info { ...@@ -127,8 +127,8 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \
...@@ -147,7 +147,7 @@ struct i40e_hmc_info { ...@@ -147,7 +147,7 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \
......
...@@ -850,7 +850,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ...@@ -850,7 +850,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
return; return;
/* did the hardware decode the packet and checksum? */ /* did the hardware decode the packet and checksum? */
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return; return;
/* both known and outer_ip must be set for the below code to work */ /* both known and outer_ip must be set for the below code to work */
...@@ -865,25 +865,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ...@@ -865,25 +865,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
ipv6 = true; ipv6 = true;
if (ipv4 && if (ipv4 &&
(rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
(1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))) BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
goto checksum_fail; goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */ /* likely incorrect csum if alternate IP extension headers found */
if (ipv6 && if (ipv6 &&
rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
/* don't increment checksum err here, non-fatal err */ /* don't increment checksum err here, non-fatal err */
return; return;
/* there was some L4 error, count error and punt packet to the stack */ /* there was some L4 error, count error and punt packet to the stack */
if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)) if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
goto checksum_fail; goto checksum_fail;
/* handle packets that were not able to be checksummed due /* handle packets that were not able to be checksummed due
* to arrival speed, in this case the stack can compute * to arrival speed, in this case the stack can compute
* the csum. * the csum.
*/ */
if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT)) if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return; return;
/* If VXLAN traffic has an outer UDPv4 checksum we need to check /* If VXLAN traffic has an outer UDPv4 checksum we need to check
...@@ -1004,7 +1004,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1004,7 +1004,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT; I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break; break;
/* This memory barrier is needed to keep us from reading /* This memory barrier is needed to keep us from reading
...@@ -1040,8 +1040,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1040,8 +1040,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT; I40E_RXD_QW1_ERROR_SHIFT;
rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT); rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT; I40E_RXD_QW1_PTYPE_SHIFT;
...@@ -1093,7 +1093,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1093,7 +1093,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i); I40E_RX_INCREMENT(rx_ring, i);
if (unlikely( if (unlikely(
!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
struct i40e_rx_buffer *next_buffer; struct i40e_rx_buffer *next_buffer;
next_buffer = &rx_ring->rx_bi[i]; next_buffer = &rx_ring->rx_bi[i];
...@@ -1103,7 +1103,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1103,7 +1103,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
} }
/* ERR_MASK will only have valid bits if EOP set */ /* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
continue; continue;
} }
...@@ -1118,7 +1118,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1118,7 +1118,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0; : 0;
#ifdef I40E_FCOE #ifdef I40E_FCOE
...@@ -1179,7 +1179,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1179,7 +1179,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT; I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break; break;
/* This memory barrier is needed to keep us from reading /* This memory barrier is needed to keep us from reading
...@@ -1197,7 +1197,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1197,7 +1197,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT; I40E_RXD_QW1_ERROR_SHIFT;
rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT; I40E_RXD_QW1_PTYPE_SHIFT;
...@@ -1215,13 +1215,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1215,13 +1215,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i); I40E_RX_INCREMENT(rx_ring, i);
if (unlikely( if (unlikely(
!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
rx_ring->rx_stats.non_eop_descs++; rx_ring->rx_stats.non_eop_descs++;
continue; continue;
} }
/* ERR_MASK will only have valid bits if EOP set */ /* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
/* TODO: shouldn't we increment a counter indicating the /* TODO: shouldn't we increment a counter indicating the
* drop? * drop?
...@@ -1239,7 +1239,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1239,7 +1239,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0; : 0;
i40e_receive_skb(rx_ring, skb, vlan_tag); i40e_receive_skb(rx_ring, skb, vlan_tag);
...@@ -1314,8 +1314,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, ...@@ -1314,8 +1314,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
if (!test_bit(__I40E_DOWN, &vsi->state)) if (!test_bit(__I40E_DOWN, &vsi->state))
wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val); wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
} else { } else {
i40evf_irq_enable_queues(vsi->back, i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
1 << q_vector->v_idx);
} }
} }
......
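For context, the BIT() and BIT_ULL() helpers that the hunks above and below switch to come from include/linux/bitops.h; a rough sketch of their definitions is reproduced here purely for reference (that header is not part of the change shown):

#define BIT(nr)      (1UL << (nr))
#define BIT_ULL(nr)  (1ULL << (nr))

BIT() therefore evaluates in unsigned long, while BIT_ULL() evaluates in unsigned long long, which is what makes the latter the right choice for the 64-bit flag words and descriptor fields in the following files.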
...@@ -66,17 +66,17 @@ enum i40e_dyn_idx_t { ...@@ -66,17 +66,17 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */ /* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \ #define I40E_DEFAULT_RSS_HENA ( \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD)) BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
/* Supported Rx Buffer Sizes */ /* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */ #define I40E_RXBUFFER_512 512 /* Used for packet split */
...@@ -129,16 +129,16 @@ enum i40e_dyn_idx_t { ...@@ -129,16 +129,16 @@ enum i40e_dyn_idx_t {
#define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4 #define I40E_MIN_DESC_PENDING 4
#define I40E_TX_FLAGS_CSUM (u32)(1) #define I40E_TX_FLAGS_CSUM BIT(0)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) #define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2) #define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO (u32)(1 << 3) #define I40E_TX_FLAGS_TSO BIT(3)
#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4) #define I40E_TX_FLAGS_IPV4 BIT(4)
#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) #define I40E_TX_FLAGS_IPV6 BIT(5)
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) #define I40E_TX_FLAGS_FCCRC BIT(6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7) #define I40E_TX_FLAGS_FSO BIT(7)
#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) #define I40E_TX_FLAGS_FD_SB BIT(9)
#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10) #define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
......
...@@ -605,7 +605,7 @@ enum i40e_rx_desc_status_bits { ...@@ -605,7 +605,7 @@ enum i40e_rx_desc_status_bits {
}; };
#define I40E_RXD_QW1_STATUS_SHIFT 0 #define I40E_RXD_QW1_STATUS_SHIFT 0
#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \ #define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
<< I40E_RXD_QW1_STATUS_SHIFT) << I40E_RXD_QW1_STATUS_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
...@@ -613,8 +613,8 @@ enum i40e_rx_desc_status_bits { ...@@ -613,8 +613,8 @@ enum i40e_rx_desc_status_bits {
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \ #define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
enum i40e_rx_desc_fltstat_values { enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0, I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
...@@ -748,8 +748,7 @@ enum i40e_rx_ptype_payload_layer { ...@@ -748,8 +748,7 @@ enum i40e_rx_ptype_payload_layer {
I40E_RXD_QW1_LENGTH_HBUF_SHIFT) I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \ #define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
I40E_RXD_QW1_LENGTH_SPH_SHIFT)
enum i40e_rx_desc_ext_status_bits { enum i40e_rx_desc_ext_status_bits {
/* Note: These are predefined bit offsets */ /* Note: These are predefined bit offsets */
...@@ -925,12 +924,12 @@ enum i40e_tx_ctx_desc_eipt_offload { ...@@ -925,12 +924,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9 #define I40E_TXD_CTX_QW0_NATT_SHIFT 9
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \ #define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
...@@ -995,8 +994,8 @@ enum i40e_filter_program_desc_fd_status { ...@@ -995,8 +994,8 @@ enum i40e_filter_program_desc_fd_status {
}; };
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ #define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
...@@ -1014,8 +1013,7 @@ enum i40e_filter_program_desc_pcmd { ...@@ -1014,8 +1013,7 @@ enum i40e_filter_program_desc_pcmd {
#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) #define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \ #define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT) I40E_TXD_FLTR_QW1_CMD_SHIFT)
......
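A minimal standalone sketch (plain userspace C with local copies of the macros, and a shift value chosen to mirror I40E_RXD_QW1_LENGTH_SPH_SHIFT above) of why the ULL variant is the right tool once a shift count can reach bit 63: shifting a plain int by 32 or more is undefined behaviour, whereas BIT_ULL() keeps the whole expression in unsigned long long.

#include <stdio.h>

/* Local stand-ins mirroring the kernel helpers, for illustration only. */
#define BIT(nr)      (1UL << (nr))
#define BIT_ULL(nr)  (1ULL << (nr))

#define SPH_SHIFT 63   /* same value as I40E_RXD_QW1_LENGTH_SPH_SHIFT */

int main(void)
{
        /* A fake descriptor qword with the SPH bit and a small length set. */
        unsigned long long qword = BIT_ULL(SPH_SHIFT) | 0x42;

        /* A bare (1 << SPH_SHIFT) would shift a 32-bit int past its width,
         * which is undefined behaviour; BIT_ULL() evaluates in
         * unsigned long long, so the mask test below is well defined.
         */
        if (qword & BIT_ULL(SPH_SHIFT))
                printf("SPH bit set in descriptor qword 0x%llx\n", qword);

        return 0;
}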
...@@ -207,17 +207,17 @@ struct i40evf_adapter { ...@@ -207,17 +207,17 @@ struct i40evf_adapter {
struct msix_entry *msix_entries; struct msix_entry *msix_entries;
u32 flags; u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1) #define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1) #define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1)
#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2) #define I40EVF_FLAG_RX_PS_CAPABLE BIT(2)
#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3) #define I40EVF_FLAG_RX_PS_ENABLED BIT(3)
#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4) #define I40EVF_FLAG_IN_NETPOLL BIT(4)
#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5) #define I40EVF_FLAG_IMIR_ENABLED BIT(5)
#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6) #define I40EVF_FLAG_MQ_CAPABLE BIT(6)
#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7) #define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8) #define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9) #define I40EVF_FLAG_RESET_PENDING BIT(9)
#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10) #define I40EVF_FLAG_RESET_NEEDED BIT(10)
/* duplcates for common code */ /* duplcates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0 #define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0 #define I40E_FLAG_DCB_ENABLED 0
...@@ -225,16 +225,16 @@ struct i40evf_adapter { ...@@ -225,16 +225,16 @@ struct i40evf_adapter {
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
/* flags for admin queue service task */ /* flags for admin queue service task */
u32 aq_required; u32 aq_required;
#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1) #define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) #define I40EVF_FLAG_AQ_DISABLE_QUEUES BIT(1)
#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) #define I40EVF_FLAG_AQ_ADD_MAC_FILTER BIT(2)
#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3) #define I40EVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3)
#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4) #define I40EVF_FLAG_AQ_DEL_MAC_FILTER BIT(4)
#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5) #define I40EVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5)
#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) #define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) #define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7)
#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) #define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8)
#define I40EVF_FLAG_AQ_GET_CONFIG (u32)(1 << 10) #define I40EVF_FLAG_AQ_GET_CONFIG BIT(10)
/* OS defined structs */ /* OS defined structs */
struct net_device *netdev; struct net_device *netdev;
......
...@@ -381,11 +381,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, ...@@ -381,11 +381,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
switch (cmd->flow_type) { switch (cmd->flow_type) {
case TCP_V4_FLOW: case TCP_V4_FLOW:
if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break; break;
case UDP_V4_FLOW: case UDP_V4_FLOW:
if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break; break;
...@@ -397,11 +397,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, ...@@ -397,11 +397,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
break; break;
case TCP_V6_FLOW: case TCP_V6_FLOW:
if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break; break;
case UDP_V6_FLOW: case UDP_V6_FLOW:
if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break; break;
...@@ -479,10 +479,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, ...@@ -479,10 +479,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case TCP_V4_FLOW: case TCP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0: case 0:
hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break; break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3): case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -491,10 +491,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, ...@@ -491,10 +491,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case TCP_V6_FLOW: case TCP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0: case 0:
hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break; break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3): case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -503,12 +503,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, ...@@ -503,12 +503,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case UDP_V4_FLOW: case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0: case 0:
hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break; break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3): case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -517,12 +517,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, ...@@ -517,12 +517,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case UDP_V6_FLOW: case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0: case 0:
hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break; break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3): case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -535,7 +535,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, ...@@ -535,7 +535,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
if ((nfc->data & RXH_L4_B_0_1) || if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3)) (nfc->data & RXH_L4_B_2_3))
return -EINVAL; return -EINVAL;
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
break; break;
case AH_ESP_V6_FLOW: case AH_ESP_V6_FLOW:
case AH_V6_FLOW: case AH_V6_FLOW:
...@@ -544,15 +544,15 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, ...@@ -544,15 +544,15 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
if ((nfc->data & RXH_L4_B_0_1) || if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3)) (nfc->data & RXH_L4_B_2_3))
return -EINVAL; return -EINVAL;
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
break; break;
case IPV4_FLOW: case IPV4_FLOW:
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4); BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break; break;
case IPV6_FLOW: case IPV6_FLOW:
hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break; break;
default: default:
return -EINVAL; return -EINVAL;
......
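The ethtool hunks above all follow the same set/clear pattern on the 64-bit hash-enable (hena) word; below is a minimal sketch of that pattern as standalone C, with a local macro copy and hypothetical bit numbers standing in for the real I40E_FILTER_PCTYPE_* values.

#include <stdio.h>

#define BIT_ULL(nr)  (1ULL << (nr))   /* local stand-in for the kernel macro */

/* Hypothetical bit positions; the real I40E_FILTER_PCTYPE_* values differ. */
#define PCTYPE_IPV4_TCP  31
#define PCTYPE_IPV4_UDP  41

int main(void)
{
        unsigned long long hena = 0;

        hena |= BIT_ULL(PCTYPE_IPV4_TCP);    /* enable TCP/IPv4 hashing */
        hena |= BIT_ULL(PCTYPE_IPV4_UDP);    /* enable UDP/IPv4 hashing */
        hena &= ~BIT_ULL(PCTYPE_IPV4_UDP);   /* ...and turn it back off */

        if (hena & BIT_ULL(PCTYPE_IPV4_TCP))
                printf("TCP/IPv4 hashing enabled, hena = 0x%llx\n", hena);

        return 0;
}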
...@@ -240,7 +240,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) ...@@ -240,7 +240,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
int i; int i;
for (i = 1; i < adapter->num_msix_vectors; i++) { for (i = 1; i < adapter->num_msix_vectors; i++) {
if (mask & (1 << (i - 1))) { if (mask & BIT(i - 1)) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
I40E_VFINT_DYN_CTLN1_INTENA_MASK | I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
...@@ -268,7 +268,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) ...@@ -268,7 +268,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl); wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
} }
for (i = 1; i < adapter->num_msix_vectors; i++) { for (i = 1; i < adapter->num_msix_vectors; i++) {
if (mask & (1 << i)) { if (mask & BIT(i)) {
dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1)); dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
...@@ -377,7 +377,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) ...@@ -377,7 +377,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.count++; q_vector->tx.count++;
q_vector->tx.latency_range = I40E_LOW_LATENCY; q_vector->tx.latency_range = I40E_LOW_LATENCY;
q_vector->num_ringpairs++; q_vector->num_ringpairs++;
q_vector->ring_mask |= (1 << t_idx); q_vector->ring_mask |= BIT(t_idx);
} }
/** /**
...@@ -2320,7 +2320,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -2320,7 +2320,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = &adapter->hw; hw = &adapter->hw;
hw->back = adapter; hw->back = adapter;
adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
adapter->state = __I40EVF_STARTUP; adapter->state = __I40EVF_STARTUP;
/* Call save state here because it relies on the adapter struct. */ /* Call save state here because it relies on the adapter struct. */
......
...@@ -294,7 +294,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) ...@@ -294,7 +294,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
} }
adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = (1 << adapter->num_active_queues) - 1; vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues; vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
...@@ -319,7 +319,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) ...@@ -319,7 +319,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
} }
adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = (1 << adapter->num_active_queues) - 1; vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues; vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
......
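The queue hunks just above (and the msg_enable assignment earlier) rely on the BIT(n) - 1 idiom: subtracting one from a single set bit yields a mask with the n low bits set, i.e. one enable bit per active queue. A small sketch with a hypothetical queue count:

#include <stdio.h>

#define BIT(nr)  (1UL << (nr))   /* local stand-in for the kernel macro */

int main(void)
{
        unsigned int num_active_queues = 4;              /* hypothetical */
        unsigned long tx_queues = BIT(num_active_queues) - 1;

        /* BIT(4) - 1 == 0x10 - 1 == 0x0f: queues 0..3 are enabled. */
        printf("tx queue mask: 0x%lx\n", tx_queues);
        return 0;
}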