Commit c215dae4 authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2018-01-09

This series contains updates to ixgbe and ixgbevf only.

Emil fixes an issue with Wake-on-LAN (WoL) where we need to ensure we
enable the reception of multicast packets so that WoL works for IPv6
magic packets.  He also cleaned up code that is no longer needed after
the update to adaptive ITR.
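
The ixgbevf hunk at the end of this diff shows the shape of that change:
the old single ternary is replaced by an explicit precedence chain that
always requests the most inclusive receive mode the interface needs.  A
sketch of the resulting logic, reconstructed from the visible hunk rather
than copied verbatim from the patch:

	/* request the most inclusive mode we need */
	if (flags & IFF_PROMISC)
		xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
	else if (flags & IFF_ALLMULTI)
		xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
	else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
	else
		xcast_mode = IXGBEVF_XCAST_MODE_NONE;

Keeping multicast reception on whenever IFF_MULTICAST is set is what lets
IPv6 magic packets, which arrive on multicast addresses, reach the wake
logic.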

Paul updates the driver to advertise the highest link speed the module
is capable of when it is inserted.  He also extended the firmware
version display to include the iSCSI and OEM blocks in the EEPROM to
better identify firmware versions/images.
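
With the helpers added below (ixgbe_get_oem_prod_version(),
ixgbe_get_orom_version() and ixgbe_get_etk_id()), the combined string
stored in adapter->eeprom_id can be assembled in order of preference.  A
sketch of what that assembly might look like; the helper name
ixgbe_set_fw_version() and the exact format strings are assumptions, not
taken from the hunks shown here:

static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_nvm_version nvm_ver;

	/* prefer the OEM product version block when it is present */
	ixgbe_get_oem_prod_version(hw, &nvm_ver);
	if (nvm_ver.oem_valid) {
		snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
			 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
			 nvm_ver.oem_release);
		return;
	}

	ixgbe_get_etk_id(hw, &nvm_ver);
	ixgbe_get_orom_version(hw, &nvm_ver);

	/* Etrack ID plus option ROM version, when the latter is valid */
	if (nvm_ver.or_valid) {
		snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
			 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
			 nvm_ver.or_build, nvm_ver.or_patch);
		return;
	}

	/* fall back to the bare Etrack ID */
	snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
		 "0x%08x", nvm_ver.etk_id);
}

The ethtool and FCoE hunks below then simply strlcpy() this precomputed
string instead of re-deriving the version at query time.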

Tonghao Zhang cleans up a code comment that no longer applies since
InterruptThrottleRate has been removed from the driver.

Alex fixes the SR-IOV and MACVLAN offload interaction, where the
MACVLAN offload was incorrectly configuring several filters with the
wrong pool value, which resulted in MACVLAN interfaces not being able
to receive traffic that had to pass over the physical interface.  He
also fixed transmit hangs and dropped receive frames when the number
of VFs changed, added support for RSS on MACVLAN pools for X550
devices, fixed up the MACVLAN limitations so that we can now support
63 offloaded devices, and cleaned up MACVLAN code that is no longer
needed after the recent changes and fixes.
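
The 63-device figure falls out of the fwd_bitmask rework in the hunks
below: the mask grows from a single unsigned long into a bitmap of
IXGBE_MAX_MACVLANS + 1 = 64 pools, with bit 0 reserved for the PF's own
default pool.  Pool accounting then reduces to standard kernel bitmap
helpers; a minimal sketch of the allocation side (illustrative, not the
exact code from the series):

	/* bit 0 is the PF default pool, so up to 63 macvlan pools remain */
	pool = find_first_zero_bit(adapter->fwd_bitmask,
				   IXGBE_MAX_MACVLANS + 1);
	if (pool > IXGBE_MAX_MACVLANS)
		return -EBUSY;	/* every offload pool is already in use */
	set_bit(pool, adapter->fwd_bitmask);

VFs draw from the same 64-pool space, which is why the SR-IOV enable
path below now checks the requested num_vfs against limit - num_rx_pools
instead of a fixed constant per traffic-class count.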
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 61ad6408 68ae7424
@@ -333,7 +333,6 @@ struct ixgbe_ring {
struct net_device *netdev; /* netdev ring belongs to */
struct bpf_prog *xdp_prog;
struct device *dev; /* device for DMA mapping */
struct ixgbe_fwd_adapter *l2_accel_priv;
void *desc; /* descriptor ring memory */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
@@ -397,8 +396,7 @@ enum ixgbe_ring_f_enum {
#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
#define IXGBE_MAX_MACVLANS 31
#define IXGBE_MAX_DCBMACVLANS 8
#define IXGBE_MAX_MACVLANS 63
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
@@ -723,8 +721,7 @@ struct ixgbe_adapter {
u16 bridge_mode;
u16 eeprom_verh;
u16 eeprom_verl;
char eeprom_id[NVM_VER_SIZE];
u16 eeprom_cap;
u32 interrupt_event;
@@ -768,7 +765,8 @@ struct ixgbe_adapter {
#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
/* Bitmask indicating in use pools */
DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);
#define IXGBE_MAX_LINK_HANDLE 10
struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
......
@@ -4028,6 +4028,118 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
return 0;
}
/**
* ixgbe_get_orom_version - Return option ROM version from EEPROM
*
* @hw: pointer to hardware structure
* @nvm_ver: pointer to output structure
*
* If a valid option ROM version is found, nvm_ver->or_valid is set to true;
* otherwise nvm_ver->or_valid is false.
**/
void ixgbe_get_orom_version(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver)
{
u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
nvm_ver->or_valid = false;
/* Option ROM may or may not be present. Start with pointer */
hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
/* make sure offset is valid */
if (offset == 0x0 || offset == NVM_INVALID_PTR)
return;
hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
/* option ROM exists and is valid */
if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
eeprom_cfg_blkl == NVM_VER_INVALID ||
eeprom_cfg_blkh == NVM_VER_INVALID)
return;
nvm_ver->or_valid = true;
nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
(eeprom_cfg_blkh >> NVM_OROM_SHIFT);
nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
}
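/* Worked example of the packing above (hypothetical EEPROM words): with
 * eeprom_cfg_blkl = 0x0623 and eeprom_cfg_blkh = 0x4005,
 *
 *   or_major = 0x0623 >> NVM_OROM_SHIFT                  = 0x06
 *   or_build = ((0x0623 << 8) | (0x4005 >> 8)) & 0xFFFF  = 0x2340
 *   or_patch = 0x4005 & NVM_OROM_PATCH_MASK              = 0x05
 *
 * i.e. the build number spans the low byte of the low word and the high
 * byte of the high word.
 */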
/**
* ixgbe_get_oem_prod_version - Return OEM product version from EEPROM
*
* @hw: pointer to hardware structure
* @nvm_ver: pointer to output structure
*
* If a valid OEM product version is found, nvm_ver->oem_valid is set to true;
* otherwise nvm_ver->oem_valid is false.
**/
void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver)
{
u16 rel_num, prod_ver, mod_len, cap, offset;
nvm_ver->oem_valid = false;
hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
/* Return if offset to OEM Product Version block is invalid */
if (offset == 0x0 || offset == NVM_INVALID_PTR)
return;
/* Read product version block */
hw->eeprom.ops.read(hw, offset, &mod_len);
hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
/* Return if OEM product version block is invalid */
if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
(cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
return;
hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
/* Return if version is invalid */
if ((rel_num | prod_ver) == 0x0 ||
rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
return;
nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
nvm_ver->oem_release = rel_num;
nvm_ver->oem_valid = true;
}
/**
* ixgbe_get_etk_id - Return Etrack ID from EEPROM
*
* @hw: pointer to hardware structure
* @nvm_ver: pointer to output structure
*
* Word read errors will return 0xFFFF.
**/
void ixgbe_get_etk_id(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver)
{
u16 etk_id_l, etk_id_h;
if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
etk_id_l = NVM_VER_INVALID;
if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
etk_id_h = NVM_VER_INVALID;
/* The word order for the version format is determined by high order
* word bit 15.
*/
if ((etk_id_h & NVM_ETK_VALID) == 0) {
nvm_ver->etk_id = etk_id_h;
nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
} else {
nvm_ver->etk_id = etk_id_l;
nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
}
}
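/* Worked example of the word-order rule above (hypothetical words):
 *
 *   etk_id_h = 0x8001, etk_id_l = 0x2345 (NVM_ETK_VALID bit set)
 *     -> etk_id = (0x8001 << NVM_ETK_SHIFT) | 0x2345 = 0x80012345
 *   etk_id_h = 0x0001, etk_id_l = 0x2345 (NVM_ETK_VALID bit clear)
 *     -> etk_id = (0x2345 << NVM_ETK_SHIFT) | 0x0001 = 0x23450001
 */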
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
u32 rxctrl;
......
@@ -139,6 +139,12 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
void ixgbe_get_etk_id(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_get_orom_version(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
......
@@ -1014,16 +1014,13 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u32 nvm_track_id;
strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ixgbe_driver_version,
sizeof(drvinfo->version));
nvm_track_id = (adapter->eeprom_verh << 16) |
adapter->eeprom_verl;
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
nvm_track_id);
strlcpy(drvinfo->fw_version, adapter->eeprom_id,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
......
@@ -1034,11 +1034,8 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
ixgbe_driver_name,
ixgbe_driver_version);
/* Firmware Version */
snprintf(info->firmware_version,
sizeof(info->firmware_version),
"0x%08x",
(adapter->eeprom_verh << 16) |
adapter->eeprom_verl);
strlcpy(info->firmware_version, adapter->eeprom_id,
sizeof(info->firmware_version));
/* Model */
if (hw->mac.type == ixgbe_mac_82599EB) {
......
@@ -350,6 +350,9 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
/* limit VMDq instances on the PF by number of Tx queues */
vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);
/* Add starting offset to total pool count */
vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
@@ -512,12 +515,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
u16 fcoe_i = 0;
#endif
bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
/* only proceed if SR-IOV is enabled */
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
/* limit l2fwd RSS based on total Tx queue limit */
rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);
/* Add starting offset to total pool count */
vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
@@ -525,7 +530,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
/* 64 pool mode with 2 queues per pool */
if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
if (vmdq_i > 32) {
vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
rss_m = IXGBE_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2);
@@ -701,7 +706,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
adapter->num_xdp_queues = 0;
adapter->num_rx_pools = adapter->num_rx_queues;
adapter->num_rx_pools = 1;
adapter->num_rx_queues_per_pool = 1;
#ifdef CONFIG_IXGBE_DCB
......
@@ -227,9 +227,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
struct ixgbe_hw *hw = &adapter->hw;
u32 gpie;
u32 vmdctl;
int rss;
/* set num VFs to 0 to prevent access to vfinfo */
@@ -271,18 +268,6 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
pci_disable_sriov(adapter->pdev);
#endif
/* turn off device IOV mode */
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
/* set default pool back to 0 */
vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
IXGBE_WRITE_FLUSH(hw);
/* Disable VMDq flag so device will be set in VM mode */
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
@@ -305,10 +290,9 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
int err = 0;
u8 num_tc;
int i;
int pre_existing_vfs = pci_num_vf(dev);
int err = 0, num_rx_pools, i, limit;
u8 num_tc;
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
err = ixgbe_disable_sriov(adapter);
@@ -331,22 +315,14 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* other values out of range.
*/
num_tc = netdev_get_num_tc(adapter->netdev);
num_rx_pools = adapter->num_rx_pools;
limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
(num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
if (num_tc > 4) {
if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
return -EPERM;
}
} else if ((num_tc > 1) && (num_tc <= 4)) {
if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
return -EPERM;
}
} else {
if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
return -EPERM;
}
if (num_vfs > (limit - num_rx_pools)) {
e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
num_tc, num_rx_pools - 1, limit - num_rx_pools);
return -EPERM;
}
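/* Worked example (constants assumed, not shown in this hunk): with
 * num_tc = 1 the limit is IXGBE_MAX_VFS_1TC = 64 pools; if three
 * macvlans are offloaded then num_rx_pools = 4 (PF pool + 3), so at
 * most 60 VFs can be created before the check above fails with -EPERM.
 */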
err = __ixgbe_enable_sriov(adapter, num_vfs);
@@ -378,13 +354,15 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
int err;
#ifdef CONFIG_PCI_IOV
u32 current_flags = adapter->flags;
int prev_num_vf = pci_num_vf(dev);
#endif
err = ixgbe_disable_sriov(adapter);
/* Only reinit if no error and state changed */
#ifdef CONFIG_PCI_IOV
if (!err && current_flags != adapter->flags)
if (!err && (current_flags != adapter->flags ||
prev_num_vf != pci_num_vf(dev)))
ixgbe_sriov_reinit(adapter);
#endif
......
@@ -235,6 +235,45 @@ struct ixgbe_thermal_sensor_data {
struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
};
#define NVM_OROM_OFFSET 0x17
#define NVM_OROM_BLK_LOW 0x83
#define NVM_OROM_BLK_HI 0x84
#define NVM_OROM_PATCH_MASK 0xFF
#define NVM_OROM_SHIFT 8
#define NVM_VER_MASK 0x00FF /* version mask */
#define NVM_VER_SHIFT 8 /* version bit shift */
#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */
#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */
#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */
#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */
#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
#define NVM_ETK_OFF_LOW 0x2D /* version low order word */
#define NVM_ETK_OFF_HI 0x2E /* version high order word */
#define NVM_ETK_SHIFT 16 /* high version word shift */
#define NVM_VER_INVALID 0xFFFF
#define NVM_ETK_VALID 0x8000
#define NVM_INVALID_PTR 0xFFFF
#define NVM_VER_SIZE 32 /* version string size */
struct ixgbe_nvm_version {
u32 etk_id;
u8 nvm_major;
u16 nvm_minor;
u8 nvm_id;
bool oem_valid;
u8 oem_major;
u8 oem_minor;
u16 oem_release;
bool or_valid;
u8 or_major;
u16 or_build;
u8 or_patch;
};
/* Interrupt Registers */
#define IXGBE_EICR 0x00800
#define IXGBE_EICS 0x00808
......
@@ -1896,10 +1896,6 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
unsigned int flags = netdev->flags;
int xcast_mode;
xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
(flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
/* request the most inclusive mode we need */
if (flags & IFF_PROMISC)
xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
......