Commit 39438490 authored by Jakub Kicinski

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
1GbE Intel Wired LAN Driver Updates 2019-10-21

This series contains updates to e1000e and igc only.

Sasha adds stream control transmission protocol (SCTP) CRC checksum
support for igc and S0ix support to the e1000e driver.  He then adds
multicast support for igc by writing the address list to the MTA table,
including support for IPv6 multicast addresses, and adds receive
checksum support to igc as well.  Lastly, he cleans up code for the
VLAN filter table array that was never fully implemented.

v2: Dropped patches 1 and 2 from the original series.  Patch 1 is being
    sent to the 'net' tree as a fix, and the patch 2 implementation needs
    to be re-worked.  Updated the S0ix patch to fix the reverse Xmas tree
    issues and made the entry/exit functions void since they always
    returned success.  All based on community feedback.
v3: Cleaned up patch 4 of the series based on community feedback.
    Cleaned up a stray comma in a code comment and removed the 'inline'
    from a function that the compiler would inline anyway.
====================
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
parents 985fd98a 70332577
@@ -6294,6 +6294,174 @@ static void e1000e_flush_lpic(struct pci_dev *pdev)
        pm_runtime_put_sync(netdev->dev.parent);
}

/* S0ix implementation */
static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 mac_data;
        u16 phy_data;

        /* Disable the periodic inband message,
         * don't request PCIe clock in K1 page770_17[10:9] = 10b
         */
        e1e_rphy(hw, HV_PM_CTRL, &phy_data);
        phy_data &= ~HV_PM_CTRL_K1_CLK_REQ;
        phy_data |= BIT(10);
        e1e_wphy(hw, HV_PM_CTRL, phy_data);

        /* Make sure we don't exit K1 every time a new packet arrives
         * 772_29[5] = 1 CS_Mode_Stay_In_K1
         */
        e1e_rphy(hw, I217_CGFREG, &phy_data);
        phy_data |= BIT(5);
        e1e_wphy(hw, I217_CGFREG, phy_data);

        /* Change the MAC/PHY interface to SMBus
         * Force the SMBus in PHY page769_23[0] = 1
         * Force the SMBus in MAC CTRL_EXT[11] = 1
         */
        e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
        phy_data |= CV_SMB_CTRL_FORCE_SMBUS;
        e1e_wphy(hw, CV_SMB_CTRL, phy_data);
        mac_data = er32(CTRL_EXT);
        mac_data |= E1000_CTRL_EXT_FORCE_SMBUS;
        ew32(CTRL_EXT, mac_data);

        /* DFT control: PHY bit: page769_20[0] = 1
         * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
         */
        e1e_rphy(hw, I82579_DFT_CTRL, &phy_data);
        phy_data |= BIT(0);
        e1e_wphy(hw, I82579_DFT_CTRL, phy_data);
        mac_data = er32(EXTCNF_CTRL);
        mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
        ew32(EXTCNF_CTRL, mac_data);

        /* Check MAC Tx/Rx packet buffer pointers.
         * Reset MAC Tx/Rx packet buffer pointers to suppress any
         * pending traffic indication that would prevent power gating.
         */
        mac_data = er32(TDFH);
        if (mac_data)
                ew32(TDFH, 0);
        mac_data = er32(TDFT);
        if (mac_data)
                ew32(TDFT, 0);
        mac_data = er32(TDFHS);
        if (mac_data)
                ew32(TDFHS, 0);
        mac_data = er32(TDFTS);
        if (mac_data)
                ew32(TDFTS, 0);
        mac_data = er32(TDFPC);
        if (mac_data)
                ew32(TDFPC, 0);
        mac_data = er32(RDFH);
        if (mac_data)
                ew32(RDFH, 0);
        mac_data = er32(RDFT);
        if (mac_data)
                ew32(RDFT, 0);
        mac_data = er32(RDFHS);
        if (mac_data)
                ew32(RDFHS, 0);
        mac_data = er32(RDFTS);
        if (mac_data)
                ew32(RDFTS, 0);
        mac_data = er32(RDFPC);
        if (mac_data)
                ew32(RDFPC, 0);

        /* Enable the Dynamic Power Gating in the MAC */
        mac_data = er32(FEXTNVM7);
        mac_data |= BIT(22);
        ew32(FEXTNVM7, mac_data);

        /* Disable the time synchronization clock */
        mac_data = er32(FEXTNVM7);
        mac_data |= BIT(31);
        mac_data &= ~BIT(0);
        ew32(FEXTNVM7, mac_data);

        /* Dynamic Power Gating Enable */
        mac_data = er32(CTRL_EXT);
        mac_data |= BIT(3);
        ew32(CTRL_EXT, mac_data);

        /* Enable the Dynamic Clock Gating in the DMA and MAC */
        mac_data = er32(CTRL_EXT);
        mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
        ew32(CTRL_EXT, mac_data);

        /* No MAC DPG gating SLP_S0 in modern standby
         * Switch the logic of the lanphypc to use PMC counter
         */
        mac_data = er32(FEXTNVM5);
        mac_data |= BIT(7);
        ew32(FEXTNVM5, mac_data);
}

static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 mac_data;
        u16 phy_data;

        /* Disable the Dynamic Power Gating in the MAC */
        mac_data = er32(FEXTNVM7);
        mac_data &= 0xFFBFFFFF;
        ew32(FEXTNVM7, mac_data);

        /* Enable the time synchronization clock */
        mac_data = er32(FEXTNVM7);
        mac_data |= BIT(0);
        ew32(FEXTNVM7, mac_data);

        /* Disable Dynamic Power Gating */
        mac_data = er32(CTRL_EXT);
        mac_data &= 0xFFFFFFF7;
        ew32(CTRL_EXT, mac_data);

        /* Disable the Dynamic Clock Gating in the DMA and MAC */
        mac_data = er32(CTRL_EXT);
        mac_data &= 0xFFF7FFFF;
        ew32(CTRL_EXT, mac_data);

        /* Revert the lanphypc logic to use the internal Gbe counter
         * and not the PMC counter
         */
        mac_data = er32(FEXTNVM5);
        mac_data &= 0xFFFFFF7F;
        ew32(FEXTNVM5, mac_data);

        /* Enable the periodic inband message,
         * Request PCIe clock in K1 page770_17[10:9] = 01b
         */
        e1e_rphy(hw, HV_PM_CTRL, &phy_data);
        phy_data &= 0xFBFF;
        phy_data |= HV_PM_CTRL_K1_CLK_REQ;
        e1e_wphy(hw, HV_PM_CTRL, phy_data);

        /* Return back configuration
         * 772_29[5] = 0 CS_Mode_Stay_In_K1
         */
        e1e_rphy(hw, I217_CGFREG, &phy_data);
        phy_data &= 0xFFDF;
        e1e_wphy(hw, I217_CGFREG, phy_data);

        /* Change the MAC/PHY interface to Kumeran
         * Unforce the SMBus in PHY page769_23[0] = 0
         * Unforce the SMBus in MAC CTRL_EXT[11] = 0
         */
        e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
        phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS;
        e1e_wphy(hw, CV_SMB_CTRL, phy_data);
        mac_data = er32(CTRL_EXT);
        mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
        ew32(CTRL_EXT, mac_data);
}

static int e1000e_pm_freeze(struct device *dev)
{
        struct net_device *netdev = dev_get_drvdata(dev);

@@ -6649,7 +6817,10 @@ static int e1000e_pm_thaw(struct device *dev)

static int e1000e_pm_suspend(struct device *dev)
{
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = to_pci_dev(dev);
        struct e1000_hw *hw = &adapter->hw;
        int rc;

        e1000e_flush_lpic(pdev);

@@ -6660,14 +6831,25 @@ static int e1000e_pm_suspend(struct device *dev)
        if (rc)
                e1000e_pm_thaw(dev);

        /* Introduce S0ix implementation */
        if (hw->mac.type >= e1000_pch_cnp)
                e1000e_s0ix_entry_flow(adapter);

        return rc;
}

static int e1000e_pm_resume(struct device *dev)
{
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = to_pci_dev(dev);
        struct e1000_hw *hw = &adapter->hw;
        int rc;

        /* Introduce S0ix implementation */
        if (hw->mac.type >= e1000_pch_cnp)
                e1000e_s0ix_exit_flow(adapter);

        rc = __e1000_resume(pdev);
        if (rc)
                return rc;
...

@@ -18,6 +18,7 @@
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM5 0x00014 /* Future Extended NVM 5 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */

@@ -234,4 +235,7 @@
#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */

/* PHY registers */
#define I82579_DFT_CTRL PHY_REG(769, 20)

#endif

@@ -411,7 +411,6 @@ struct igc_adapter {
        u32 tx_hwtstamp_timeouts;
        u32 tx_hwtstamp_skipped;
        u32 rx_hwtstamp_cleared;
        u32 *shadow_vfta;
        u32 rss_queues;
        u32 rss_indir_tbl_init;
...

@@ -283,6 +283,9 @@
/* Receive Descriptor bit definitions */
#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */

#define IGC_RXDEXT_STATERR_CE 0x01000000
#define IGC_RXDEXT_STATERR_SE 0x02000000

@@ -402,4 +405,7 @@
#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */
#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */

/* Maximum size of the MTA register table in all supported adapters */
#define MAX_MTA_REG 128

#endif /* _IGC_DEFINES_H_ */

@@ -91,6 +91,7 @@ struct igc_mac_info {
        u16 mta_reg_count;
        u16 uta_reg_count;
        u32 mta_shadow[MAX_MTA_REG];
        u16 rar_entry_count;
        u8 forced_speed_duplex;
...

@@ -784,3 +784,107 @@ bool igc_enable_mng_pass_thru(struct igc_hw *hw)
out:
        return ret_val;
}

/**
 * igc_hash_mc_addr - Generate a multicast hash value
 * @hw: pointer to the HW structure
 * @mc_addr: pointer to a multicast address
 *
 * Generates a multicast address hash value which is used to determine
 * the multicast filter table array address and new table value. See
 * igc_mta_set()
 **/
static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr)
{
        u32 hash_value, hash_mask;
        u8 bit_shift = 0;

        /* Register count multiplied by bits per register */
        hash_mask = (hw->mac.mta_reg_count * 32) - 1;

        /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
         * where 0xFF would still fall within the hash mask.
         */
        while (hash_mask >> bit_shift != 0xFF)
                bit_shift++;

        /* The portion of the address that is used for the hash table
         * is determined by the mc_filter_type setting.
         * The algorithm is such that there is a total of 8 bits of shifting.
         * The bit_shift for a mc_filter_type of 0 represents the number of
         * left-shifts where the MSB of mc_addr[5] would still fall within
         * the hash_mask.  Case 0 does this exactly.  Since there are a total
         * of 8 bits of shifting, then mc_addr[4] will shift right the
         * remaining number of bits.  Thus 8 - bit_shift.  The rest of the
         * cases are a variation of this algorithm...essentially raising the
         * number of bits to shift mc_addr[5] left, while still keeping the
         * 8-bit shifting total.
         *
         * For example, given the following Destination MAC Address and an
         * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask),
         * we can see that the bit_shift for case 0 is 4.  These are the hash
         * values resulting from each mc_filter_type...
         * [0] [1] [2] [3] [4] [5]
         * 01  AA  00  12  34  56
         * LSB                 MSB
         *
         * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
         * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
         * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x58D
         * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
         */
        switch (hw->mac.mc_filter_type) {
        default:
        case 0:
                break;
        case 1:
                bit_shift += 1;
                break;
        case 2:
                bit_shift += 2;
                break;
        case 3:
                bit_shift += 4;
                break;
        }

        hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
                                   (((u16)mc_addr[5]) << bit_shift)));

        return hash_value;
}
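
The worked example in the comment above can be checked outside the driver.
The following is an illustrative userspace sketch, not driver code: the
hash_mc_addr() helper is a hypothetical stand-in that mirrors the shift
logic of igc_hash_mc_addr(), with mta_reg_count and mc_filter_type passed
as plain parameters instead of through struct igc_hw.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical standalone mirror of igc_hash_mc_addr(), for illustration */
static uint32_t hash_mc_addr(const uint8_t *mc_addr, uint32_t mta_reg_count,
                             unsigned int mc_filter_type)
{
        uint32_t hash_mask = mta_reg_count * 32 - 1;    /* 0xFFF for 128 regs */
        unsigned int bit_shift = 0;

        while ((hash_mask >> bit_shift) != 0xFF)
                bit_shift++;            /* ends at 4 for a 4096-bit vector */
        /* filter types 0/1/2 add 0/1/2 extra shifts, type 3 adds 4 */
        bit_shift += (mc_filter_type == 3) ? 4 : mc_filter_type;

        return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
                            ((uint32_t)mc_addr[5] << bit_shift));
}

int main(void)
{
        const uint8_t mac[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
        unsigned int type;

        for (type = 0; type < 4; type++)
                printf("case %u: 0x%03X\n", type,
                       (unsigned int)hash_mc_addr(mac, 128, type));
        return 0;       /* prints 0x563, 0xAC6, 0x58D and 0x634 */
}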

/**
 * igc_update_mc_addr_list - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @mc_addr_list: array of multicast addresses to program
 * @mc_addr_count: number of multicast addresses to program
 *
 * Updates entire Multicast Table Array.
 * The caller must have a packed mc_addr_list of multicast addresses.
 **/
void igc_update_mc_addr_list(struct igc_hw *hw,
                             u8 *mc_addr_list, u32 mc_addr_count)
{
        u32 hash_value, hash_bit, hash_reg;
        int i;

        /* clear mta_shadow */
        memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

        /* update mta_shadow from mc_addr_list */
        for (i = 0; (u32)i < mc_addr_count; i++) {
                hash_value = igc_hash_mc_addr(hw, mc_addr_list);

                hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
                hash_bit = hash_value & 0x1F;

                hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
                mc_addr_list += ETH_ALEN;
        }

        /* replace the entire MTA table */
        for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
                array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]);
        wrfl();
}
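
Continuing the worked example: each 12-bit hash selects one bit in the
128 x 32 = 4096-bit MTA vector, with the upper bits as the register index
and the low five bits as the bit position. A hedged standalone fragment
showing the same indexing as the loop above (the value assumes the case-0
hash from the sketch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t hash_value = 0x563;    /* case 0 result from above */
        uint32_t hash_reg = (hash_value >> 5) & (128 - 1);  /* register 43 */
        uint32_t hash_bit = hash_value & 0x1F;              /* bit 3 */

        /* The driver sets mta_shadow[hash_reg] |= BIT(hash_bit), then the
         * final loop writes all 128 MTA registers high-to-low and flushes
         * the posted writes with wrfl().
         */
        printf("MTA register %u, bit %u\n", (unsigned int)hash_reg,
               (unsigned int)hash_bit);
        return 0;
}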

@@ -29,6 +29,8 @@ s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
                                    u16 *duplex);
bool igc_enable_mng_pass_thru(struct igc_hw *hw);
void igc_update_mc_addr_list(struct igc_hw *hw,
                             u8 *mc_addr_list, u32 mc_addr_count);

enum igc_mng_mode {
        igc_mng_mode_none = 0,
...

@@ -795,6 +795,44 @@ static int igc_set_mac(struct net_device *netdev, void *p)
        return 0;
}

/**
 * igc_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igc_write_mc_addr_list(struct net_device *netdev)
{
        struct igc_adapter *adapter = netdev_priv(netdev);
        struct igc_hw *hw = &adapter->hw;
        struct netdev_hw_addr *ha;
        u8 *mta_list;
        int i;

        if (netdev_mc_empty(netdev)) {
                /* nothing to program, so clear mc list */
                igc_update_mc_addr_list(hw, NULL, 0);
                return 0;
        }

        mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
        if (!mta_list)
                return -ENOMEM;

        /* The shared function expects a packed array of only addresses. */
        i = 0;
        netdev_for_each_mc_addr(ha, netdev)
                memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

        igc_update_mc_addr_list(hw, mta_list, i);
        kfree(mta_list);

        return netdev_mc_count(netdev);
}

static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
                            struct igc_tx_buffer *first,
                            u32 vlan_macip_lens, u32 type_tucmd,

@@ -1163,6 +1201,46 @@ static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
        return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}

static void igc_rx_checksum(struct igc_ring *ring,
                            union igc_adv_rx_desc *rx_desc,
                            struct sk_buff *skb)
{
        skb_checksum_none_assert(skb);

        /* Ignore Checksum bit is set */
        if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
                return;

        /* Rx checksum disabled via ethtool */
        if (!(ring->netdev->features & NETIF_F_RXCSUM))
                return;

        /* TCP/UDP checksum error bit is set */
        if (igc_test_staterr(rx_desc,
                             IGC_RXDEXT_STATERR_TCPE |
                             IGC_RXDEXT_STATERR_IPE)) {
                /* work around errata with sctp packets where the TCPE aka
                 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
                 * packets (aka let the stack check the crc32c)
                 */
                if (!(skb->len == 60 &&
                      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
                        u64_stats_update_begin(&ring->rx_syncp);
                        ring->rx_stats.csum_err++;
                        u64_stats_update_end(&ring->rx_syncp);
                }

                /* let the stack verify checksum errors */
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
                                      IGC_RXD_STAT_UDPCS))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        dev_dbg(ring->dev, "cksum success: bits %08X\n",
                le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igc_rx_hash(struct igc_ring *ring,
                               union igc_adv_rx_desc *rx_desc,
                               struct sk_buff *skb)

@@ -1189,6 +1267,8 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
{
        igc_rx_hash(rx_ring, rx_desc, skb);

        igc_rx_checksum(rx_ring, rx_desc, skb);

        skb_record_rx_queue(skb, rx_ring->queue_index);

        skb->protocol = eth_type_trans(skb, rx_ring->netdev);

@@ -2518,6 +2598,110 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
                                        IGC_MAC_STATE_QUEUE_STEERING | flags);
}

/* Add a MAC filter for 'addr' directing matching traffic to 'queue'.
 * The match is made on the destination address.
 */
static int igc_add_mac_filter(struct igc_adapter *adapter,
                              const u8 *addr, const u8 queue)
{
        struct igc_hw *hw = &adapter->hw;
        int rar_entries = hw->mac.rar_entry_count;
        int i;

        if (is_zero_ether_addr(addr))
                return -EINVAL;

        /* Search for the first empty entry in the MAC table.
         * Do not touch entries at the end of the table reserved for the VF MAC
         * addresses.
         */
        for (i = 0; i < rar_entries; i++) {
                if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
                                               addr, 0))
                        continue;

                ether_addr_copy(adapter->mac_table[i].addr, addr);
                adapter->mac_table[i].queue = queue;
                adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE;

                igc_rar_set_index(adapter, i);
                return i;
        }

        return -ENOSPC;
}

/* Remove the MAC filter for 'addr' directing matching traffic to 'queue'.
 * The match is made on the destination address.
 */
static int igc_del_mac_filter(struct igc_adapter *adapter,
                              const u8 *addr, const u8 queue)
{
        struct igc_hw *hw = &adapter->hw;
        int rar_entries = hw->mac.rar_entry_count;
        int i;

        if (is_zero_ether_addr(addr))
                return -EINVAL;

        /* Search for matching entry in the MAC table based on given address
         * and queue. Do not touch entries at the end of the table reserved
         * for the VF MAC addresses.
         */
        for (i = 0; i < rar_entries; i++) {
                if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
                        continue;
                if (adapter->mac_table[i].queue != queue)
                        continue;
                if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
                        continue;

                /* When a filter for the default address is "deleted",
                 * we return it to its initial configuration
                 */
                if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
                        adapter->mac_table[i].state =
                                IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
                        adapter->mac_table[i].queue = 0;
                } else {
                        adapter->mac_table[i].state = 0;
                        adapter->mac_table[i].queue = 0;
                        memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
                }

                igc_rar_set_index(adapter, i);
                return 0;
        }

        return -ENOENT;
}

static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
        struct igc_adapter *adapter = netdev_priv(netdev);
        int ret;

        ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues);

        return min_t(int, ret, 0);
}

static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
        struct igc_adapter *adapter = netdev_priv(netdev);

        igc_del_mac_filter(adapter, addr, adapter->num_rx_queues);

        return 0;
}

/**
 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure

@@ -2529,6 +2713,44 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
 */
static void igc_set_rx_mode(struct net_device *netdev)
{
        struct igc_adapter *adapter = netdev_priv(netdev);
        struct igc_hw *hw = &adapter->hw;
        u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
        int count;

        /* Check for Promiscuous and All Multicast modes */
        if (netdev->flags & IFF_PROMISC) {
                rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        rctl |= IGC_RCTL_MPE;
                } else {
                        /* Write addresses to the MTA, if the attempt fails
                         * then we should just turn on promiscuous mode so
                         * that we can at least receive multicast traffic
                         */
                        count = igc_write_mc_addr_list(netdev);
                        if (count < 0)
                                rctl |= IGC_RCTL_MPE;
                }
        }

        /* Write addresses to available RAR registers, if there is not
         * sufficient space to store all the addresses then enable
         * unicast promiscuous mode
         */
        if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
                rctl |= IGC_RCTL_UPE;

        /* update state of unicast and multicast */
        rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
        wr32(IGC_RCTL, rctl);

        /* With small pages the Rx buffers are sized for build_skb, so cap
         * the hardware max packet length accordingly.
         */
#if (PAGE_SIZE < 8192)
        if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
                rlpml = IGC_MAX_FRAME_BUILD_SKB;
#endif
        wr32(IGC_RLPML, rlpml);
}

/**

@@ -3982,6 +4204,7 @@ static const struct net_device_ops igc_netdev_ops = {
        .ndo_open               = igc_open,
        .ndo_stop               = igc_close,
        .ndo_start_xmit         = igc_xmit_frame,
        .ndo_set_rx_mode        = igc_set_rx_mode,
        .ndo_set_mac_address    = igc_set_mac,
        .ndo_change_mtu         = igc_change_mtu,
        .ndo_get_stats          = igc_get_stats,

@@ -4210,7 +4433,9 @@ static int igc_probe(struct pci_dev *pdev,
                goto err_sw_init;

        /* Add supported features to the features list */
        netdev->features |= NETIF_F_RXCSUM;
        netdev->features |= NETIF_F_HW_CSUM;
        netdev->features |= NETIF_F_SCTP_CRC;

        /* setup the private structure */
        err = igc_sw_init(adapter);

@@ -4348,7 +4573,6 @@ static void igc_remove(struct pci_dev *pdev)
        pci_release_mem_regions(pdev);
        kfree(adapter->mac_table);
        kfree(adapter->shadow_vfta);
        free_netdev(netdev);

        pci_disable_pcie_error_reporting(pdev);
...