Commit 168882d4 authored by David S. Miller's avatar David S. Miller

Merge branch 'net-wangxun-more-ethtool'

Jiawen Wu says:

====================
Implement more ethtool_ops for Wangxun

Provide ethtool functions to operate pause param, ring param, coalesce
channel number and msglevel, for driver txgbe/ngbe.

v6 -> v7:
- Rebase on net-next.

v5 -> v6:
- Minor fixes address on Jakub Kicinski's comments.

v4 -> v5:
- Fix build error reported by kernel test robot.

v3 -> v4:
- Repartition the patches of phylink.
- Handle failure to allocate memory while changing ring parameters.
- Minor fixes about formatting.

v2 -> v3:
- Address comments:
  https://lore.kernel.org/all/ZW2loxTO6oKNYLew@shell.armlinux.org.uk/

v1 -> v2:
- Add phylink support for ngbe.
- Fix issue on interrupts when queue number is changed.
- Add more macro defines.
- Fix return codes.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 5403d39b b746dc6b
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include "wx_type.h" #include "wx_type.h"
#include "wx_ethtool.h" #include "wx_ethtool.h"
#include "wx_hw.h" #include "wx_hw.h"
#include "wx_lib.h"
struct wx_stats { struct wx_stats {
char stat_string[ETH_GSTRING_LEN]; char stat_string[ETH_GSTRING_LEN];
...@@ -185,3 +186,238 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) ...@@ -185,3 +186,238 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
} }
} }
EXPORT_SYMBOL(wx_get_drvinfo); EXPORT_SYMBOL(wx_get_drvinfo);
/* Restart link autonegotiation, delegating to the phylink layer. */
int wx_nway_reset(struct net_device *netdev)
{
	struct wx *priv = netdev_priv(netdev);

	return phylink_ethtool_nway_reset(priv->phylink);
}
EXPORT_SYMBOL(wx_nway_reset);
/* ethtool get_link_ksettings: fetch link settings via phylink. */
int wx_get_link_ksettings(struct net_device *netdev,
			  struct ethtool_link_ksettings *cmd)
{
	struct wx *priv = netdev_priv(netdev);

	return phylink_ethtool_ksettings_get(priv->phylink, cmd);
}
EXPORT_SYMBOL(wx_get_link_ksettings);
/* ethtool set_link_ksettings: apply link settings via phylink. */
int wx_set_link_ksettings(struct net_device *netdev,
			  const struct ethtool_link_ksettings *cmd)
{
	struct wx *priv = netdev_priv(netdev);

	return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
EXPORT_SYMBOL(wx_set_link_ksettings);
/* ethtool get_pauseparam: report pause frame settings via phylink. */
void wx_get_pauseparam(struct net_device *netdev,
		       struct ethtool_pauseparam *pause)
{
	struct wx *priv = netdev_priv(netdev);

	phylink_ethtool_get_pauseparam(priv->phylink, pause);
}
EXPORT_SYMBOL(wx_get_pauseparam);
/* ethtool set_pauseparam: apply pause frame settings via phylink. */
int wx_set_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct wx *priv = netdev_priv(netdev);

	return phylink_ethtool_set_pauseparam(priv->phylink, pause);
}
EXPORT_SYMBOL(wx_set_pauseparam);
/**
 * wx_get_ringparam - report Rx/Tx descriptor ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure to fill out
 * @kernel_ring: extended ring parameters (not used by this driver)
 * @extack: netlink extended ack (not used by this driver)
 *
 * Reports the hardware maximum and currently configured descriptor
 * counts. Mini and jumbo rings are not supported, so those fields are
 * reported as zero.
 */
void wx_get_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *ring,
		      struct kernel_ethtool_ringparam *kernel_ring,
		      struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);

	ring->rx_max_pending = WX_MAX_RXD;
	ring->tx_max_pending = WX_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = wx->rx_ring_count;
	ring->tx_pending = wx->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
EXPORT_SYMBOL(wx_get_ringparam);
/**
 * wx_get_coalesce - report interrupt coalescing settings
 * @netdev: network interface device structure
 * @ec: ethtool coalesce structure to fill out
 * @kernel_coal: extended coalesce parameters (not used by this driver)
 * @extack: netlink extended ack (not used by this driver)
 *
 * ITR settings larger than 1 are stored left-shifted by 2 (see
 * wx_set_coalesce), so shift back to report microseconds; values of
 * 0 or 1 are passed through unchanged as flag values.
 *
 * Return: always 0.
 */
int wx_get_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);

	ec->tx_max_coalesced_frames_irq = wx->tx_work_limit;
	/* only valid if in constant ITR mode */
	if (wx->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = wx->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (wx->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = wx->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2;

	return 0;
}
EXPORT_SYMBOL(wx_get_coalesce);
/**
 * wx_set_coalesce - set interrupt coalescing parameters
 * @netdev: network interface device structure
 * @ec: ethtool coalesce structure with the requested settings
 * @kernel_coal: extended coalesce parameters (not used by this driver)
 * @extack: netlink extended ack (not used by this driver)
 *
 * Converts the requested microsecond values into stored ITR units
 * (left-shifted by 2), validates them against the per-MAC-type EITR
 * maximum, and programs every queue vector's ITR register.
 *
 * Return: 0 on success, -EOPNOTSUPP for Tx-specific changes on mixed
 * Rx/Tx vectors, -EINVAL for out-of-range values.
 */
int wx_set_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u16 tx_itr_param, rx_itr_param;
	struct wx_q_vector *q_vector;
	u16 max_eitr;
	int i;

	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EOPNOTSUPP;
	}

	if (ec->tx_max_coalesced_frames_irq)
		wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;

	/* EITR register width differs per MAC type */
	if (wx->mac.type == wx_mac_sp)
		max_eitr = WX_SP_MAX_EITR;
	else
		max_eitr = WX_EM_MAX_EITR;

	if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
	    (ec->tx_coalesce_usecs > (max_eitr >> 2)))
		return -EINVAL;

	/* store usecs << 2; 0 and 1 are kept as-is as flag values */
	if (ec->rx_coalesce_usecs > 1)
		wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		wx->rx_itr_setting = ec->rx_coalesce_usecs;

	if (wx->rx_itr_setting == 1)
		rx_itr_param = WX_20K_ITR;
	else
		rx_itr_param = wx->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		wx->tx_itr_setting = ec->tx_coalesce_usecs;

	/* a stored setting of 1 selects a per-MAC-type default rate */
	if (wx->tx_itr_setting == 1) {
		if (wx->mac.type == wx_mac_sp)
			tx_itr_param = WX_12K_ITR;
		else
			tx_itr_param = WX_20K_ITR;
	} else {
		tx_itr_param = wx->tx_itr_setting;
	}

	/* mixed Rx/Tx */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		wx->tx_itr_setting = wx->rx_itr_setting;

	/* push the chosen rate to every vector's hardware register */
	for (i = 0; i < wx->num_q_vectors; i++) {
		q_vector = wx->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		wx_write_eitr(q_vector);
	}

	return 0;
}
EXPORT_SYMBOL(wx_set_coalesce);
/* Return the maximum number of combined queue channels the device
 * supports: one without MSI-X, otherwise the per-MAC-type RSS maximum.
 */
static unsigned int wx_max_channels(struct wx *wx)
{
	/* We only support one q_vector without MSI-X */
	if (!wx->msix_q_entries)
		return 1;

	/* support up to max allowed queues with RSS */
	return wx->mac.type == wx_mac_sp ? 63 : 8;
}
/**
 * wx_get_channels - report channel (queue vector) configuration
 * @dev: network interface device structure
 * @ch: ethtool channels structure to fill out
 *
 * Reports the maximum supported combined channels, the single "other"
 * (non-queue) vector when MSI-X is in use, and the currently
 * configured RSS queue count.
 */
void wx_get_channels(struct net_device *dev,
		     struct ethtool_channels *ch)
{
	struct wx *wx = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = wx_max_channels(wx);

	/* report info for other vector */
	if (wx->msix_q_entries) {
		ch->max_other = 1;
		ch->other_count = 1;
	}

	/* record RSS queues */
	ch->combined_count = wx->ring_feature[RING_F_RSS].indices;
}
EXPORT_SYMBOL(wx_get_channels);
/**
 * wx_set_channels - change the number of combined channels
 * @dev: network interface device structure
 * @ch: ethtool channels structure with the requested configuration
 *
 * Validates the request and records the new RSS queue limit; only the
 * limit is updated here — no queues are reallocated in this function.
 *
 * Return: 0 on success, -EINVAL if the "other" vector count changed or
 * the requested channel count exceeds the hardware maximum.
 */
int wx_set_channels(struct net_device *dev,
		    struct ethtool_channels *ch)
{
	unsigned int count = ch->combined_count;
	struct wx *wx = netdev_priv(dev);

	/* verify other_count has not changed */
	if (ch->other_count != 1)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > wx_max_channels(wx))
		return -EINVAL;

	wx->ring_feature[RING_F_RSS].limit = count;

	return 0;
}
EXPORT_SYMBOL(wx_set_channels);
/* ethtool get_msglevel: report the driver's message-enable bitmap. */
u32 wx_get_msglevel(struct net_device *netdev)
{
	struct wx *priv = netdev_priv(netdev);

	return priv->msg_enable;
}
EXPORT_SYMBOL(wx_get_msglevel);
/* ethtool set_msglevel: store the new message-enable bitmap. */
void wx_set_msglevel(struct net_device *netdev, u32 data)
{
	struct wx *priv = netdev_priv(netdev);

	priv->msg_enable = data;
}
EXPORT_SYMBOL(wx_set_msglevel);
...@@ -13,4 +13,31 @@ void wx_get_mac_stats(struct net_device *netdev, ...@@ -13,4 +13,31 @@ void wx_get_mac_stats(struct net_device *netdev,
void wx_get_pause_stats(struct net_device *netdev, void wx_get_pause_stats(struct net_device *netdev,
struct ethtool_pause_stats *stats); struct ethtool_pause_stats *stats);
void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info);
int wx_nway_reset(struct net_device *netdev);
int wx_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd);
int wx_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd);
void wx_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause);
int wx_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause);
void wx_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack);
int wx_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack);
int wx_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack);
void wx_get_channels(struct net_device *dev,
struct ethtool_channels *ch);
int wx_set_channels(struct net_device *dev,
struct ethtool_channels *ch);
u32 wx_get_msglevel(struct net_device *netdev);
void wx_set_msglevel(struct net_device *netdev, u32 data);
#endif /* _WX_ETHTOOL_H_ */ #endif /* _WX_ETHTOOL_H_ */
...@@ -149,9 +149,9 @@ void wx_irq_disable(struct wx *wx) ...@@ -149,9 +149,9 @@ void wx_irq_disable(struct wx *wx)
int vector; int vector;
for (vector = 0; vector < wx->num_q_vectors; vector++) for (vector = 0; vector < wx->num_q_vectors; vector++)
synchronize_irq(wx->msix_entries[vector].vector); synchronize_irq(wx->msix_q_entries[vector].vector);
synchronize_irq(wx->msix_entries[vector].vector); synchronize_irq(wx->msix_entry->vector);
} else { } else {
synchronize_irq(pdev->irq); synchronize_irq(pdev->irq);
} }
...@@ -1158,6 +1158,81 @@ static void wx_set_rxpba(struct wx *wx) ...@@ -1158,6 +1158,81 @@ static void wx_set_rxpba(struct wx *wx)
wr32(wx, WX_TDM_PB_THRE(0), txpbthresh); wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
} }
/* extra framing bytes assumed on top of MTU + headers for delay math */
#define WX_ETH_FRAMING 20

/**
 * wx_hpbthresh - calculate high water mark for flow control
 *
 * @wx: board private structure to calculate for
 *
 * Return: high water mark in KB of packet-buffer headroom, or a
 * fallback of (max frame size + 1) if the buffer is too small.
 **/
static int wx_hpbthresh(struct wx *wx)
{
	struct net_device *dev = wx->netdev;
	int link, tc, kb, marker;
	u32 dv_id, rx_pba;

	/* Calculate max LAN frame size */
	link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + WX_ETH_FRAMING;
	tc = link;

	/* Calculate delay value for device */
	dv_id = WX_DV(link, tc);

	/* Delay value is calculated in bit times convert to KB */
	kb = WX_BT2KB(dv_id);
	rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;

	marker = rx_pba - kb;

	/* It is possible that the packet buffer is not large enough
	 * to provide required headroom. In this case warn the user
	 * and do the best we can.
	 */
	if (marker < 0) {
		dev_warn(&wx->pdev->dev,
			 "Packet Buffer can not provide enough headroom to support flow control. Decrease MTU or number of traffic classes\n");
		marker = tc + 1;
	}

	return marker;
}
/**
 * wx_lpbthresh - calculate low water mark for flow control
 *
 * @wx: board private structure to calculate for
 *
 * Return: low water mark in KB, derived from the delay value for the
 * maximum LAN frame size.
 **/
static int wx_lpbthresh(struct wx *wx)
{
	struct net_device *dev = wx->netdev;
	u32 dv_id;
	int tc;

	/* Calculate max LAN frame size */
	tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Calculate delay value for device */
	dv_id = WX_LOW_DV(tc);

	/* Delay value is calculated in bit times convert to KB */
	return WX_BT2KB(dv_id);
}
/**
 * wx_pbthresh_setup - calculate and set up the high/low water marks
 *
 * @wx: board private structure to calculate for
 **/
static void wx_pbthresh_setup(struct wx *wx)
{
	int high = wx_hpbthresh(wx);
	int low = wx_lpbthresh(wx);

	/* Low water marks must not be larger than high water marks */
	if (low > high)
		low = 0;

	wx->fc.high_water = high;
	wx->fc.low_water = low;
}
static void wx_configure_port(struct wx *wx) static void wx_configure_port(struct wx *wx)
{ {
u32 value, i; u32 value, i;
...@@ -1522,6 +1597,72 @@ static void wx_restore_vlan(struct wx *wx) ...@@ -1522,6 +1597,72 @@ static void wx_restore_vlan(struct wx *wx)
wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid); wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid);
} }
/* Write the software RSS indirection table to the RSSTBL registers,
 * packing four 8-bit entries per 32-bit register.
 */
static void wx_store_reta(struct wx *wx)
{
	u8 *indir_tbl = wx->rss_indir_tbl;
	u32 reta = 0;
	u32 i;

	/* Fill out the redirection table as follows:
	 * - 8 bit wide entries containing 4 bit RSS index
	 */
	for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) {
		/* place entry i into byte lane (i & 3) of the accumulator */
		reta |= indir_tbl[i] << (i & 0x3) * 8;
		if ((i & 3) == 3) {
			/* four entries collected: flush register i/4 */
			wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
			reta = 0;
		}
	}
}
/* Program the RSS hash key registers and build a round-robin
 * redirection table over the configured RSS queue indices.
 */
static void wx_setup_reta(struct wx *wx)
{
	u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
	u32 key_dwords = WX_RSS_KEY_SIZE / 4;
	u32 i, j;

	/* Fill out hash function seeds */
	for (i = 0; i < key_dwords; i++)
		wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);

	/* Fill out redirection table, cycling 0..rss_i-1 */
	memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));
	for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++) {
		if (j == rss_i)
			j = 0;
		wx->rss_indir_tbl[i] = j++;
	}

	wx_store_reta(wx);
}
static void wx_setup_mrqc(struct wx *wx)
{
u32 rss_field = 0;
/* Disable indicating checksum in descriptor, enables RSS hash */
wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
/* Perform hash on these packet types */
rss_field = WX_RDB_RA_CTL_RSS_IPV4 |
WX_RDB_RA_CTL_RSS_IPV4_TCP |
WX_RDB_RA_CTL_RSS_IPV4_UDP |
WX_RDB_RA_CTL_RSS_IPV6 |
WX_RDB_RA_CTL_RSS_IPV6_TCP |
WX_RDB_RA_CTL_RSS_IPV6_UDP;
netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));
wx_setup_reta(wx);
if (wx->rss_enabled)
rss_field |= WX_RDB_RA_CTL_RSS_EN;
wr32(wx, WX_RDB_RA_CTL, rss_field);
}
/** /**
* wx_configure_rx - Configure Receive Unit after Reset * wx_configure_rx - Configure Receive Unit after Reset
* @wx: pointer to private structure * @wx: pointer to private structure
...@@ -1554,6 +1695,8 @@ void wx_configure_rx(struct wx *wx) ...@@ -1554,6 +1695,8 @@ void wx_configure_rx(struct wx *wx)
wr32(wx, WX_PSR_CTL, psrctl); wr32(wx, WX_PSR_CTL, psrctl);
} }
wx_setup_mrqc(wx);
/* set_rx_buffer_len must be called before ring initialization */ /* set_rx_buffer_len must be called before ring initialization */
wx_set_rx_buffer_len(wx); wx_set_rx_buffer_len(wx);
...@@ -1584,6 +1727,7 @@ static void wx_configure_isb(struct wx *wx) ...@@ -1584,6 +1727,7 @@ static void wx_configure_isb(struct wx *wx)
void wx_configure(struct wx *wx) void wx_configure(struct wx *wx)
{ {
wx_set_rxpba(wx); wx_set_rxpba(wx);
wx_pbthresh_setup(wx);
wx_configure_port(wx); wx_configure_port(wx);
wx_set_rx_mode(wx->netdev); wx_set_rx_mode(wx->netdev);
...@@ -1750,6 +1894,28 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count) ...@@ -1750,6 +1894,28 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count)
} }
EXPORT_SYMBOL(wx_get_pcie_msix_counts); EXPORT_SYMBOL(wx_get_pcie_msix_counts);
/**
 * wx_init_rss_key - Initialize wx RSS key
 * @wx: device handle
 *
 * Allocates and fills the RSS hash key on first use; subsequent calls
 * are no-ops.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 **/
static int wx_init_rss_key(struct wx *wx)
{
	u32 *key;

	if (wx->rss_key)
		return 0;

	key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
	if (unlikely(!key))
		return -ENOMEM;

	netdev_rss_key_fill(key, WX_RSS_KEY_SIZE);
	wx->rss_key = key;

	return 0;
}
int wx_sw_init(struct wx *wx) int wx_sw_init(struct wx *wx)
{ {
struct pci_dev *pdev = wx->pdev; struct pci_dev *pdev = wx->pdev;
...@@ -1777,14 +1943,23 @@ int wx_sw_init(struct wx *wx) ...@@ -1777,14 +1943,23 @@ int wx_sw_init(struct wx *wx)
wx->subsystem_device_id = swab16((u16)ssid); wx->subsystem_device_id = swab16((u16)ssid);
} }
err = wx_init_rss_key(wx);
if (err < 0) {
wx_err(wx, "rss key allocation failed\n");
return err;
}
wx->mac_table = kcalloc(wx->mac.num_rar_entries, wx->mac_table = kcalloc(wx->mac.num_rar_entries,
sizeof(struct wx_mac_addr), sizeof(struct wx_mac_addr),
GFP_KERNEL); GFP_KERNEL);
if (!wx->mac_table) { if (!wx->mac_table) {
wx_err(wx, "mac_table allocation failed\n"); wx_err(wx, "mac_table allocation failed\n");
kfree(wx->rss_key);
return -ENOMEM; return -ENOMEM;
} }
wx->msix_in_use = false;
return 0; return 0;
} }
EXPORT_SYMBOL(wx_sw_init); EXPORT_SYMBOL(wx_sw_init);
...@@ -2003,6 +2178,102 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) ...@@ -2003,6 +2178,102 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
} }
EXPORT_SYMBOL(wx_vlan_rx_kill_vid); EXPORT_SYMBOL(wx_vlan_rx_kill_vid);
/* Set the ring's drop-enable bit so Rx overruns are dropped rather
 * than back-pressuring the device.
 */
static void wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring)
{
	u32 cfg = rd32(wx, WX_PX_RR_CFG(ring->reg_idx));

	wr32(wx, WX_PX_RR_CFG(ring->reg_idx), cfg | WX_PX_RR_CFG_DROP_EN);
}
/* Clear the ring's drop-enable bit (used when flow control is on). */
static void wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring)
{
	u32 cfg = rd32(wx, WX_PX_RR_CFG(ring->reg_idx));

	wr32(wx, WX_PX_RR_CFG(ring->reg_idx), cfg & ~WX_PX_RR_CFG_DROP_EN);
}
/**
 * wx_fc_enable - program 802.3x link flow control
 * @wx: pointer to private structure
 * @tx_pause: enable transmission of pause frames
 * @rx_pause: honor received pause frames
 *
 * Programs the MAC and RDB flow control registers, the Rx high/low
 * water mark thresholds, the pause time and refresh threshold, and
 * toggles per-ring drop-enable to avoid head-of-line blocking when
 * Tx flow control is off.
 *
 * Return: 0 on success, -EINVAL on an invalid water mark configuration.
 */
int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause)
{
	u16 pause_time = WX_DEFAULT_FCPAUSE;
	u32 mflcn_reg, fccfg_reg, reg;
	u32 fcrtl, fcrth;
	int i;

	/* Low water mark of zero causes XOFF floods */
	if (tx_pause && wx->fc.high_water) {
		if (!wx->fc.low_water || wx->fc.low_water >= wx->fc.high_water) {
			wx_err(wx, "Invalid water mark configuration\n");
			return -EINVAL;
		}
	}

	/* Disable any previous flow control settings */
	mflcn_reg = rd32(wx, WX_MAC_RX_FLOW_CTRL);
	mflcn_reg &= ~WX_MAC_RX_FLOW_CTRL_RFE;

	fccfg_reg = rd32(wx, WX_RDB_RFCC);
	fccfg_reg &= ~WX_RDB_RFCC_RFCE_802_3X;

	if (rx_pause)
		mflcn_reg |= WX_MAC_RX_FLOW_CTRL_RFE;
	if (tx_pause)
		fccfg_reg |= WX_RDB_RFCC_RFCE_802_3X;

	/* Set 802.3x based flow control settings. */
	wr32(wx, WX_MAC_RX_FLOW_CTRL, mflcn_reg);
	wr32(wx, WX_RDB_RFCC, fccfg_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	if (tx_pause && wx->fc.high_water) {
		/* water marks are stored in KB; registers take them << 10 */
		fcrtl = (wx->fc.low_water << 10) | WX_RDB_RFCL_XONE;
		wr32(wx, WX_RDB_RFCL, fcrtl);
		fcrth = (wx->fc.high_water << 10) | WX_RDB_RFCH_XOFFE;
	} else {
		wr32(wx, WX_RDB_RFCL, 0);
		/* In order to prevent Tx hangs when the internal Tx
		 * switch is enabled we must set the high water mark
		 * to the Rx packet buffer size - 24KB. This allows
		 * the Tx switch to function even under heavy Rx
		 * workloads.
		 */
		fcrth = rd32(wx, WX_RDB_PB_SZ(0)) - 24576;
	}

	wr32(wx, WX_RDB_RFCH, fcrth);

	/* Configure pause time */
	reg = pause_time * 0x00010001;
	wr32(wx, WX_RDB_RFCV, reg);

	/* Configure flow control refresh threshold value */
	wr32(wx, WX_RDB_RFCRT, pause_time / 2);

	/* We should set the drop enable bit if:
	 * Number of Rx queues > 1 and flow control is disabled
	 *
	 * This allows us to avoid head of line blocking for security
	 * and performance reasons.
	 */
	if (wx->num_rx_queues > 1 && !tx_pause) {
		for (i = 0; i < wx->num_rx_queues; i++)
			wx_enable_rx_drop(wx, wx->rx_ring[i]);
	} else {
		for (i = 0; i < wx->num_rx_queues; i++)
			wx_disable_rx_drop(wx, wx->rx_ring[i]);
	}

	return 0;
}
EXPORT_SYMBOL(wx_fc_enable);
/** /**
* wx_update_stats - Update the board statistics counters. * wx_update_stats - Update the board statistics counters.
* @wx: board private structure * @wx: board private structure
......
...@@ -41,6 +41,7 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); ...@@ -41,6 +41,7 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
int wx_sw_init(struct wx *wx); int wx_sw_init(struct wx *wx);
int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause);
void wx_update_stats(struct wx *wx); void wx_update_stats(struct wx *wx);
void wx_clear_hw_cntrs(struct wx *wx); void wx_clear_hw_cntrs(struct wx *wx);
......
...@@ -1568,8 +1568,14 @@ EXPORT_SYMBOL(wx_napi_disable_all); ...@@ -1568,8 +1568,14 @@ EXPORT_SYMBOL(wx_napi_disable_all);
**/ **/
static void wx_set_rss_queues(struct wx *wx) static void wx_set_rss_queues(struct wx *wx)
{ {
wx->num_rx_queues = wx->mac.max_rx_queues; struct wx_ring_feature *f;
wx->num_tx_queues = wx->mac.max_tx_queues;
/* set mask for 16 queue limit of RSS */
f = &wx->ring_feature[RING_F_RSS];
f->indices = f->limit;
wx->num_rx_queues = f->limit;
wx->num_tx_queues = f->limit;
} }
static void wx_set_num_queues(struct wx *wx) static void wx_set_num_queues(struct wx *wx)
...@@ -1595,35 +1601,51 @@ static int wx_acquire_msix_vectors(struct wx *wx) ...@@ -1595,35 +1601,51 @@ static int wx_acquire_msix_vectors(struct wx *wx)
struct irq_affinity affd = {0, }; struct irq_affinity affd = {0, };
int nvecs, i; int nvecs, i;
nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors); /* We start by asking for one vector per queue pair */
nvecs = max(wx->num_rx_queues, wx->num_tx_queues);
nvecs = min_t(int, nvecs, num_online_cpus());
nvecs = min_t(int, nvecs, wx->mac.max_msix_vectors);
wx->msix_entries = kcalloc(nvecs, wx->msix_q_entries = kcalloc(nvecs, sizeof(struct msix_entry),
sizeof(struct msix_entry),
GFP_KERNEL); GFP_KERNEL);
if (!wx->msix_entries) if (!wx->msix_q_entries)
return -ENOMEM; return -ENOMEM;
/* One for non-queue interrupts */
nvecs += 1;
if (!wx->msix_in_use) {
wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
GFP_KERNEL);
if (!wx->msix_entry) {
kfree(wx->msix_q_entries);
wx->msix_q_entries = NULL;
return -ENOMEM;
}
}
nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs, nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
nvecs, nvecs,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
&affd); &affd);
if (nvecs < 0) { if (nvecs < 0) {
wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs); wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs);
kfree(wx->msix_entries); kfree(wx->msix_q_entries);
wx->msix_entries = NULL; wx->msix_q_entries = NULL;
kfree(wx->msix_entry);
wx->msix_entry = NULL;
return nvecs; return nvecs;
} }
wx->msix_entry->entry = 0;
wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
nvecs -= 1;
for (i = 0; i < nvecs; i++) { for (i = 0; i < nvecs; i++) {
wx->msix_entries[i].entry = i; wx->msix_q_entries[i].entry = i;
wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i); wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1);
} }
/* one for msix_other */
nvecs -= 1;
wx->num_q_vectors = nvecs; wx->num_q_vectors = nvecs;
wx->num_rx_queues = nvecs;
wx->num_tx_queues = nvecs;
return 0; return 0;
} }
...@@ -1645,9 +1667,11 @@ static int wx_set_interrupt_capability(struct wx *wx) ...@@ -1645,9 +1667,11 @@ static int wx_set_interrupt_capability(struct wx *wx)
if (ret == 0 || (ret == -ENOMEM)) if (ret == 0 || (ret == -ENOMEM))
return ret; return ret;
wx->num_rx_queues = 1; /* Disable RSS */
wx->num_tx_queues = 1; dev_warn(&wx->pdev->dev, "Disabling RSS support\n");
wx->num_q_vectors = 1; wx->ring_feature[RING_F_RSS].limit = 1;
wx_set_num_queues(wx);
/* minimum one for queue, one for misc */ /* minimum one for queue, one for misc */
nvecs = 1; nvecs = 1;
...@@ -1905,8 +1929,12 @@ void wx_reset_interrupt_capability(struct wx *wx) ...@@ -1905,8 +1929,12 @@ void wx_reset_interrupt_capability(struct wx *wx)
return; return;
if (pdev->msix_enabled) { if (pdev->msix_enabled) {
kfree(wx->msix_entries); kfree(wx->msix_q_entries);
wx->msix_entries = NULL; wx->msix_q_entries = NULL;
if (!wx->msix_in_use) {
kfree(wx->msix_entry);
wx->msix_entry = NULL;
}
} }
pci_free_irq_vectors(wx->pdev); pci_free_irq_vectors(wx->pdev);
} }
...@@ -1978,7 +2006,7 @@ void wx_free_irq(struct wx *wx) ...@@ -1978,7 +2006,7 @@ void wx_free_irq(struct wx *wx)
for (vector = 0; vector < wx->num_q_vectors; vector++) { for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector]; struct wx_q_vector *q_vector = wx->q_vector[vector];
struct msix_entry *entry = &wx->msix_entries[vector]; struct msix_entry *entry = &wx->msix_q_entries[vector];
/* free only the irqs that were actually requested */ /* free only the irqs that were actually requested */
if (!q_vector->rx.ring && !q_vector->tx.ring) if (!q_vector->rx.ring && !q_vector->tx.ring)
...@@ -1988,7 +2016,7 @@ void wx_free_irq(struct wx *wx) ...@@ -1988,7 +2016,7 @@ void wx_free_irq(struct wx *wx)
} }
if (wx->mac.type == wx_mac_em) if (wx->mac.type == wx_mac_em)
free_irq(wx->msix_entries[vector].vector, wx); free_irq(wx->msix_entry->vector, wx);
} }
EXPORT_SYMBOL(wx_free_irq); EXPORT_SYMBOL(wx_free_irq);
...@@ -2065,6 +2093,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction, ...@@ -2065,6 +2093,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
wr32(wx, WX_PX_MISC_IVAR, ivar); wr32(wx, WX_PX_MISC_IVAR, ivar);
} else { } else {
/* tx or rx causes */ /* tx or rx causes */
msix_vector += 1; /* offset for queue vectors */
msix_vector |= WX_PX_IVAR_ALLOC_VAL; msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction)); index = ((16 * (queue & 1)) + (8 * direction));
ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
...@@ -2082,7 +2111,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction, ...@@ -2082,7 +2111,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
* when it needs to update EITR registers at runtime. Hardware * when it needs to update EITR registers at runtime. Hardware
* specific quirks/differences are taken care of here. * specific quirks/differences are taken care of here.
*/ */
static void wx_write_eitr(struct wx_q_vector *q_vector) void wx_write_eitr(struct wx_q_vector *q_vector)
{ {
struct wx *wx = q_vector->wx; struct wx *wx = q_vector->wx;
int v_idx = q_vector->v_idx; int v_idx = q_vector->v_idx;
...@@ -2095,7 +2124,7 @@ static void wx_write_eitr(struct wx_q_vector *q_vector) ...@@ -2095,7 +2124,7 @@ static void wx_write_eitr(struct wx_q_vector *q_vector)
itr_reg |= WX_PX_ITR_CNT_WDIS; itr_reg |= WX_PX_ITR_CNT_WDIS;
wr32(wx, WX_PX_ITR(v_idx), itr_reg); wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg);
} }
/** /**
...@@ -2141,9 +2170,9 @@ void wx_configure_vectors(struct wx *wx) ...@@ -2141,9 +2170,9 @@ void wx_configure_vectors(struct wx *wx)
wx_write_eitr(q_vector); wx_write_eitr(q_vector);
} }
wx_set_ivar(wx, -1, 0, v_idx); wx_set_ivar(wx, -1, 0, 0);
if (pdev->msix_enabled) if (pdev->msix_enabled)
wr32(wx, WX_PX_ITR(v_idx), 1950); wr32(wx, WX_PX_ITR(0), 1950);
} }
EXPORT_SYMBOL(wx_configure_vectors); EXPORT_SYMBOL(wx_configure_vectors);
...@@ -2656,11 +2685,14 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) ...@@ -2656,11 +2685,14 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
netdev_features_t changed = netdev->features ^ features; netdev_features_t changed = netdev->features ^ features;
struct wx *wx = netdev_priv(netdev); struct wx *wx = netdev_priv(netdev);
if (changed & NETIF_F_RXHASH) if (features & NETIF_F_RXHASH) {
wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
WX_RDB_RA_CTL_RSS_EN); WX_RDB_RA_CTL_RSS_EN);
else wx->rss_enabled = true;
} else {
wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0); wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0);
wx->rss_enabled = false;
}
if (changed & if (changed &
(NETIF_F_HW_VLAN_CTAG_RX | (NETIF_F_HW_VLAN_CTAG_RX |
...@@ -2671,4 +2703,70 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) ...@@ -2671,4 +2703,70 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
} }
EXPORT_SYMBOL(wx_set_features); EXPORT_SYMBOL(wx_set_features);
/**
 * wx_set_ring - change Tx/Rx descriptor ring sizes
 * @wx: board private structure
 * @new_tx_count: requested number of Tx descriptors per ring
 * @new_rx_count: requested number of Rx descriptors per ring
 * @temp_ring: caller-provided scratch ring array used to stage the
 *	newly sized resources (must hold at least as many entries as
 *	the larger of num_tx_queues/num_rx_queues — TODO confirm caller
 *	contract)
 *
 * Allocates the new resources first and only then frees the old ones,
 * so on any allocation failure the original configuration is kept
 * intact and a warning is logged.
 */
void wx_set_ring(struct wx *wx, u32 new_tx_count,
		 u32 new_rx_count, struct wx_ring *temp_ring)
{
	int i, err = 0;

	/* Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != wx->tx_ring_count) {
		for (i = 0; i < wx->num_tx_queues; i++) {
			memcpy(&temp_ring[i], wx->tx_ring[i],
			       sizeof(struct wx_ring));

			temp_ring[i].count = new_tx_count;
			err = wx_setup_tx_resources(&temp_ring[i]);
			if (err) {
				wx_err(wx, "setup new tx resources failed, keep using the old config\n");
				/* unwind the partially allocated rings */
				while (i) {
					i--;
					wx_free_tx_resources(&temp_ring[i]);
				}
				return;
			}
		}

		for (i = 0; i < wx->num_tx_queues; i++) {
			wx_free_tx_resources(wx->tx_ring[i]);

			memcpy(wx->tx_ring[i], &temp_ring[i],
			       sizeof(struct wx_ring));
		}

		wx->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != wx->rx_ring_count) {
		for (i = 0; i < wx->num_rx_queues; i++) {
			memcpy(&temp_ring[i], wx->rx_ring[i],
			       sizeof(struct wx_ring));

			temp_ring[i].count = new_rx_count;
			err = wx_setup_rx_resources(&temp_ring[i]);
			if (err) {
				wx_err(wx, "setup new rx resources failed, keep using the old config\n");
				/* unwind the partially allocated rings */
				while (i) {
					i--;
					wx_free_rx_resources(&temp_ring[i]);
				}
				return;
			}
		}

		for (i = 0; i < wx->num_rx_queues; i++) {
			wx_free_rx_resources(wx->rx_ring[i]);
			memcpy(wx->rx_ring[i], &temp_ring[i],
			       sizeof(struct wx_ring));
		}

		wx->rx_ring_count = new_rx_count;
	}
}
EXPORT_SYMBOL(wx_set_ring);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
...@@ -21,6 +21,7 @@ void wx_free_irq(struct wx *wx); ...@@ -21,6 +21,7 @@ void wx_free_irq(struct wx *wx);
int wx_setup_isb_resources(struct wx *wx); int wx_setup_isb_resources(struct wx *wx);
void wx_free_isb_resources(struct wx *wx); void wx_free_isb_resources(struct wx *wx);
u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx); u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx);
void wx_write_eitr(struct wx_q_vector *q_vector);
void wx_configure_vectors(struct wx *wx); void wx_configure_vectors(struct wx *wx);
void wx_clean_all_rx_rings(struct wx *wx); void wx_clean_all_rx_rings(struct wx *wx);
void wx_clean_all_tx_rings(struct wx *wx); void wx_clean_all_tx_rings(struct wx *wx);
...@@ -29,5 +30,7 @@ int wx_setup_resources(struct wx *wx); ...@@ -29,5 +30,7 @@ int wx_setup_resources(struct wx *wx);
void wx_get_stats64(struct net_device *netdev, void wx_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats); struct rtnl_link_stats64 *stats);
int wx_set_features(struct net_device *netdev, netdev_features_t features); int wx_set_features(struct net_device *netdev, netdev_features_t features);
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring);
#endif /* _NGBE_LIB_H_ */ #endif /* _NGBE_LIB_H_ */
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include <linux/bitfield.h> #include <linux/bitfield.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/if_vlan.h> #include <linux/if_vlan.h>
#include <linux/phylink.h>
#include <net/ip.h> #include <net/ip.h>
#define WX_NCSI_SUP 0x8000 #define WX_NCSI_SUP 0x8000
...@@ -130,6 +131,15 @@ ...@@ -130,6 +131,15 @@
#define WX_RDB_PFCMACDAH 0x19214 #define WX_RDB_PFCMACDAH 0x19214
#define WX_RDB_LXOFFTXC 0x19218 #define WX_RDB_LXOFFTXC 0x19218
#define WX_RDB_LXONTXC 0x1921C #define WX_RDB_LXONTXC 0x1921C
/* Flow Control Registers */
#define WX_RDB_RFCV 0x19200
#define WX_RDB_RFCL 0x19220
#define WX_RDB_RFCL_XONE BIT(31)
#define WX_RDB_RFCH 0x19260
#define WX_RDB_RFCH_XOFFE BIT(31)
#define WX_RDB_RFCRT 0x192A0
#define WX_RDB_RFCC 0x192A4
#define WX_RDB_RFCC_RFCE_802_3X BIT(3)
/* ring assignment */ /* ring assignment */
#define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) #define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4))
#define WX_RDB_PL_CFG_L4HDR BIT(1) #define WX_RDB_PL_CFG_L4HDR BIT(1)
...@@ -137,8 +147,16 @@ ...@@ -137,8 +147,16 @@
#define WX_RDB_PL_CFG_L2HDR BIT(3) #define WX_RDB_PL_CFG_L2HDR BIT(3)
#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4) #define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4)
#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5) #define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5)
#define WX_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4))
#define WX_RDB_RSSRK(_i) (0x19480 + ((_i) * 4))
#define WX_RDB_RA_CTL 0x194F4 #define WX_RDB_RA_CTL 0x194F4
#define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */ #define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */
#define WX_RDB_RA_CTL_RSS_IPV4_TCP BIT(16)
#define WX_RDB_RA_CTL_RSS_IPV4 BIT(17)
#define WX_RDB_RA_CTL_RSS_IPV6 BIT(20)
#define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21)
#define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22)
#define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23)
/******************************* PSR Registers *******************************/ /******************************* PSR Registers *******************************/
/* psr control */ /* psr control */
...@@ -305,6 +323,7 @@ enum WX_MSCA_CMD_value { ...@@ -305,6 +323,7 @@ enum WX_MSCA_CMD_value {
#define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ #define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
#define WX_7K_ITR 595 #define WX_7K_ITR 595
#define WX_12K_ITR 336 #define WX_12K_ITR 336
#define WX_20K_ITR 200
#define WX_SP_MAX_EITR 0x00000FF8U #define WX_SP_MAX_EITR 0x00000FF8U
#define WX_EM_MAX_EITR 0x00007FFCU #define WX_EM_MAX_EITR 0x00007FFCU
...@@ -330,6 +349,7 @@ enum WX_MSCA_CMD_value { ...@@ -330,6 +349,7 @@ enum WX_MSCA_CMD_value {
#define WX_PX_MPRC(_i) (0x01020 + ((_i) * 0x40)) #define WX_PX_MPRC(_i) (0x01020 + ((_i) * 0x40))
/* PX_RR_CFG bit definitions */ /* PX_RR_CFG bit definitions */
#define WX_PX_RR_CFG_VLAN BIT(31) #define WX_PX_RR_CFG_VLAN BIT(31)
#define WX_PX_RR_CFG_DROP_EN BIT(30)
#define WX_PX_RR_CFG_SPLIT_MODE BIT(26) #define WX_PX_RR_CFG_SPLIT_MODE BIT(26)
#define WX_PX_RR_CFG_RR_THER_SHIFT 16 #define WX_PX_RR_CFG_RR_THER_SHIFT 16
#define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12) #define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12)
...@@ -367,8 +387,46 @@ enum WX_MSCA_CMD_value { ...@@ -367,8 +387,46 @@ enum WX_MSCA_CMD_value {
#define WX_MAC_STATE_MODIFIED 0x2 #define WX_MAC_STATE_MODIFIED 0x2
#define WX_MAC_STATE_IN_USE 0x4 #define WX_MAC_STATE_IN_USE 0x4
/* BitTimes (BT) conversion */
#define WX_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024))
#define WX_B2BT(BT) ((BT) * 8)
/* Calculate Delay to respond to PFC */
#define WX_PFC_D 672
/* Calculate Cable Delay */
#define WX_CABLE_DC 5556 /* Delay Copper */
/* Calculate Delay incurred from higher layer */
#define WX_HD 6144
/* Calculate Interface Delay */
#define WX_PHY_D 12800
#define WX_MAC_D 4096
#define WX_XAUI_D (2 * 1024)
#define WX_ID (WX_MAC_D + WX_XAUI_D + WX_PHY_D)
/* Calculate PCI Bus delay for low thresholds */
#define WX_PCI_DELAY 10000
/* Calculate delay value in bit times */
#define WX_DV(_max_frame_link, _max_frame_tc) \
((36 * (WX_B2BT(_max_frame_link) + WX_PFC_D + \
(2 * WX_CABLE_DC) + (2 * WX_ID) + WX_HD) / 25 + 1) + \
2 * WX_B2BT(_max_frame_tc))
/* Calculate low threshold delay values */
#define WX_LOW_DV(_max_frame_tc) \
(2 * (2 * WX_B2BT(_max_frame_tc) + (36 * WX_PCI_DELAY / 25) + 1))
/* flow control */
#define WX_DEFAULT_FCPAUSE 0xFFFF
#define WX_MAX_RXD 8192 #define WX_MAX_RXD 8192
#define WX_MAX_TXD 8192 #define WX_MAX_TXD 8192
#define WX_MIN_RXD 128
#define WX_MIN_TXD 128
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 8
#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8
#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ #define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
#define VMDQ_P(p) p #define VMDQ_P(p) p
...@@ -871,6 +929,19 @@ struct wx_q_vector { ...@@ -871,6 +929,19 @@ struct wx_q_vector {
struct wx_ring ring[] ____cacheline_internodealigned_in_smp; struct wx_ring ring[] ____cacheline_internodealigned_in_smp;
}; };
struct wx_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
u16 mask; /* Mask used for feature to ring mapping */
u16 offset; /* offset to start of feature */
};
enum wx_ring_f_enum {
RING_F_NONE = 0,
RING_F_RSS,
RING_F_ARRAY_SIZE /* must be last in enum set */
};
enum wx_isb_idx { enum wx_isb_idx {
WX_ISB_HEADER, WX_ISB_HEADER,
WX_ISB_MISC, WX_ISB_MISC,
...@@ -879,6 +950,11 @@ enum wx_isb_idx { ...@@ -879,6 +950,11 @@ enum wx_isb_idx {
WX_ISB_MAX WX_ISB_MAX
}; };
struct wx_fc_info {
u32 high_water; /* Flow Ctrl High-water */
u32 low_water; /* Flow Ctrl Low-water */
};
/* Statistics counters collected by the MAC */ /* Statistics counters collected by the MAC */
struct wx_hw_stats { struct wx_hw_stats {
u64 gprc; u64 gprc;
...@@ -919,6 +995,7 @@ struct wx { ...@@ -919,6 +995,7 @@ struct wx {
enum sp_media_type media_type; enum sp_media_type media_type;
struct wx_eeprom_info eeprom; struct wx_eeprom_info eeprom;
struct wx_addr_filter_info addr_ctrl; struct wx_addr_filter_info addr_ctrl;
struct wx_fc_info fc;
struct wx_mac_addr *mac_table; struct wx_mac_addr *mac_table;
u16 device_id; u16 device_id;
u16 vendor_id; u16 vendor_id;
...@@ -939,6 +1016,8 @@ struct wx { ...@@ -939,6 +1016,8 @@ struct wx {
int speed; int speed;
int duplex; int duplex;
struct phy_device *phydev; struct phy_device *phydev;
struct phylink *phylink;
struct phylink_config phylink_config;
bool wol_hw_supported; bool wol_hw_supported;
bool ncsi_enabled; bool ncsi_enabled;
...@@ -966,7 +1045,10 @@ struct wx { ...@@ -966,7 +1045,10 @@ struct wx {
struct wx_q_vector *q_vector[64]; struct wx_q_vector *q_vector[64];
unsigned int queues_per_pool; unsigned int queues_per_pool;
struct msix_entry *msix_entries; struct msix_entry *msix_q_entries;
struct msix_entry *msix_entry;
bool msix_in_use;
struct wx_ring_feature ring_feature[RING_F_ARRAY_SIZE];
/* misc interrupt status block */ /* misc interrupt status block */
dma_addr_t isb_dma; dma_addr_t isb_dma;
...@@ -974,8 +1056,9 @@ struct wx { ...@@ -974,8 +1056,9 @@ struct wx {
u32 isb_tag[WX_ISB_MAX]; u32 isb_tag[WX_ISB_MAX];
#define WX_MAX_RETA_ENTRIES 128 #define WX_MAX_RETA_ENTRIES 128
#define WX_RSS_INDIR_TBL_MAX 64
u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES]; u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES];
bool rss_enabled;
#define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ #define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 *rss_key; u32 *rss_key;
u32 wol; u32 wol;
...@@ -992,7 +1075,7 @@ struct wx { ...@@ -992,7 +1075,7 @@ struct wx {
}; };
#define WX_INTR_ALL (~0ULL) #define WX_INTR_ALL (~0ULL)
#define WX_INTR_Q(i) BIT(i) #define WX_INTR_Q(i) BIT((i) + 1)
/* register operations */ /* register operations */
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
...@@ -1044,4 +1127,9 @@ rd64(struct wx *wx, u32 reg) ...@@ -1044,4 +1127,9 @@ rd64(struct wx *wx, u32 reg)
#define wx_dbg(wx, fmt, arg...) \ #define wx_dbg(wx, fmt, arg...) \
dev_dbg(&(wx)->pdev->dev, fmt, ##arg) dev_dbg(&(wx)->pdev->dev, fmt, ##arg)
static inline struct wx *phylink_to_wx(struct phylink_config *config)
{
return container_of(config, struct wx, phylink_config);
}
#endif /* _WX_TYPE_H_ */ #endif /* _WX_TYPE_H_ */
...@@ -7,7 +7,10 @@ ...@@ -7,7 +7,10 @@
#include "../libwx/wx_ethtool.h" #include "../libwx/wx_ethtool.h"
#include "../libwx/wx_type.h" #include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_hw.h"
#include "ngbe_ethtool.h" #include "ngbe_ethtool.h"
#include "ngbe_type.h"
static void ngbe_get_wol(struct net_device *netdev, static void ngbe_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol) struct ethtool_wolinfo *wol)
...@@ -41,12 +44,75 @@ static int ngbe_set_wol(struct net_device *netdev, ...@@ -41,12 +44,75 @@ static int ngbe_set_wol(struct net_device *netdev,
return 0; return 0;
} }
static int ngbe_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct wx *wx = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
struct wx_ring *temp_ring;
int i;
new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE);
if (new_tx_count == wx->tx_ring_count &&
new_rx_count == wx->rx_ring_count)
return 0;
if (!netif_running(wx->netdev)) {
for (i = 0; i < wx->num_tx_queues; i++)
wx->tx_ring[i]->count = new_tx_count;
for (i = 0; i < wx->num_rx_queues; i++)
wx->rx_ring[i]->count = new_rx_count;
wx->tx_ring_count = new_tx_count;
wx->rx_ring_count = new_rx_count;
return 0;
}
/* allocate temporary buffer to store rings in */
i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
if (!temp_ring)
return -ENOMEM;
ngbe_down(wx);
wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
kvfree(temp_ring);
wx_configure(wx);
ngbe_up(wx);
return 0;
}
static int ngbe_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
int err;
err = wx_set_channels(dev, ch);
if (err < 0)
return err;
/* use setup TC to update any traffic class queue mapping */
return ngbe_setup_tc(dev, netdev_get_num_tc(dev));
}
static const struct ethtool_ops ngbe_ethtool_ops = { static const struct ethtool_ops ngbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
.get_drvinfo = wx_get_drvinfo, .get_drvinfo = wx_get_drvinfo,
.get_link = ethtool_op_get_link, .get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings, .get_link_ksettings = wx_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings, .set_link_ksettings = wx_set_link_ksettings,
.nway_reset = phy_ethtool_nway_reset, .nway_reset = wx_nway_reset,
.get_wol = ngbe_get_wol, .get_wol = ngbe_get_wol,
.set_wol = ngbe_set_wol, .set_wol = ngbe_set_wol,
.get_sset_count = wx_get_sset_count, .get_sset_count = wx_get_sset_count,
...@@ -54,6 +120,16 @@ static const struct ethtool_ops ngbe_ethtool_ops = { ...@@ -54,6 +120,16 @@ static const struct ethtool_ops ngbe_ethtool_ops = {
.get_ethtool_stats = wx_get_ethtool_stats, .get_ethtool_stats = wx_get_ethtool_stats,
.get_eth_mac_stats = wx_get_mac_stats, .get_eth_mac_stats = wx_get_mac_stats,
.get_pause_stats = wx_get_pause_stats, .get_pause_stats = wx_get_pause_stats,
.get_pauseparam = wx_get_pauseparam,
.set_pauseparam = wx_set_pauseparam,
.get_ringparam = wx_get_ringparam,
.set_ringparam = ngbe_set_ringparam,
.get_coalesce = wx_get_coalesce,
.set_coalesce = wx_set_coalesce,
.get_channels = wx_get_channels,
.set_channels = ngbe_set_channels,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
}; };
void ngbe_set_ethtool_ops(struct net_device *netdev) void ngbe_set_ethtool_ops(struct net_device *netdev)
......
...@@ -79,28 +79,6 @@ static void ngbe_init_type_code(struct wx *wx) ...@@ -79,28 +79,6 @@ static void ngbe_init_type_code(struct wx *wx)
} }
} }
/**
* ngbe_init_rss_key - Initialize wx RSS key
* @wx: device handle
*
* Allocates and initializes the RSS key if it is not allocated.
**/
static inline int ngbe_init_rss_key(struct wx *wx)
{
u32 *rss_key;
if (!wx->rss_key) {
rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
if (unlikely(!rss_key))
return -ENOMEM;
netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE);
wx->rss_key = rss_key;
}
return 0;
}
/** /**
* ngbe_sw_init - Initialize general software structures * ngbe_sw_init - Initialize general software structures
* @wx: board private structure to initialize * @wx: board private structure to initialize
...@@ -134,8 +112,9 @@ static int ngbe_sw_init(struct wx *wx) ...@@ -134,8 +112,9 @@ static int ngbe_sw_init(struct wx *wx)
dev_err(&pdev->dev, "Do not support MSI-X\n"); dev_err(&pdev->dev, "Do not support MSI-X\n");
wx->mac.max_msix_vectors = msix_count; wx->mac.max_msix_vectors = msix_count;
if (ngbe_init_rss_key(wx)) wx->ring_feature[RING_F_RSS].limit = min_t(int, NGBE_MAX_RSS_INDICES,
return -ENOMEM; num_online_cpus());
wx->rss_enabled = true;
/* enable itr by default in dynamic mode */ /* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1; wx->rx_itr_setting = 1;
...@@ -175,7 +154,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues) ...@@ -175,7 +154,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues)
if (queues) if (queues)
wx_intr_enable(wx, NGBE_INTR_ALL); wx_intr_enable(wx, NGBE_INTR_ALL);
else else
wx_intr_enable(wx, NGBE_INTR_MISC(wx)); wx_intr_enable(wx, NGBE_INTR_MISC);
} }
/** /**
...@@ -241,7 +220,7 @@ static int ngbe_request_msix_irqs(struct wx *wx) ...@@ -241,7 +220,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
for (vector = 0; vector < wx->num_q_vectors; vector++) { for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector]; struct wx_q_vector *q_vector = wx->q_vector[vector];
struct msix_entry *entry = &wx->msix_entries[vector]; struct msix_entry *entry = &wx->msix_q_entries[vector];
if (q_vector->tx.ring && q_vector->rx.ring) if (q_vector->tx.ring && q_vector->rx.ring)
snprintf(q_vector->name, sizeof(q_vector->name) - 1, snprintf(q_vector->name, sizeof(q_vector->name) - 1,
...@@ -259,7 +238,7 @@ static int ngbe_request_msix_irqs(struct wx *wx) ...@@ -259,7 +238,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
} }
} }
err = request_irq(wx->msix_entries[vector].vector, err = request_irq(wx->msix_entry->vector,
ngbe_msix_other, 0, netdev->name, wx); ngbe_msix_other, 0, netdev->name, wx);
if (err) { if (err) {
...@@ -272,7 +251,7 @@ static int ngbe_request_msix_irqs(struct wx *wx) ...@@ -272,7 +251,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
free_queue_irqs: free_queue_irqs:
while (vector) { while (vector) {
vector--; vector--;
free_irq(wx->msix_entries[vector].vector, free_irq(wx->msix_q_entries[vector].vector,
wx->q_vector[vector]); wx->q_vector[vector]);
} }
wx_reset_interrupt_capability(wx); wx_reset_interrupt_capability(wx);
...@@ -334,15 +313,15 @@ static void ngbe_disable_device(struct wx *wx) ...@@ -334,15 +313,15 @@ static void ngbe_disable_device(struct wx *wx)
wx_update_stats(wx); wx_update_stats(wx);
} }
static void ngbe_down(struct wx *wx) void ngbe_down(struct wx *wx)
{ {
phy_stop(wx->phydev); phylink_stop(wx->phylink);
ngbe_disable_device(wx); ngbe_disable_device(wx);
wx_clean_all_tx_rings(wx); wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx); wx_clean_all_rx_rings(wx);
} }
static void ngbe_up(struct wx *wx) void ngbe_up(struct wx *wx)
{ {
wx_configure_vectors(wx); wx_configure_vectors(wx);
...@@ -359,7 +338,7 @@ static void ngbe_up(struct wx *wx) ...@@ -359,7 +338,7 @@ static void ngbe_up(struct wx *wx)
if (wx->gpio_ctrl) if (wx->gpio_ctrl)
ngbe_sfp_modules_txrx_powerctl(wx, true); ngbe_sfp_modules_txrx_powerctl(wx, true);
phy_start(wx->phydev); phylink_start(wx->phylink);
} }
/** /**
...@@ -388,7 +367,7 @@ static int ngbe_open(struct net_device *netdev) ...@@ -388,7 +367,7 @@ static int ngbe_open(struct net_device *netdev)
if (err) if (err)
goto err_free_resources; goto err_free_resources;
err = ngbe_phy_connect(wx); err = phylink_connect_phy(wx->phylink, wx->phydev);
if (err) if (err)
goto err_free_irq; goto err_free_irq;
...@@ -404,7 +383,7 @@ static int ngbe_open(struct net_device *netdev) ...@@ -404,7 +383,7 @@ static int ngbe_open(struct net_device *netdev)
return 0; return 0;
err_dis_phy: err_dis_phy:
phy_disconnect(wx->phydev); phylink_disconnect_phy(wx->phylink);
err_free_irq: err_free_irq:
wx_free_irq(wx); wx_free_irq(wx);
err_free_resources: err_free_resources:
...@@ -430,7 +409,7 @@ static int ngbe_close(struct net_device *netdev) ...@@ -430,7 +409,7 @@ static int ngbe_close(struct net_device *netdev)
ngbe_down(wx); ngbe_down(wx);
wx_free_irq(wx); wx_free_irq(wx);
wx_free_resources(wx); wx_free_resources(wx);
phy_disconnect(wx->phydev); phylink_disconnect_phy(wx->phylink);
wx_control_hw(wx, false); wx_control_hw(wx, false);
return 0; return 0;
...@@ -480,6 +459,39 @@ static void ngbe_shutdown(struct pci_dev *pdev) ...@@ -480,6 +459,39 @@ static void ngbe_shutdown(struct pci_dev *pdev)
} }
} }
/**
* ngbe_setup_tc - routine to configure net_device for multiple traffic
* classes.
*
* @dev: net device to configure
* @tc: number of traffic classes to enable
*/
int ngbe_setup_tc(struct net_device *dev, u8 tc)
{
struct wx *wx = netdev_priv(dev);
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
*/
if (netif_running(dev))
ngbe_close(dev);
wx_clear_interrupt_scheme(wx);
if (tc)
netdev_set_num_tc(dev, tc);
else
netdev_reset_tc(dev);
wx_init_interrupt_scheme(wx);
if (netif_running(dev))
ngbe_open(dev);
return 0;
}
static const struct net_device_ops ngbe_netdev_ops = { static const struct net_device_ops ngbe_netdev_ops = {
.ndo_open = ngbe_open, .ndo_open = ngbe_open,
.ndo_stop = ngbe_close, .ndo_stop = ngbe_close,
...@@ -681,6 +693,7 @@ static int ngbe_probe(struct pci_dev *pdev, ...@@ -681,6 +693,7 @@ static int ngbe_probe(struct pci_dev *pdev,
return 0; return 0;
err_register: err_register:
phylink_destroy(wx->phylink);
wx_control_hw(wx, false); wx_control_hw(wx, false);
err_clear_interrupt_scheme: err_clear_interrupt_scheme:
wx_clear_interrupt_scheme(wx); wx_clear_interrupt_scheme(wx);
...@@ -710,9 +723,11 @@ static void ngbe_remove(struct pci_dev *pdev) ...@@ -710,9 +723,11 @@ static void ngbe_remove(struct pci_dev *pdev)
netdev = wx->netdev; netdev = wx->netdev;
unregister_netdev(netdev); unregister_netdev(netdev);
phylink_destroy(wx->phylink);
pci_release_selected_regions(pdev, pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM)); pci_select_bars(pdev, IORESOURCE_MEM));
kfree(wx->rss_key);
kfree(wx->mac_table); kfree(wx->mac_table);
wx_clear_interrupt_scheme(wx); wx_clear_interrupt_scheme(wx);
......
...@@ -56,22 +56,28 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr, ...@@ -56,22 +56,28 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr,
return ret; return ret;
} }
static void ngbe_handle_link_change(struct net_device *dev) static void ngbe_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{ {
struct wx *wx = netdev_priv(dev); }
struct phy_device *phydev;
static void ngbe_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
}
static void ngbe_mac_link_up(struct phylink_config *config,
struct phy_device *phy,
unsigned int mode, phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct wx *wx = phylink_to_wx(config);
u32 lan_speed, reg; u32 lan_speed, reg;
phydev = wx->phydev; wx_fc_enable(wx, tx_pause, rx_pause);
if (!(wx->link != phydev->link ||
wx->speed != phydev->speed ||
wx->duplex != phydev->duplex))
return;
wx->link = phydev->link; switch (speed) {
wx->speed = phydev->speed;
wx->duplex = phydev->duplex;
switch (phydev->speed) {
case SPEED_10: case SPEED_10:
lan_speed = 0; lan_speed = 0;
break; break;
...@@ -83,54 +89,51 @@ static void ngbe_handle_link_change(struct net_device *dev) ...@@ -83,54 +89,51 @@ static void ngbe_handle_link_change(struct net_device *dev)
lan_speed = 2; lan_speed = 2;
break; break;
} }
wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed); wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed);
if (phydev->link) {
reg = rd32(wx, WX_MAC_TX_CFG); reg = rd32(wx, WX_MAC_TX_CFG);
reg &= ~WX_MAC_TX_CFG_SPEED_MASK; reg &= ~WX_MAC_TX_CFG_SPEED_MASK;
reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE; reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE;
wr32(wx, WX_MAC_TX_CFG, reg); wr32(wx, WX_MAC_TX_CFG, reg);
/* Re configure MAC RX */
/* Re configure MAC Rx */
reg = rd32(wx, WX_MAC_RX_CFG); reg = rd32(wx, WX_MAC_RX_CFG);
wr32(wx, WX_MAC_RX_CFG, reg); wr32(wx, WX_MAC_RX_CFG, reg);
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
reg = rd32(wx, WX_MAC_WDG_TIMEOUT); reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, reg); wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
}
phy_print_status(phydev);
} }
int ngbe_phy_connect(struct wx *wx) static const struct phylink_mac_ops ngbe_mac_ops = {
.mac_config = ngbe_mac_config,
.mac_link_down = ngbe_mac_link_down,
.mac_link_up = ngbe_mac_link_up,
};
static int ngbe_phylink_init(struct wx *wx)
{ {
int ret; struct phylink_config *config;
phy_interface_t phy_mode;
struct phylink *phylink;
ret = phy_connect_direct(wx->netdev, config = &wx->phylink_config;
wx->phydev, config->dev = &wx->netdev->dev;
ngbe_handle_link_change, config->type = PHYLINK_NETDEV;
PHY_INTERFACE_MODE_RGMII_ID); config->mac_capabilities = MAC_1000FD | MAC_100FD | MAC_10FD |
if (ret) { MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
wx_err(wx, "PHY connect failed.\n"); config->mac_managed_pm = true;
return ret;
}
return 0; phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
} __set_bit(PHY_INTERFACE_MODE_RGMII_ID, config->supported_interfaces);
static void ngbe_phy_fixup(struct wx *wx) phylink = phylink_create(config, NULL, phy_mode, &ngbe_mac_ops);
{ if (IS_ERR(phylink))
struct phy_device *phydev = wx->phydev; return PTR_ERR(phylink);
struct ethtool_eee eee;
wx->phylink = phylink;
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); return 0;
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
phydev->mac_managed_pm = true;
if (wx->mac_type != em_mac_type_mdi)
return;
/* disable EEE, internal phy does not support eee */
memset(&eee, 0, sizeof(eee));
phy_ethtool_set_eee(phydev, &eee);
} }
int ngbe_mdio_init(struct wx *wx) int ngbe_mdio_init(struct wx *wx)
...@@ -165,11 +168,16 @@ int ngbe_mdio_init(struct wx *wx) ...@@ -165,11 +168,16 @@ int ngbe_mdio_init(struct wx *wx)
return -ENODEV; return -ENODEV;
phy_attached_info(wx->phydev); phy_attached_info(wx->phydev);
ngbe_phy_fixup(wx);
wx->link = 0; wx->link = 0;
wx->speed = 0; wx->speed = 0;
wx->duplex = 0; wx->duplex = 0;
ret = ngbe_phylink_init(wx);
if (ret) {
wx_err(wx, "failed to init phylink: %d\n", ret);
return ret;
}
return 0; return 0;
} }
...@@ -7,6 +7,5 @@ ...@@ -7,6 +7,5 @@
#ifndef _NGBE_MDIO_H_ #ifndef _NGBE_MDIO_H_
#define _NGBE_MDIO_H_ #define _NGBE_MDIO_H_
int ngbe_phy_connect(struct wx *wx);
int ngbe_mdio_init(struct wx *wx); int ngbe_mdio_init(struct wx *wx);
#endif /* _NGBE_MDIO_H_ */ #endif /* _NGBE_MDIO_H_ */
...@@ -80,7 +80,7 @@ ...@@ -80,7 +80,7 @@
NGBE_PX_MISC_IEN_GPIO) NGBE_PX_MISC_IEN_GPIO)
#define NGBE_INTR_ALL 0x1FF #define NGBE_INTR_ALL 0x1FF
#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors) #define NGBE_INTR_MISC BIT(0)
#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) #define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4))
#define NGBE_CFG_LAN_SPEED 0x14440 #define NGBE_CFG_LAN_SPEED 0x14440
...@@ -105,6 +105,7 @@ ...@@ -105,6 +105,7 @@
#define NGBE_FW_CMD_ST_FAIL 0x70657376 #define NGBE_FW_CMD_ST_FAIL 0x70657376
#define NGBE_MAX_FDIR_INDICES 7 #define NGBE_MAX_FDIR_INDICES 7
#define NGBE_MAX_RSS_INDICES 8
#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) #define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) #define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
...@@ -130,4 +131,8 @@ ...@@ -130,4 +131,8 @@
extern char ngbe_driver_name[]; extern char ngbe_driver_name[];
void ngbe_down(struct wx *wx);
void ngbe_up(struct wx *wx);
int ngbe_setup_tc(struct net_device *dev, u8 tc);
#endif /* _NGBE_TYPE_H_ */ #endif /* _NGBE_TYPE_H_ */
...@@ -7,43 +7,93 @@ ...@@ -7,43 +7,93 @@
#include "../libwx/wx_ethtool.h" #include "../libwx/wx_ethtool.h"
#include "../libwx/wx_type.h" #include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "txgbe_type.h" #include "txgbe_type.h"
#include "txgbe_ethtool.h" #include "txgbe_ethtool.h"
static int txgbe_nway_reset(struct net_device *netdev) static int txgbe_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{ {
struct txgbe *txgbe = netdev_to_txgbe(netdev); struct wx *wx = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
struct wx_ring *temp_ring;
int i;
return phylink_ethtool_nway_reset(txgbe->phylink); new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
} new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
static int txgbe_get_link_ksettings(struct net_device *netdev, new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
struct ethtool_link_ksettings *cmd) new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE);
{
struct txgbe *txgbe = netdev_to_txgbe(netdev); if (new_tx_count == wx->tx_ring_count &&
new_rx_count == wx->rx_ring_count)
return 0;
if (!netif_running(wx->netdev)) {
for (i = 0; i < wx->num_tx_queues; i++)
wx->tx_ring[i]->count = new_tx_count;
for (i = 0; i < wx->num_rx_queues; i++)
wx->rx_ring[i]->count = new_rx_count;
wx->tx_ring_count = new_tx_count;
wx->rx_ring_count = new_rx_count;
return 0;
}
return phylink_ethtool_ksettings_get(txgbe->phylink, cmd); /* allocate temporary buffer to store rings in */
i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
if (!temp_ring)
return -ENOMEM;
txgbe_down(wx);
wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
kvfree(temp_ring);
txgbe_up(wx);
return 0;
} }
static int txgbe_set_link_ksettings(struct net_device *netdev, static int txgbe_set_channels(struct net_device *dev,
const struct ethtool_link_ksettings *cmd) struct ethtool_channels *ch)
{ {
struct txgbe *txgbe = netdev_to_txgbe(netdev); int err;
err = wx_set_channels(dev, ch);
if (err < 0)
return err;
return phylink_ethtool_ksettings_set(txgbe->phylink, cmd); /* use setup TC to update any traffic class queue mapping */
return txgbe_setup_tc(dev, netdev_get_num_tc(dev));
} }
static const struct ethtool_ops txgbe_ethtool_ops = { static const struct ethtool_ops txgbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
.get_drvinfo = wx_get_drvinfo, .get_drvinfo = wx_get_drvinfo,
.nway_reset = txgbe_nway_reset, .nway_reset = wx_nway_reset,
.get_link = ethtool_op_get_link, .get_link = ethtool_op_get_link,
.get_link_ksettings = txgbe_get_link_ksettings, .get_link_ksettings = wx_get_link_ksettings,
.set_link_ksettings = txgbe_set_link_ksettings, .set_link_ksettings = wx_set_link_ksettings,
.get_sset_count = wx_get_sset_count, .get_sset_count = wx_get_sset_count,
.get_strings = wx_get_strings, .get_strings = wx_get_strings,
.get_ethtool_stats = wx_get_ethtool_stats, .get_ethtool_stats = wx_get_ethtool_stats,
.get_eth_mac_stats = wx_get_mac_stats, .get_eth_mac_stats = wx_get_mac_stats,
.get_pause_stats = wx_get_pause_stats, .get_pause_stats = wx_get_pause_stats,
.get_pauseparam = wx_get_pauseparam,
.set_pauseparam = wx_set_pauseparam,
.get_ringparam = wx_get_ringparam,
.set_ringparam = txgbe_set_ringparam,
.get_coalesce = wx_get_coalesce,
.set_coalesce = wx_set_coalesce,
.get_channels = wx_get_channels,
.set_channels = txgbe_set_channels,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
}; };
void txgbe_set_ethtool_ops(struct net_device *netdev) void txgbe_set_ethtool_ops(struct net_device *netdev)
......
...@@ -86,7 +86,7 @@ static void txgbe_irq_enable(struct wx *wx, bool queues) ...@@ -86,7 +86,7 @@ static void txgbe_irq_enable(struct wx *wx, bool queues)
wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK); wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
/* unmask interrupt */ /* unmask interrupt */
wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); wx_intr_enable(wx, TXGBE_INTR_MISC);
if (queues) if (queues)
wx_intr_enable(wx, TXGBE_INTR_QALL(wx)); wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
} }
...@@ -145,7 +145,7 @@ static int txgbe_request_msix_irqs(struct wx *wx) ...@@ -145,7 +145,7 @@ static int txgbe_request_msix_irqs(struct wx *wx)
for (vector = 0; vector < wx->num_q_vectors; vector++) { for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector]; struct wx_q_vector *q_vector = wx->q_vector[vector];
struct msix_entry *entry = &wx->msix_entries[vector]; struct msix_entry *entry = &wx->msix_q_entries[vector];
if (q_vector->tx.ring && q_vector->rx.ring) if (q_vector->tx.ring && q_vector->rx.ring)
snprintf(q_vector->name, sizeof(q_vector->name) - 1, snprintf(q_vector->name, sizeof(q_vector->name) - 1,
...@@ -168,7 +168,7 @@ static int txgbe_request_msix_irqs(struct wx *wx) ...@@ -168,7 +168,7 @@ static int txgbe_request_msix_irqs(struct wx *wx)
free_queue_irqs: free_queue_irqs:
while (vector) { while (vector) {
vector--; vector--;
free_irq(wx->msix_entries[vector].vector, free_irq(wx->msix_q_entries[vector].vector,
wx->q_vector[vector]); wx->q_vector[vector]);
} }
wx_reset_interrupt_capability(wx); wx_reset_interrupt_capability(wx);
...@@ -206,7 +206,6 @@ static int txgbe_request_irq(struct wx *wx) ...@@ -206,7 +206,6 @@ static int txgbe_request_irq(struct wx *wx)
static void txgbe_up_complete(struct wx *wx) static void txgbe_up_complete(struct wx *wx)
{ {
struct net_device *netdev = wx->netdev; struct net_device *netdev = wx->netdev;
struct txgbe *txgbe;
wx_control_hw(wx, true); wx_control_hw(wx, true);
wx_configure_vectors(wx); wx_configure_vectors(wx);
...@@ -215,8 +214,7 @@ static void txgbe_up_complete(struct wx *wx) ...@@ -215,8 +214,7 @@ static void txgbe_up_complete(struct wx *wx)
smp_mb__before_atomic(); smp_mb__before_atomic();
wx_napi_enable_all(wx); wx_napi_enable_all(wx);
txgbe = netdev_to_txgbe(netdev); phylink_start(wx->phylink);
phylink_start(txgbe->phylink);
/* clear any pending interrupts, may auto mask */ /* clear any pending interrupts, may auto mask */
rd32(wx, WX_PX_IC(0)); rd32(wx, WX_PX_IC(0));
...@@ -290,18 +288,22 @@ static void txgbe_disable_device(struct wx *wx) ...@@ -290,18 +288,22 @@ static void txgbe_disable_device(struct wx *wx)
wx_update_stats(wx); wx_update_stats(wx);
} }
static void txgbe_down(struct wx *wx) void txgbe_down(struct wx *wx)
{ {
struct txgbe *txgbe = netdev_to_txgbe(wx->netdev);
txgbe_disable_device(wx); txgbe_disable_device(wx);
txgbe_reset(wx); txgbe_reset(wx);
phylink_stop(txgbe->phylink); phylink_stop(wx->phylink);
wx_clean_all_tx_rings(wx); wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx); wx_clean_all_rx_rings(wx);
} }
void txgbe_up(struct wx *wx)
{
wx_configure(wx);
txgbe_up_complete(wx);
}
/** /**
* txgbe_init_type_code - Initialize the shared code * txgbe_init_type_code - Initialize the shared code
* @wx: pointer to hardware structure * @wx: pointer to hardware structure
...@@ -376,6 +378,10 @@ static int txgbe_sw_init(struct wx *wx) ...@@ -376,6 +378,10 @@ static int txgbe_sw_init(struct wx *wx)
wx_err(wx, "Do not support MSI-X\n"); wx_err(wx, "Do not support MSI-X\n");
wx->mac.max_msix_vectors = msix_count; wx->mac.max_msix_vectors = msix_count;
wx->ring_feature[RING_F_RSS].limit = min_t(int, TXGBE_MAX_RSS_INDICES,
num_online_cpus());
wx->rss_enabled = true;
/* enable itr by default in dynamic mode */ /* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1; wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1; wx->tx_itr_setting = 1;
...@@ -502,6 +508,41 @@ static void txgbe_shutdown(struct pci_dev *pdev) ...@@ -502,6 +508,41 @@ static void txgbe_shutdown(struct pci_dev *pdev)
} }
} }
/**
* txgbe_setup_tc - routine to configure net_device for multiple traffic
* classes.
*
* @dev: net device to configure
* @tc: number of traffic classes to enable
*/
int txgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct wx *wx = netdev_priv(dev);
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
*/
if (netif_running(dev))
txgbe_close(dev);
else
txgbe_reset(wx);
wx_clear_interrupt_scheme(wx);
if (tc)
netdev_set_num_tc(dev, tc);
else
netdev_reset_tc(dev);
wx_init_interrupt_scheme(wx);
if (netif_running(dev))
txgbe_open(dev);
return 0;
}
static const struct net_device_ops txgbe_netdev_ops = { static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open, .ndo_open = txgbe_open,
.ndo_stop = txgbe_close, .ndo_stop = txgbe_close,
...@@ -776,6 +817,7 @@ static void txgbe_remove(struct pci_dev *pdev) ...@@ -776,6 +817,7 @@ static void txgbe_remove(struct pci_dev *pdev)
pci_release_selected_regions(pdev, pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM)); pci_select_bars(pdev, IORESOURCE_MEM));
kfree(wx->rss_key);
kfree(wx->mac_table); kfree(wx->mac_table);
wx_clear_interrupt_scheme(wx); wx_clear_interrupt_scheme(wx);
......
...@@ -159,7 +159,8 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe) ...@@ -159,7 +159,8 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe)
static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *config, static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *config,
phy_interface_t interface) phy_interface_t interface)
{ {
struct txgbe *txgbe = netdev_to_txgbe(to_net_dev(config->dev)); struct wx *wx = phylink_to_wx(config);
struct txgbe *txgbe = wx->priv;
if (interface == PHY_INTERFACE_MODE_10GBASER) if (interface == PHY_INTERFACE_MODE_10GBASER)
return &txgbe->xpcs->pcs; return &txgbe->xpcs->pcs;
...@@ -175,7 +176,7 @@ static void txgbe_mac_config(struct phylink_config *config, unsigned int mode, ...@@ -175,7 +176,7 @@ static void txgbe_mac_config(struct phylink_config *config, unsigned int mode,
static void txgbe_mac_link_down(struct phylink_config *config, static void txgbe_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface) unsigned int mode, phy_interface_t interface)
{ {
struct wx *wx = netdev_priv(to_net_dev(config->dev)); struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
} }
...@@ -186,9 +187,11 @@ static void txgbe_mac_link_up(struct phylink_config *config, ...@@ -186,9 +187,11 @@ static void txgbe_mac_link_up(struct phylink_config *config,
int speed, int duplex, int speed, int duplex,
bool tx_pause, bool rx_pause) bool tx_pause, bool rx_pause)
{ {
struct wx *wx = netdev_priv(to_net_dev(config->dev)); struct wx *wx = phylink_to_wx(config);
u32 txcfg, wdg; u32 txcfg, wdg;
wx_fc_enable(wx, tx_pause, rx_pause);
txcfg = rd32(wx, WX_MAC_TX_CFG); txcfg = rd32(wx, WX_MAC_TX_CFG);
txcfg &= ~WX_MAC_TX_CFG_SPEED_MASK; txcfg &= ~WX_MAC_TX_CFG_SPEED_MASK;
...@@ -217,7 +220,7 @@ static void txgbe_mac_link_up(struct phylink_config *config, ...@@ -217,7 +220,7 @@ static void txgbe_mac_link_up(struct phylink_config *config,
static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
phy_interface_t interface) phy_interface_t interface)
{ {
struct wx *wx = netdev_priv(to_net_dev(config->dev)); struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0);
...@@ -228,7 +231,7 @@ static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, ...@@ -228,7 +231,7 @@ static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode, static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode,
phy_interface_t interface) phy_interface_t interface)
{ {
struct wx *wx = netdev_priv(to_net_dev(config->dev)); struct wx *wx = phylink_to_wx(config);
txgbe_enable_sec_tx_path(wx); txgbe_enable_sec_tx_path(wx);
wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
...@@ -253,10 +256,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe) ...@@ -253,10 +256,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
phy_interface_t phy_mode; phy_interface_t phy_mode;
struct phylink *phylink; struct phylink *phylink;
config = devm_kzalloc(&wx->pdev->dev, sizeof(*config), GFP_KERNEL); config = &wx->phylink_config;
if (!config)
return -ENOMEM;
config->dev = &wx->netdev->dev; config->dev = &wx->netdev->dev;
config->type = PHYLINK_NETDEV; config->type = PHYLINK_NETDEV;
config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD | config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD |
...@@ -287,7 +287,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe) ...@@ -287,7 +287,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
} }
} }
txgbe->phylink = phylink; wx->phylink = phylink;
return 0; return 0;
} }
...@@ -483,11 +483,11 @@ static void txgbe_irq_handler(struct irq_desc *desc) ...@@ -483,11 +483,11 @@ static void txgbe_irq_handler(struct irq_desc *desc)
TXGBE_PX_MISC_ETH_AN)) { TXGBE_PX_MISC_ETH_AN)) {
u32 reg = rd32(wx, TXGBE_CFG_PORT_ST); u32 reg = rd32(wx, TXGBE_CFG_PORT_ST);
phylink_mac_change(txgbe->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP));
} }
/* unmask interrupt */ /* unmask interrupt */
wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); wx_intr_enable(wx, TXGBE_INTR_MISC);
} }
static int txgbe_gpio_init(struct txgbe *txgbe) static int txgbe_gpio_init(struct txgbe *txgbe)
...@@ -531,7 +531,12 @@ static int txgbe_gpio_init(struct txgbe *txgbe) ...@@ -531,7 +531,12 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
sizeof(*girq->parents), GFP_KERNEL); sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents) if (!girq->parents)
return -ENOMEM; return -ENOMEM;
girq->parents[0] = wx->msix_entries[wx->num_q_vectors].vector;
/* now only supported on MSI-X interrupt */
if (!wx->msix_entry)
return -EPERM;
girq->parents[0] = wx->msix_entry->vector;
girq->default_type = IRQ_TYPE_NONE; girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq; girq->handler = handle_bad_irq;
...@@ -701,6 +706,7 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe) ...@@ -701,6 +706,7 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe)
int txgbe_init_phy(struct txgbe *txgbe) int txgbe_init_phy(struct txgbe *txgbe)
{ {
struct wx *wx = txgbe->wx;
int ret; int ret;
if (txgbe->wx->media_type == sp_media_copper) if (txgbe->wx->media_type == sp_media_copper)
...@@ -708,46 +714,48 @@ int txgbe_init_phy(struct txgbe *txgbe) ...@@ -708,46 +714,48 @@ int txgbe_init_phy(struct txgbe *txgbe)
ret = txgbe_swnodes_register(txgbe); ret = txgbe_swnodes_register(txgbe);
if (ret) { if (ret) {
wx_err(txgbe->wx, "failed to register software nodes\n"); wx_err(wx, "failed to register software nodes\n");
return ret; return ret;
} }
ret = txgbe_mdio_pcs_init(txgbe); ret = txgbe_mdio_pcs_init(txgbe);
if (ret) { if (ret) {
wx_err(txgbe->wx, "failed to init mdio pcs: %d\n", ret); wx_err(wx, "failed to init mdio pcs: %d\n", ret);
goto err_unregister_swnode; goto err_unregister_swnode;
} }
ret = txgbe_phylink_init(txgbe); ret = txgbe_phylink_init(txgbe);
if (ret) { if (ret) {
wx_err(txgbe->wx, "failed to init phylink\n"); wx_err(wx, "failed to init phylink\n");
goto err_destroy_xpcs; goto err_destroy_xpcs;
} }
ret = txgbe_gpio_init(txgbe); ret = txgbe_gpio_init(txgbe);
if (ret) { if (ret) {
wx_err(txgbe->wx, "failed to init gpio\n"); wx_err(wx, "failed to init gpio\n");
goto err_destroy_phylink; goto err_destroy_phylink;
} }
ret = txgbe_clock_register(txgbe); ret = txgbe_clock_register(txgbe);
if (ret) { if (ret) {
wx_err(txgbe->wx, "failed to register clock: %d\n", ret); wx_err(wx, "failed to register clock: %d\n", ret);
goto err_destroy_phylink; goto err_destroy_phylink;
} }
ret = txgbe_i2c_register(txgbe); ret = txgbe_i2c_register(txgbe);
if (ret) { if (ret) {
wx_err(txgbe->wx, "failed to init i2c interface: %d\n", ret); wx_err(wx, "failed to init i2c interface: %d\n", ret);
goto err_unregister_clk; goto err_unregister_clk;
} }
ret = txgbe_sfp_register(txgbe); ret = txgbe_sfp_register(txgbe);
if (ret) { if (ret) {
wx_err(txgbe->wx, "failed to register sfp\n"); wx_err(wx, "failed to register sfp\n");
goto err_unregister_i2c; goto err_unregister_i2c;
} }
wx->msix_in_use = true;
return 0; return 0;
err_unregister_i2c: err_unregister_i2c:
...@@ -756,7 +764,7 @@ int txgbe_init_phy(struct txgbe *txgbe) ...@@ -756,7 +764,7 @@ int txgbe_init_phy(struct txgbe *txgbe)
clkdev_drop(txgbe->clock); clkdev_drop(txgbe->clock);
clk_unregister(txgbe->clk); clk_unregister(txgbe->clk);
err_destroy_phylink: err_destroy_phylink:
phylink_destroy(txgbe->phylink); phylink_destroy(wx->phylink);
err_destroy_xpcs: err_destroy_xpcs:
xpcs_destroy(txgbe->xpcs); xpcs_destroy(txgbe->xpcs);
err_unregister_swnode: err_unregister_swnode:
...@@ -768,8 +776,8 @@ int txgbe_init_phy(struct txgbe *txgbe) ...@@ -768,8 +776,8 @@ int txgbe_init_phy(struct txgbe *txgbe)
void txgbe_remove_phy(struct txgbe *txgbe) void txgbe_remove_phy(struct txgbe *txgbe)
{ {
if (txgbe->wx->media_type == sp_media_copper) { if (txgbe->wx->media_type == sp_media_copper) {
phylink_disconnect_phy(txgbe->phylink); phylink_disconnect_phy(txgbe->wx->phylink);
phylink_destroy(txgbe->phylink); phylink_destroy(txgbe->wx->phylink);
return; return;
} }
...@@ -777,7 +785,8 @@ void txgbe_remove_phy(struct txgbe *txgbe) ...@@ -777,7 +785,8 @@ void txgbe_remove_phy(struct txgbe *txgbe)
platform_device_unregister(txgbe->i2c_dev); platform_device_unregister(txgbe->i2c_dev);
clkdev_drop(txgbe->clock); clkdev_drop(txgbe->clock);
clk_unregister(txgbe->clk); clk_unregister(txgbe->clk);
phylink_destroy(txgbe->phylink); phylink_destroy(txgbe->wx->phylink);
xpcs_destroy(txgbe->xpcs); xpcs_destroy(txgbe->xpcs);
software_node_unregister_node_group(txgbe->nodes.group); software_node_unregister_node_group(txgbe->nodes.group);
txgbe->wx->msix_in_use = false;
} }
...@@ -98,6 +98,7 @@ ...@@ -98,6 +98,7 @@
#define TXGBE_MAX_MSIX_VECTORS 64 #define TXGBE_MAX_MSIX_VECTORS 64
#define TXGBE_MAX_FDIR_INDICES 63 #define TXGBE_MAX_FDIR_INDICES 63
#define TXGBE_MAX_RSS_INDICES 63
#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) #define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) #define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
...@@ -122,19 +123,16 @@ ...@@ -122,19 +123,16 @@
#define TXGBE_DEFAULT_RX_WORK 128 #define TXGBE_DEFAULT_RX_WORK 128
#endif #endif
#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors) #define TXGBE_INTR_MISC BIT(0)
#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1) #define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1)
#define TXGBE_MAX_EITR GENMASK(11, 3) #define TXGBE_MAX_EITR GENMASK(11, 3)
extern char txgbe_driver_name[]; extern char txgbe_driver_name[];
static inline struct txgbe *netdev_to_txgbe(struct net_device *netdev) void txgbe_down(struct wx *wx);
{ void txgbe_up(struct wx *wx);
struct wx *wx = netdev_priv(netdev); int txgbe_setup_tc(struct net_device *dev, u8 tc);
return wx->priv;
}
#define NODE_PROP(_NAME, _PROP) \ #define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \ (const struct software_node) { \
...@@ -175,7 +173,6 @@ struct txgbe { ...@@ -175,7 +173,6 @@ struct txgbe {
struct wx *wx; struct wx *wx;
struct txgbe_nodes nodes; struct txgbe_nodes nodes;
struct dw_xpcs *xpcs; struct dw_xpcs *xpcs;
struct phylink *phylink;
struct platform_device *sfp_dev; struct platform_device *sfp_dev;
struct platform_device *i2c_dev; struct platform_device *i2c_dev;
struct clk_lookup *clock; struct clk_lookup *clock;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment