Commit 7c08c41f authored by David S. Miller

Merge branch 'amd-xgbe-next'

Tom Lendacky says:

====================
amd-xgbe: AMD XGBE driver updates 2018-05-21

The following updates are included in this driver update series:

- Fix the debug output for the max channels count
- Read (once) and save the port property registers during probe
- Remove the use of the comm_owned field
- Remove unused SFP diagnostic support indicator field
- Add ethtool --module-info support
- Add ethtool --show-ring/--set-ring support
- Update the driver in preparation for ethtool --set-channels support
- Add ethtool --show-channels/--set-channels support
- Update the driver to always perform link training in KR mode
- Advertise FEC support when using a KR re-driver
- Update the BelFuse quirk to now support SGMII
- Improve 100Mbps auto-negotiation for BelFuse parts

This patch series is based on net-next.

---

Changes since v1:
- Update the --set-channels support to use the combined, rx and tx
  options as specified in the ethtool man page (in other words, don't
  create combined channels based on the min of the tx and rx channels
  specified).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 50b464b3 76cce0af
@@ -1312,14 +1312,83 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 	return 0;
 }
 
+static void xgbe_free_memory(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_desc_if *desc_if = &pdata->desc_if;
+
+	/* Free the ring descriptors and buffers */
+	desc_if->free_ring_resources(pdata);
+
+	/* Free the channel and ring structures */
+	xgbe_free_channels(pdata);
+}
+
+static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_desc_if *desc_if = &pdata->desc_if;
+	struct net_device *netdev = pdata->netdev;
+	int ret;
+
+	if (pdata->new_tx_ring_count) {
+		pdata->tx_ring_count = pdata->new_tx_ring_count;
+		pdata->tx_q_count = pdata->tx_ring_count;
+		pdata->new_tx_ring_count = 0;
+	}
+
+	if (pdata->new_rx_ring_count) {
+		pdata->rx_ring_count = pdata->new_rx_ring_count;
+		pdata->new_rx_ring_count = 0;
+	}
+
+	/* Calculate the Rx buffer size before allocating rings */
+	pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
+
+	/* Allocate the channel and ring structures */
+	ret = xgbe_alloc_channels(pdata);
+	if (ret)
+		return ret;
+
+	/* Allocate the ring descriptors and buffers */
+	ret = desc_if->alloc_ring_resources(pdata);
+	if (ret)
+		goto err_channels;
+
+	/* Initialize the service and Tx timers */
+	xgbe_init_timers(pdata);
+
+	return 0;
+
+err_channels:
+	xgbe_free_memory(pdata);
+
+	return ret;
+}
+
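The two new_*_ring_count checks above are the hand-off point for the later --set-channels work: a requested count is staged elsewhere, consumed exactly once here, and then cleared. A self-contained sketch of that staging pattern follows (plain C with illustrative names, not driver code).

/*
 * Standalone sketch (not driver code) of the staged-reconfiguration pattern:
 * a requested ring count is parked in new_*_ring_count and only becomes the
 * live count the next time the rings are rebuilt.
 */
#include <stdio.h>

struct demo_counts {
	unsigned int tx_ring_count, rx_ring_count;
	unsigned int new_tx_ring_count, new_rx_ring_count;
};

static void demo_alloc_memory(struct demo_counts *c)
{
	/* Consume any staged counts, then clear the staging fields */
	if (c->new_tx_ring_count) {
		c->tx_ring_count = c->new_tx_ring_count;
		c->new_tx_ring_count = 0;
	}
	if (c->new_rx_ring_count) {
		c->rx_ring_count = c->new_rx_ring_count;
		c->new_rx_ring_count = 0;
	}
	/* ...real code would allocate channels/rings with these counts... */
}

int main(void)
{
	struct demo_counts c = { .tx_ring_count = 4, .rx_ring_count = 4 };

	c.new_tx_ring_count = 8;	/* staged by a "set-channels" request */
	demo_alloc_memory(&c);		/* applied at the next restart */
	printf("tx=%u rx=%u\n", c.tx_ring_count, c.rx_ring_count); /* tx=8 rx=4 */
	return 0;
}
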
 static int xgbe_start(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_phy_if *phy_if = &pdata->phy_if;
 	struct net_device *netdev = pdata->netdev;
+	unsigned int i;
 	int ret;
 
-	DBGPR("-->xgbe_start\n");
+	/* Set the number of queues */
+	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+	if (ret) {
+		netdev_err(netdev, "error setting real tx queue count\n");
+		return ret;
+	}
+
+	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
+	if (ret) {
+		netdev_err(netdev, "error setting real rx queue count\n");
+		return ret;
+	}
+
+	/* Set RSS lookup table data for programming */
+	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
+		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+			       i % pdata->rx_ring_count);
 
 	ret = hw_if->init(pdata);
 	if (ret)

@@ -1347,8 +1416,6 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 	clear_bit(XGBE_STOPPED, &pdata->dev_state);
 
-	DBGPR("<--xgbe_start\n");
-
 	return 0;
 
 err_irqs:

@@ -1426,10 +1493,22 @@ static void xgbe_stopdev(struct work_struct *work)
 	netdev_alert(pdata->netdev, "device stopped\n");
 }
 
-static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
+void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
+{
+	/* If not running, "restart" will happen on open */
+	if (!netif_running(pdata->netdev))
+		return;
+
+	xgbe_stop(pdata);
+
+	xgbe_free_memory(pdata);
+	xgbe_alloc_memory(pdata);
+
+	xgbe_start(pdata);
+}
+
+void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 {
-	DBGPR("-->xgbe_restart_dev\n");
-
 	/* If not running, "restart" will happen on open */
 	if (!netif_running(pdata->netdev))
 		return;

@@ -1440,8 +1519,6 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 	xgbe_free_rx_data(pdata);
 
 	xgbe_start(pdata);
-
-	DBGPR("<--xgbe_restart_dev\n");
 }
 
 static void xgbe_restart(struct work_struct *work)

@@ -1827,11 +1904,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 static int xgbe_open(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 	int ret;
 
-	DBGPR("-->xgbe_open\n");
-
 	/* Create the various names based on netdev name */
 	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
 		 netdev_name(netdev));

@@ -1876,43 +1950,25 @@ static int xgbe_open(struct net_device *netdev)
 		goto err_sysclk;
 	}
 
-	/* Calculate the Rx buffer size before allocating rings */
-	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
-	if (ret < 0)
-		goto err_ptpclk;
-	pdata->rx_buf_size = ret;
-
-	/* Allocate the channel and ring structures */
-	ret = xgbe_alloc_channels(pdata);
-	if (ret)
-		goto err_ptpclk;
-
-	/* Allocate the ring descriptors and buffers */
-	ret = desc_if->alloc_ring_resources(pdata);
-	if (ret)
-		goto err_channels;
-
 	INIT_WORK(&pdata->service_work, xgbe_service);
 	INIT_WORK(&pdata->restart_work, xgbe_restart);
 	INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
 	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
-	xgbe_init_timers(pdata);
+
+	ret = xgbe_alloc_memory(pdata);
+	if (ret)
+		goto err_ptpclk;
 
 	ret = xgbe_start(pdata);
 	if (ret)
-		goto err_rings;
+		goto err_mem;
 
 	clear_bit(XGBE_DOWN, &pdata->dev_state);
 
-	DBGPR("<--xgbe_open\n");
-
 	return 0;
 
-err_rings:
-	desc_if->free_ring_resources(pdata);
-
-err_channels:
-	xgbe_free_channels(pdata);
+err_mem:
+	xgbe_free_memory(pdata);
 
 err_ptpclk:
 	clk_disable_unprepare(pdata->ptpclk);

@@ -1932,18 +1988,11 @@ static int xgbe_open(struct net_device *netdev)
 static int xgbe_close(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct xgbe_desc_if *desc_if = &pdata->desc_if;
-
-	DBGPR("-->xgbe_close\n");
 
 	/* Stop the device */
 	xgbe_stop(pdata);
 
-	/* Free the ring descriptors and buffers */
-	desc_if->free_ring_resources(pdata);
-
-	/* Free the channel and ring structures */
-	xgbe_free_channels(pdata);
+	xgbe_free_memory(pdata);
 
 	/* Disable the clocks */
 	clk_disable_unprepare(pdata->ptpclk);

@@ -1957,8 +2006,6 @@ static int xgbe_close(struct net_device *netdev)
 	set_bit(XGBE_DOWN, &pdata->dev_state);
 
-	DBGPR("<--xgbe_close\n");
-
 	return 0;
 }

...
@@ -626,6 +626,217 @@ static int xgbe_get_ts_info(struct net_device *netdev,
 	return 0;
 }
 
+static int xgbe_get_module_info(struct net_device *netdev,
+				struct ethtool_modinfo *modinfo)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	return pdata->phy_if.module_info(pdata, modinfo);
+}
+
+static int xgbe_get_module_eeprom(struct net_device *netdev,
+				  struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	return pdata->phy_if.module_eeprom(pdata, eeprom, data);
+}
+
+static void xgbe_get_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ringparam)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	ringparam->rx_max_pending = XGBE_RX_DESC_CNT_MAX;
+	ringparam->tx_max_pending = XGBE_TX_DESC_CNT_MAX;
+	ringparam->rx_pending = pdata->rx_desc_count;
+	ringparam->tx_pending = pdata->tx_desc_count;
+}
+
+static int xgbe_set_ringparam(struct net_device *netdev,
+			      struct ethtool_ringparam *ringparam)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	unsigned int rx, tx;
+
+	if (ringparam->rx_mini_pending || ringparam->rx_jumbo_pending) {
+		netdev_err(netdev, "unsupported ring parameter\n");
+		return -EINVAL;
+	}
+
+	if ((ringparam->rx_pending < XGBE_RX_DESC_CNT_MIN) ||
+	    (ringparam->rx_pending > XGBE_RX_DESC_CNT_MAX)) {
+		netdev_err(netdev,
+			   "rx ring parameter must be between %u and %u\n",
+			   XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX);
+		return -EINVAL;
+	}
+
+	if ((ringparam->tx_pending < XGBE_TX_DESC_CNT_MIN) ||
+	    (ringparam->tx_pending > XGBE_TX_DESC_CNT_MAX)) {
+		netdev_err(netdev,
+			   "tx ring parameter must be between %u and %u\n",
+			   XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX);
+		return -EINVAL;
+	}
+
+	rx = __rounddown_pow_of_two(ringparam->rx_pending);
+	if (rx != ringparam->rx_pending)
+		netdev_notice(netdev,
+			      "rx ring parameter rounded to power of two: %u\n",
+			      rx);
+
+	tx = __rounddown_pow_of_two(ringparam->tx_pending);
+	if (tx != ringparam->tx_pending)
+		netdev_notice(netdev,
+			      "tx ring parameter rounded to power of two: %u\n",
+			      tx);
+
+	if ((rx == pdata->rx_desc_count) &&
+	    (tx == pdata->tx_desc_count))
+		goto out;
+
+	pdata->rx_desc_count = rx;
+	pdata->tx_desc_count = tx;
+
+	xgbe_restart_dev(pdata);
+
+out:
+	return 0;
+}
+
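xgbe_set_ringparam() above backs ethtool -g/-G for this driver: requested descriptor counts must fall within the XGBE_*_DESC_CNT_MIN/MAX bounds and are rounded down to a power of two. A minimal userspace sketch of that interface follows (not part of the patch; the device name "eth0" is illustrative and the set step needs CAP_NET_ADMIN).

/*
 * Minimal userspace sketch (not part of the patch) of the interface the
 * new get/set_ringparam hooks serve; "eth0" is an illustrative name.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ringparam erp = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&erp;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GRINGPARAM");
		return 1;
	}
	printf("rx %u/%u, tx %u/%u\n", erp.rx_pending, erp.rx_max_pending,
	       erp.tx_pending, erp.tx_max_pending);

	/* Ask for 1000 Rx descriptors; the driver rounds this down to 512 */
	erp.cmd = ETHTOOL_SRINGPARAM;
	erp.rx_pending = 1000;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRINGPARAM");

	return 0;
}
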
+static void xgbe_get_channels(struct net_device *netdev,
+			      struct ethtool_channels *channels)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	unsigned int rx, tx, combined;
+
+	/* Calculate maximums allowed:
+	 *   - Take into account the number of available IRQs
+	 *   - Do not take into account the number of online CPUs so that
+	 *     the user can over-subscribe if desired
+	 *   - Tx is additionally limited by the number of hardware queues
+	 */
+	rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
+	rx = min(rx, pdata->channel_irq_count);
+	tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
+	tx = min(tx, pdata->channel_irq_count);
+	tx = min(tx, pdata->tx_max_q_count);
+
+	combined = min(rx, tx);
+
+	channels->max_combined = combined;
+	channels->max_rx = rx ? rx - 1 : 0;
+	channels->max_tx = tx ? tx - 1 : 0;
+
+	/* Get current settings based on device state */
+	rx = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
+	tx = pdata->new_tx_ring_count ? : pdata->tx_ring_count;
+
+	combined = min(rx, tx);
+	rx -= combined;
+	tx -= combined;
+
+	channels->combined_count = combined;
+	channels->rx_count = rx;
+	channels->tx_count = tx;
+}
+
+static void xgbe_print_set_channels_input(struct net_device *netdev,
+					  struct ethtool_channels *channels)
+{
+	netdev_err(netdev, "channel inputs: combined=%u, rx-only=%u, tx-only=%u\n",
+		   channels->combined_count, channels->rx_count,
+		   channels->tx_count);
+}
+
+static int xgbe_set_channels(struct net_device *netdev,
+			     struct ethtool_channels *channels)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	unsigned int rx, rx_curr, tx, tx_curr, combined;
+
+	/* Calculate maximums allowed:
+	 *   - Take into account the number of available IRQs
+	 *   - Do not take into account the number of online CPUs so that
+	 *     the user can over-subscribe if desired
+	 *   - Tx is additionally limited by the number of hardware queues
+	 */
+	rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
+	rx = min(rx, pdata->channel_irq_count);
+	tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
+	tx = min(tx, pdata->tx_max_q_count);
+	tx = min(tx, pdata->channel_irq_count);
+
+	combined = min(rx, tx);
+
+	/* Should not be setting other count */
+	if (channels->other_count) {
+		netdev_err(netdev,
+			   "other channel count must be zero\n");
+		return -EINVAL;
+	}
+
+	/* Require at least one Combined (Rx and Tx) channel */
+	if (!channels->combined_count) {
+		netdev_err(netdev,
+			   "at least one combined Rx/Tx channel is required\n");
+		xgbe_print_set_channels_input(netdev, channels);
+		return -EINVAL;
+	}
+
+	/* Check combined channels */
+	if (channels->combined_count > combined) {
+		netdev_err(netdev,
+			   "combined channel count cannot exceed %u\n",
+			   combined);
+		xgbe_print_set_channels_input(netdev, channels);
+		return -EINVAL;
+	}
+
+	/* Can have some Rx-only or Tx-only channels, but not both */
+	if (channels->rx_count && channels->tx_count) {
+		netdev_err(netdev,
+			   "cannot specify both Rx-only and Tx-only channels\n");
+		xgbe_print_set_channels_input(netdev, channels);
+		return -EINVAL;
+	}
+
+	/* Check that we don't exceed the maximum number of channels */
+	if ((channels->combined_count + channels->rx_count) > rx) {
+		netdev_err(netdev,
+			   "total Rx channels (%u) requested exceeds maximum available (%u)\n",
+			   channels->combined_count + channels->rx_count, rx);
+		xgbe_print_set_channels_input(netdev, channels);
+		return -EINVAL;
+	}
+
+	if ((channels->combined_count + channels->tx_count) > tx) {
+		netdev_err(netdev,
+			   "total Tx channels (%u) requested exceeds maximum available (%u)\n",
+			   channels->combined_count + channels->tx_count, tx);
+		xgbe_print_set_channels_input(netdev, channels);
+		return -EINVAL;
+	}
+
+	rx = channels->combined_count + channels->rx_count;
+	tx = channels->combined_count + channels->tx_count;
+
+	rx_curr = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
+	tx_curr = pdata->new_tx_ring_count ? : pdata->tx_ring_count;
+
+	if ((rx == rx_curr) && (tx == tx_curr))
+		goto out;
+
+	pdata->new_rx_ring_count = rx;
+	pdata->new_tx_ring_count = tx;
+
+	xgbe_full_restart_dev(pdata);
+
+out:
+	return 0;
+}
+
 static const struct ethtool_ops xgbe_ethtool_ops = {
 	.get_drvinfo = xgbe_get_drvinfo,
 	.get_msglevel = xgbe_get_msglevel,
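The validation in xgbe_set_channels() accepts one or more combined channels plus, optionally, extra Rx-only or Tx-only channels, but never both kinds of extras in one request. A minimal userspace sketch of the --show-channels/--set-channels interface follows (not part of the patch; "eth0" and the requested counts are illustrative and assume the adapter exposes enough IRQs and hardware queues).

/*
 * Minimal userspace sketch (not part of the patch) of the --show-channels /
 * --set-channels interface backed by the hooks above.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_channels ec = { .cmd = ETHTOOL_GCHANNELS };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ec;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GCHANNELS");
		return 1;
	}
	printf("combined %u (max %u), rx-only %u, tx-only %u\n",
	       ec.combined_count, ec.max_combined, ec.rx_count, ec.tx_count);

	/*
	 * Request 2 combined channels plus 2 extra Rx-only channels.  Per
	 * the driver checks, at least one combined channel is required and
	 * Rx-only and Tx-only extras cannot be mixed in a single request.
	 */
	ec.cmd = ETHTOOL_SCHANNELS;
	ec.combined_count = 2;
	ec.rx_count = 2;
	ec.tx_count = 0;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SCHANNELS");

	return 0;
}
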
@@ -646,6 +857,12 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
 	.get_ts_info = xgbe_get_ts_info,
 	.get_link_ksettings = xgbe_get_link_ksettings,
 	.set_link_ksettings = xgbe_set_link_ksettings,
+	.get_module_info = xgbe_get_module_info,
+	.get_module_eeprom = xgbe_get_module_eeprom,
+	.get_ringparam = xgbe_get_ringparam,
+	.set_ringparam = xgbe_set_ringparam,
+	.get_channels = xgbe_get_channels,
+	.set_channels = xgbe_set_channels,
 };
 
 const struct ethtool_ops *xgbe_get_ethtool_ops(void)

...
@@ -265,7 +265,6 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
 {
 	struct net_device *netdev = pdata->netdev;
 	struct device *dev = pdata->dev;
-	unsigned int i;
 	int ret;
 
 	netdev->irq = pdata->dev_irq;

@@ -324,26 +323,9 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
 			pdata->tx_ring_count, pdata->rx_ring_count);
 	}
 
-	/* Set the number of queues */
-	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
-	if (ret) {
-		dev_err(dev, "error setting real tx queue count\n");
-		return ret;
-	}
-
-	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
-	if (ret) {
-		dev_err(dev, "error setting real rx queue count\n");
-		return ret;
-	}
-
-	/* Initialize RSS hash key and lookup table */
+	/* Initialize RSS hash key */
 	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
 
-	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
-		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
-			       i % pdata->rx_ring_count);
-
 	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
 	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
 	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

...
@@ -126,6 +126,24 @@
 #include "xgbe.h"
 #include "xgbe-common.h"
 
+static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata,
+				  struct ethtool_eeprom *eeprom, u8 *data)
+{
+	if (!pdata->phy_if.phy_impl.module_eeprom)
+		return -ENXIO;
+
+	return pdata->phy_if.phy_impl.module_eeprom(pdata, eeprom, data);
+}
+
+static int xgbe_phy_module_info(struct xgbe_prv_data *pdata,
+				struct ethtool_modinfo *modinfo)
+{
+	if (!pdata->phy_if.phy_impl.module_info)
+		return -ENXIO;
+
+	return pdata->phy_if.phy_impl.module_info(pdata, modinfo);
+}
+
 static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata)
 {
 	int reg;
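The two wrappers above forward to the PHY implementation and return -ENXIO when no SFP access is available; together with the ethtool hooks they back ethtool -m. A minimal userspace sketch of that query path follows (not part of the patch; "eth0" is illustrative).

/*
 * Minimal userspace sketch (not part of the patch) of the module info and
 * module EEPROM query path that these wrappers now serve.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_modinfo emi = { .cmd = ETHTOOL_GMODULEINFO };
	struct ethtool_eeprom *eee;
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&emi;

	/* Fails with ENXIO when the PHY implementation has no SFP access */
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GMODULEINFO");
		return 1;
	}
	printf("module type %#x, eeprom length %u\n", emi.type, emi.eeprom_len);

	eee = calloc(1, sizeof(*eee) + emi.eeprom_len);
	if (!eee)
		return 1;
	eee->cmd = ETHTOOL_GMODULEEEPROM;
	eee->len = emi.eeprom_len;
	ifr.ifr_data = (void *)eee;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_GMODULEEEPROM");
	else
		printf("first EEPROM byte: %#x\n", eee->data[0]);

	free(eee);
	return 0;
}
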
@@ -198,31 +216,8 @@ static void xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata)
 	xgbe_an37_clear_interrupts(pdata);
 }
 
-static void xgbe_an73_enable_kr_training(struct xgbe_prv_data *pdata)
-{
-	unsigned int reg;
-
-	reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
-
-	reg |= XGBE_KR_TRAINING_ENABLE;
-	XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
-}
-
-static void xgbe_an73_disable_kr_training(struct xgbe_prv_data *pdata)
-{
-	unsigned int reg;
-
-	reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
-
-	reg &= ~XGBE_KR_TRAINING_ENABLE;
-	XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
-}
-
 static void xgbe_kr_mode(struct xgbe_prv_data *pdata)
 {
-	/* Enable KR training */
-	xgbe_an73_enable_kr_training(pdata);
-
 	/* Set MAC to 10G speed */
 	pdata->hw_if.set_speed(pdata, SPEED_10000);

@@ -232,9 +227,6 @@ static void xgbe_kr_mode(struct xgbe_prv_data *pdata)
 static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata)
 {
-	/* Disable KR training */
-	xgbe_an73_disable_kr_training(pdata);
-
 	/* Set MAC to 2.5G speed */
 	pdata->hw_if.set_speed(pdata, SPEED_2500);

@@ -244,9 +236,6 @@ static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata)
 static void xgbe_kx_1000_mode(struct xgbe_prv_data *pdata)
 {
-	/* Disable KR training */
-	xgbe_an73_disable_kr_training(pdata);
-
 	/* Set MAC to 1G speed */
 	pdata->hw_if.set_speed(pdata, SPEED_1000);

@@ -260,9 +249,6 @@ static void xgbe_sfi_mode(struct xgbe_prv_data *pdata)
 	if (pdata->kr_redrv)
 		return xgbe_kr_mode(pdata);
 
-	/* Disable KR training */
-	xgbe_an73_disable_kr_training(pdata);
-
 	/* Set MAC to 10G speed */
 	pdata->hw_if.set_speed(pdata, SPEED_10000);

@@ -272,9 +258,6 @@ static void xgbe_sfi_mode(struct xgbe_prv_data *pdata)
 static void xgbe_x_mode(struct xgbe_prv_data *pdata)
 {
-	/* Disable KR training */
-	xgbe_an73_disable_kr_training(pdata);
-
 	/* Set MAC to 1G speed */
 	pdata->hw_if.set_speed(pdata, SPEED_1000);

@@ -284,9 +267,6 @@ static void xgbe_x_mode(struct xgbe_prv_data *pdata)
 static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
 {
-	/* Disable KR training */
-	xgbe_an73_disable_kr_training(pdata);
-
 	/* Set MAC to 1G speed */
 	pdata->hw_if.set_speed(pdata, SPEED_1000);

@@ -296,9 +276,6 @@ static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
 static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata)
 {
-	/* Disable KR training */
-	xgbe_an73_disable_kr_training(pdata);
-
 	/* Set MAC to 1G speed */
 	pdata->hw_if.set_speed(pdata, SPEED_1000);

@@ -354,13 +331,15 @@ static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
 	xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
 }
 
-static void xgbe_set_mode(struct xgbe_prv_data *pdata,
+static bool xgbe_set_mode(struct xgbe_prv_data *pdata,
 			  enum xgbe_mode mode)
 {
 	if (mode == xgbe_cur_mode(pdata))
-		return;
+		return false;
 
 	xgbe_change_mode(pdata, mode);
+
+	return true;
 }
 
 static bool xgbe_use_mode(struct xgbe_prv_data *pdata,

@@ -407,6 +386,12 @@ static void xgbe_an73_set(struct xgbe_prv_data *pdata, bool enable,
 {
 	unsigned int reg;
 
+	/* Disable KR training for now */
+	reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+	reg &= ~XGBE_KR_TRAINING_ENABLE;
+	XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+
+	/* Update AN settings */
 	reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
 	reg &= ~MDIO_AN_CTRL1_ENABLE;

@@ -504,21 +489,19 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
 	XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
 
 	/* Start KR training */
-	reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
-	if (reg & XGBE_KR_TRAINING_ENABLE) {
-		if (pdata->phy_if.phy_impl.kr_training_pre)
-			pdata->phy_if.phy_impl.kr_training_pre(pdata);
+	if (pdata->phy_if.phy_impl.kr_training_pre)
+		pdata->phy_if.phy_impl.kr_training_pre(pdata);
 
-		reg |= XGBE_KR_TRAINING_START;
-		XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
-			    reg);
+	reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+	reg |= XGBE_KR_TRAINING_ENABLE;
+	reg |= XGBE_KR_TRAINING_START;
+	XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
 
-		netif_dbg(pdata, link, pdata->netdev,
-			  "KR training initiated\n");
+	netif_dbg(pdata, link, pdata->netdev,
+		  "KR training initiated\n");
 
-		if (pdata->phy_if.phy_impl.kr_training_post)
-			pdata->phy_if.phy_impl.kr_training_post(pdata);
-	}
+	if (pdata->phy_if.phy_impl.kr_training_post)
+		pdata->phy_if.phy_impl.kr_training_post(pdata);
 
 	return XGBE_AN_PAGE_RECEIVED;
 }

@@ -1197,21 +1180,23 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
 	return 0;
 }
 
-static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode)
 {
 	int ret;
 
+	mutex_lock(&pdata->an_mutex);
+
 	set_bit(XGBE_LINK_INIT, &pdata->dev_state);
 	pdata->link_check = jiffies;
 
 	ret = pdata->phy_if.phy_impl.an_config(pdata);
 	if (ret)
-		return ret;
+		goto out;
 
 	if (pdata->phy.autoneg != AUTONEG_ENABLE) {
 		ret = xgbe_phy_config_fixed(pdata);
 		if (ret || !pdata->kr_redrv)
-			return ret;
+			goto out;
 
 		netif_dbg(pdata, link, pdata->netdev, "AN redriver support\n");
 	} else {

@@ -1221,6 +1206,7 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
 	/* Disable auto-negotiation interrupt */
 	disable_irq(pdata->an_irq);
 
+	if (set_mode) {
 	/* Start auto-negotiation in a supported mode */
 	if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
 		xgbe_set_mode(pdata, XGBE_MODE_KR);

@@ -1238,7 +1224,9 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
 		xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
 	} else {
 		enable_irq(pdata->an_irq);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
+	}
 	}
 
 	/* Disable and stop any in progress auto-negotiation */

@@ -1258,16 +1246,7 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
 	xgbe_an_init(pdata);
 	xgbe_an_restart(pdata);
 
-	return 0;
-}
-
-static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
-{
-	int ret;
-
-	mutex_lock(&pdata->an_mutex);
-
-	ret = __xgbe_phy_config_aneg(pdata);
+out:
 	if (ret)
 		set_bit(XGBE_LINK_ERR, &pdata->dev_state);
 	else

@@ -1278,6 +1257,16 @@ static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
 	return ret;
 }
 
+static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+	return __xgbe_phy_config_aneg(pdata, true);
+}
+
+static int xgbe_phy_reconfig_aneg(struct xgbe_prv_data *pdata)
+{
+	return __xgbe_phy_config_aneg(pdata, false);
+}
+
 static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
 {
 	return (pdata->an_result == XGBE_AN_COMPLETE);

@@ -1334,7 +1323,8 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
 	pdata->phy.duplex = DUPLEX_FULL;
 
-	xgbe_set_mode(pdata, mode);
+	if (xgbe_set_mode(pdata, mode) && pdata->an_again)
+		xgbe_phy_reconfig_aneg(pdata);
 }
 
 static void xgbe_phy_status(struct xgbe_prv_data *pdata)

@@ -1639,4 +1629,7 @@ void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
 	phy_if->phy_valid_speed = xgbe_phy_valid_speed;
 
 	phy_if->an_isr = xgbe_an_combined_isr;
+
+	phy_if->module_info = xgbe_phy_module_info;
+	phy_if->module_eeprom = xgbe_phy_module_eeprom;
 }
@@ -335,16 +335,33 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	pdata->awcr = XGBE_DMA_PCI_AWCR;
 	pdata->awarcr = XGBE_DMA_PCI_AWARCR;
 
+	/* Read the port property registers */
+	pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0);
+	pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1);
+	pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2);
+	pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3);
+	pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4);
+	if (netif_msg_probe(pdata)) {
+		dev_dbg(dev, "port property 0 = %#010x\n", pdata->pp0);
+		dev_dbg(dev, "port property 1 = %#010x\n", pdata->pp1);
+		dev_dbg(dev, "port property 2 = %#010x\n", pdata->pp2);
+		dev_dbg(dev, "port property 3 = %#010x\n", pdata->pp3);
+		dev_dbg(dev, "port property 4 = %#010x\n", pdata->pp4);
+	}
+
 	/* Set the maximum channels and queues */
-	reg = XP_IOREAD(pdata, XP_PROP_1);
-	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
-	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
-	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
-	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
+	pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+						  MAX_TX_DMA);
+	pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+						  MAX_RX_DMA);
+	pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+					    MAX_TX_QUEUES);
+	pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+					    MAX_RX_QUEUES);
 
 	if (netif_msg_probe(pdata)) {
 		dev_dbg(dev, "max tx/rx channel count = %u/%u\n",
 			pdata->tx_max_channel_count,
-			pdata->tx_max_channel_count);
+			pdata->rx_max_channel_count);
 		dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n",
 			pdata->tx_max_q_count, pdata->rx_max_q_count);
 	}

@@ -353,12 +370,13 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	xgbe_set_counts(pdata);
 
 	/* Set the maximum fifo amounts */
-	reg = XP_IOREAD(pdata, XP_PROP_2);
-	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
+	pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
+					      TX_FIFO_SIZE);
 	pdata->tx_max_fifo_size *= 16384;
 	pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
 				      pdata->vdata->tx_max_fifo_size);
-	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
+	pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
+					      RX_FIFO_SIZE);
 	pdata->rx_max_fifo_size *= 16384;
 	pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
 				      pdata->vdata->rx_max_fifo_size);

...

(The diff for one additional file in this series is collapsed and not shown here.)
@@ -144,6 +144,11 @@
 #define XGBE_TX_DESC_MAX_PROC	(XGBE_TX_DESC_CNT >> 1)
 #define XGBE_RX_DESC_CNT	512
 
+#define XGBE_TX_DESC_CNT_MIN	64
+#define XGBE_TX_DESC_CNT_MAX	4096
+#define XGBE_RX_DESC_CNT_MIN	64
+#define XGBE_RX_DESC_CNT_MAX	4096
+
 #define XGBE_TX_MAX_BUF_SIZE	(0x3fff & ~(64 - 1))
 
 /* Descriptors required for maximum contiguous TSO/GSO packet */

@@ -835,6 +840,7 @@ struct xgbe_hw_if {
  * Optional routines:
  *   an_pre, an_post
  *   kr_training_pre, kr_training_post
+ *   module_info, module_eeprom
  */
 struct xgbe_phy_impl_if {
 	/* Perform Setup/teardown actions */

@@ -883,6 +889,12 @@ struct xgbe_phy_impl_if {
 	/* Pre/Post KR training enablement support */
 	void (*kr_training_pre)(struct xgbe_prv_data *);
 	void (*kr_training_post)(struct xgbe_prv_data *);
+
+	/* SFP module related info */
+	int (*module_info)(struct xgbe_prv_data *pdata,
+			   struct ethtool_modinfo *modinfo);
+	int (*module_eeprom)(struct xgbe_prv_data *pdata,
+			     struct ethtool_eeprom *eeprom, u8 *data);
 };
 
 struct xgbe_phy_if {

@@ -905,6 +917,12 @@ struct xgbe_phy_if {
 	/* For single interrupt support */
 	irqreturn_t (*an_isr)(struct xgbe_prv_data *);
 
+	/* For ethtool PHY support */
+	int (*module_info)(struct xgbe_prv_data *pdata,
+			   struct ethtool_modinfo *modinfo);
+	int (*module_eeprom)(struct xgbe_prv_data *pdata,
+			     struct ethtool_eeprom *eeprom, u8 *data);
+
 	/* PHY implementation specific services */
 	struct xgbe_phy_impl_if phy_impl;
 };

@@ -1027,6 +1045,13 @@ struct xgbe_prv_data {
 	void __iomem *xprop_regs;	/* XGBE property registers */
 	void __iomem *xi2c_regs;	/* XGBE I2C CSRs */
 
+	/* Port property registers */
+	unsigned int pp0;
+	unsigned int pp1;
+	unsigned int pp2;
+	unsigned int pp3;
+	unsigned int pp4;
+
 	/* Overall device lock */
 	spinlock_t lock;

@@ -1097,6 +1122,9 @@ struct xgbe_prv_data {
 	unsigned int rx_ring_count;
 	unsigned int rx_desc_count;
 
+	unsigned int new_tx_ring_count;
+	unsigned int new_rx_ring_count;
+
 	unsigned int tx_max_q_count;
 	unsigned int rx_max_q_count;
 	unsigned int tx_q_count;

@@ -1233,6 +1261,7 @@ struct xgbe_prv_data {
 	enum xgbe_rx kr_state;
 	enum xgbe_rx kx_state;
 	struct work_struct an_work;
+	unsigned int an_again;
 	unsigned int an_supported;
 	unsigned int parallel_detect;
 	unsigned int fec_ability;

@@ -1310,6 +1339,8 @@ int xgbe_powerup(struct net_device *, unsigned int);
 int xgbe_powerdown(struct net_device *, unsigned int);
 void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
+void xgbe_restart_dev(struct xgbe_prv_data *pdata);
+void xgbe_full_restart_dev(struct xgbe_prv_data *pdata);
 
 #ifdef CONFIG_DEBUG_FS
 void xgbe_debugfs_init(struct xgbe_prv_data *);

...