Commit 013724e9 authored by David S. Miller

Merge branch 'amd-xgbe-updates'

Tom Lendacky says:

====================
amd-xgbe: AMD XGBE driver updates 2016-11-03

This patch series is targeted at preparing the driver for a new PCI version
of the hardware.  After this series is applied, a follow-on series will
introduce support for the PCI version of the hardware.

The following updates and fixes are included in this driver update series:

- Fix formatting of PCS debug register dump
- Prepare for priority-based FIFO allocation
- Implement priority-based FIFO allocation
- Prepare for working with more than one type of PCS/PHY
- Prepare for the introduction of clause 37 auto-negotiation
- Add support for clause 37 auto-negotiation
- Prepare for supporting a new PCS register access method
- Add support for 64-bit management counter registers
- Update DMA channel status determination
- Prepare for supporting PCI devices in addition to platform devices

This patch series is based on net-next.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4fb74506 bd8255d8
@@ -2,7 +2,9 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
xgbe-ptp.o
xgbe-ptp.o \
xgbe-phy-v1.o \
xgbe-platform.o
amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
@@ -790,6 +790,10 @@
#define MTL_Q_RQOMR_RSF_WIDTH 1
#define MTL_Q_RQOMR_RTC_INDEX 0
#define MTL_Q_RQOMR_RTC_WIDTH 2
#define MTL_Q_TQDR_TRCSTS_INDEX 1
#define MTL_Q_TQDR_TRCSTS_WIDTH 2
#define MTL_Q_TQDR_TXQSTS_INDEX 4
#define MTL_Q_TQDR_TXQSTS_WIDTH 1
#define MTL_Q_TQOMR_FTQ_INDEX 0
#define MTL_Q_TQOMR_FTQ_WIDTH 1
#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
@@ -852,14 +856,9 @@
#define MTL_TSA_SP 0x00
#define MTL_TSA_ETS 0x02
/* PCS MMD select register offset
 * The MMD select register is used for accessing PCS registers
 * when the underlying APB3 interface is using indirect addressing.
 * Indirect addressing requires accessing registers in two phases,
 * an address phase and a data phase. The address phase requires
 * writing an address selection value to the MMD select register.
 */
#define PCS_MMD_SELECT 0xff
/* PCS register offsets */
#define PCS_V1_WINDOW_SELECT 0x03fc
#define PCS_V2_WINDOW_SELECT 0x9064
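For readers new to the windowed access scheme, here is a minimal sketch of how
an indirect (two-phase) PCS access works; the helper name and the
window_select/window_size parameters are illustrative assumptions, not fields
from this patch:

static u16 pcs_window_read(void __iomem *pcs_regs, unsigned int window_select,
			   unsigned int target, unsigned int window_size)
{
	/* Address phase: select the window containing the target register */
	iowrite32(target / window_size, pcs_regs + window_select);

	/* Data phase: access the target within the selected window */
	return ioread16(pcs_regs + (target % window_size));
}

The real driver derives the index/offset split from pdata->xpcs_window and
pdata->xpcs_window_mask, as the xgbe_read_mmd_regs_v2()/xgbe_write_mmd_regs_v2()
changes later in this series show.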
/* SerDes integration register offsets */
#define SIR0_KR_RT_1 0x002c
@@ -1027,6 +1026,10 @@
#define MDIO_PMA_10GBR_FECCTRL 0x00ab
#endif
#ifndef MDIO_PCS_DIG_CTRL
#define MDIO_PCS_DIG_CTRL 0x8000
#endif
#ifndef MDIO_AN_XNP
#define MDIO_AN_XNP 0x0016
#endif
@@ -1047,11 +1050,40 @@
#define MDIO_AN_INT 0x8002
#endif
#ifndef MDIO_VEND2_AN_ADVERTISE
#define MDIO_VEND2_AN_ADVERTISE 0x0004
#endif
#ifndef MDIO_VEND2_AN_LP_ABILITY
#define MDIO_VEND2_AN_LP_ABILITY 0x0005
#endif
#ifndef MDIO_VEND2_AN_CTRL
#define MDIO_VEND2_AN_CTRL 0x8001
#endif
#ifndef MDIO_VEND2_AN_STAT
#define MDIO_VEND2_AN_STAT 0x8002
#endif
#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif
#ifndef MDIO_VEND2_CTRL1_AN_ENABLE
#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12)
#endif
#ifndef MDIO_VEND2_CTRL1_AN_RESTART
#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9)
#endif
/* MDIO mask values */
#define XGBE_AN_CL73_INT_CMPLT BIT(0)
#define XGBE_AN_CL73_INC_LINK BIT(1)
#define XGBE_AN_CL73_PG_RCV BIT(2)
#define XGBE_AN_CL73_INT_MASK 0x07
#define XGBE_XNP_MCF_NULL_MESSAGE 0x001
#define XGBE_XNP_ACK_PROCESSED BIT(12)
#define XGBE_XNP_MP_FORMATTED BIT(13)
@@ -1060,6 +1092,19 @@
#define XGBE_KR_TRAINING_START BIT(0)
#define XGBE_KR_TRAINING_ENABLE BIT(1)
#define XGBE_PCS_CL37_BP BIT(12)
#define XGBE_AN_CL37_INT_CMPLT BIT(0)
#define XGBE_AN_CL37_INT_MASK 0x01
#define XGBE_AN_CL37_HD_MASK 0x40
#define XGBE_AN_CL37_FD_MASK 0x20
#define XGBE_AN_CL37_PCS_MODE_MASK 0x06
#define XGBE_AN_CL37_PCS_MODE_BASEX 0x00
#define XGBE_AN_CL37_PCS_MODE_SGMII 0x04
#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
@@ -1195,12 +1240,18 @@ do { \
/* Macros for building, reading or writing register values or bits
* within the register values of XPCS registers.
*/
#define XPCS32_IOWRITE(_pdata, _off, _val) \
iowrite32(_val, (_pdata)->xpcs_regs + (_off))
#define XPCS32_IOREAD(_pdata, _off) \
ioread32((_pdata)->xpcs_regs + (_off))
#define XPCS16_IOWRITE(_pdata, _off, _val) \
iowrite16(_val, (_pdata)->xpcs_regs + (_off))
#define XPCS16_IOREAD(_pdata, _off) \
ioread16((_pdata)->xpcs_regs + (_off))
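As a usage sketch, pairing the accessors mirrors how the MMD read paths later
in this series use them (v1 keeps 32-bit accesses for both phases, v2 mixes a
32-bit window select with 16-bit data accesses):

	/* v1: address phase then data phase, both 32-bit */
	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
	mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);

	/* v2: 32-bit window select, 16-bit data access */
	XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);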
/* Macros for building, reading or writing register values or bits
* within the register values of SerDes integration registers.
*/
......
@@ -123,6 +123,11 @@
#include "xgbe.h"
#include "xgbe-common.h"
static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
unsigned int usec)
{
@@ -491,6 +496,27 @@ static void xgbe_config_rss(struct xgbe_prv_data *pdata)
"error configuring RSS, RSS disabled\n");
}
static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
unsigned int queue)
{
unsigned int prio, tc;
for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
/* Does this queue handle the priority? */
if (pdata->prio2q_map[prio] != queue)
continue;
/* Get the Traffic Class for this priority */
tc = pdata->ets->prio_tc[prio];
/* Check if PFC is enabled for this traffic class */
if (pdata->pfc->pfc_en & (1 << tc))
return true;
}
return false;
}
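A worked example with assumed values: if VLAN priority 3 maps to Rx queue 1
and the ETS configuration assigns that priority to traffic class 2, then
queue 1 is a PFC queue whenever bit 2 of pfc_en is set:

	/* Assumed example values, for illustration only */
	pdata->prio2q_map[3] = 1;	/* priority 3 -> Rx queue 1 */
	pdata->ets->prio_tc[3] = 2;	/* priority 3 -> traffic class 2 */
	pdata->pfc->pfc_en = BIT(2);	/* PFC enabled on traffic class 2 */
	/* xgbe_is_pfc_queue(pdata, 1) returns true */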
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count, q_count;
@@ -528,27 +554,14 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->rx_q_count; i++) {
unsigned int ehfc = 0;
		if (pdata->rx_rfd[i]) {
			/* Flow control thresholds are established */
			if (pfc && ets) {
				if (xgbe_is_pfc_queue(pdata, i))
					ehfc = 1;
			} else {
				ehfc = 1;
			}
		}
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
@@ -704,32 +717,26 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
}
static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
{
	unsigned int ss;

switch (speed) {
case SPEED_1000:
ss = 0x03;
break;
case SPEED_2500:
ss = 0x02;
break;
case SPEED_10000:
ss = 0x00;
break;
default:
return -EINVAL;
}
if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
return 0;
}
@@ -1019,8 +1026,71 @@ static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
return 0;
}
static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
				 int mmd_reg)
{
unsigned long flags;
unsigned int mmd_address, index, offset;
int mmd_data;
if (mmd_reg & MII_ADDR_C45)
mmd_address = mmd_reg & ~MII_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
/* The PCS registers are accessed using mmio. The underlying
* management interface uses indirect addressing to access the MMD
* register sets. This requires accessing of the PCS register in two
* phases, an address phase and a data phase.
*
* The mmio interface is based on 16-bit offsets and values. All
* register offsets must therefore be adjusted by left shifting the
* offset 1 bit and reading 16 bits of data.
*/
mmd_address <<= 1;
index = mmd_address & ~pdata->xpcs_window_mask;
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
mmd_data = XPCS16_IOREAD(pdata, offset);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
return mmd_data;
}
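To make the window arithmetic concrete, a worked example with an assumed
window mask of 0x3ff (the actual mask comes from the hardware via
pdata->xpcs_window_mask):

	/* Illustration only, values assumed:
	 * mmd_reg     = MDIO_MMD_PCS register 0x8000 (clause 45)
	 * mmd_address = (3 << 16) | 0x8000  = 0x38000
	 * mmd_address <<= 1                 -> 0x70000 (16-bit granularity)
	 * index  = 0x70000 & ~0x3ff         -> 0x70000, written to
	 *          PCS_V2_WINDOW_SELECT in the address phase
	 * offset = xpcs_window + (0x70000 & 0x3ff) -> xpcs_window + 0,
	 *          read with XPCS16_IOREAD in the data phase
	 */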
static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
int mmd_reg, int mmd_data)
{
unsigned long flags;
unsigned int mmd_address, index, offset;
if (mmd_reg & MII_ADDR_C45)
mmd_address = mmd_reg & ~MII_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
/* The PCS registers are accessed using mmio. The underlying
* management interface uses indirect addressing to access the MMD
* register sets. This requires accessing of the PCS register in two
* phases, an address phase and a data phase.
*
* The mmio interface is based on 16-bit offsets and values. All
* register offsets must therefore be adjusted by left shifting the
* offset 1 bit and writing 16 bits of data.
*/
mmd_address <<= 1;
index = mmd_address & ~pdata->xpcs_window_mask;
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
XPCS16_IOWRITE(pdata, offset, mmd_data);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
int mmd_reg)
{
unsigned long flags;
unsigned int mmd_address;
@@ -1041,15 +1111,15 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata,
* offset 2 bits and reading 32 bits of data.
*/
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
return mmd_data;
}
static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
				   int mmd_reg, int mmd_data)
{
unsigned int mmd_address;
unsigned long flags;
@@ -1066,14 +1136,40 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata,
*
* The mmio interface is based on 32-bit offsets and values. All
* register offsets must therefore be adjusted by left shifting the
* offset 2 bits and writing 32 bits of data.
*/
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
int mmd_reg)
{
switch (pdata->vdata->xpcs_access) {
case XGBE_XPCS_ACCESS_V1:
return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
}
}
static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
int mmd_reg, int mmd_data)
{
switch (pdata->vdata->xpcs_access) {
case XGBE_XPCS_ACCESS_V1:
return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
}
}
static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
@@ -1327,106 +1423,6 @@ static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
return 0;
}
static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
{
xgbe_config_flow_control(pdata);
}
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
struct xgbe_ring *ring)
{
@@ -2000,61 +1996,331 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}
static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
					      unsigned int queue,
					      unsigned int q_fifo_size)
{
unsigned int frame_fifo_size;
unsigned int rfa, rfd;
frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
/* PFC is active for this queue */
rfa = pdata->pfc_rfa;
rfd = rfa + frame_fifo_size;
if (rfd > XGMAC_FLOW_CONTROL_MAX)
rfd = XGMAC_FLOW_CONTROL_MAX;
if (rfa >= XGMAC_FLOW_CONTROL_MAX)
rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT;
} else {
/* This path deals with just maximum frame sizes which are
* limited to a jumbo frame of 9,000 (plus headers, etc.)
* so we can never exceed the maximum allowable RFA/RFD
* values.
*/
if (q_fifo_size <= 2048) {
/* rx_rfd to zero to signal no flow control */
pdata->rx_rfa[queue] = 0;
pdata->rx_rfd[queue] = 0;
return;
}
if (q_fifo_size <= 4096) {
/* Between 2048 and 4096 */
pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
return;
}
if (q_fifo_size <= frame_fifo_size) {
/* Between 4096 and max-frame */
pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
return;
}
if (q_fifo_size <= (frame_fifo_size * 3)) {
/* Between max-frame and 3 max-frames,
* trigger if we get just over a frame of data and
* resume when we have just under half a frame left.
*/
rfa = q_fifo_size - frame_fifo_size;
rfd = rfa + (frame_fifo_size / 2);
} else {
/* Above 3 max-frames - trigger when just over
* 2 frames of space available
*/
rfa = frame_fifo_size * 2;
rfa += XGMAC_FLOW_CONTROL_UNIT;
rfd = rfa + frame_fifo_size;
}
}
pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
}
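A worked example under assumed numbers: with a 9,000-byte jumbo frame the
aligned frame fifo is roughly 9 KiB, so a 16 KiB queue fifo falls in the
"between max-frame and 3 max-frames" band:

	/* Illustration only, sizes assumed */
	q_fifo_size     = 16384;
	frame_fifo_size = 9216;			/* aligned max frame */
	rfa = q_fifo_size - frame_fifo_size;	/* 7168: assert just over a frame in */
	rfd = rfa + (frame_fifo_size / 2);	/* 11776: deassert half a frame later */

XGMAC_FLOW_CONTROL_VALUE() then converts these byte counts into the hardware's
RFA/RFD register encoding.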
static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
unsigned int *fifo)
{
unsigned int q_fifo_size;
	unsigned int i;
for (i = 0; i < pdata->rx_q_count; i++) {
q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;
xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
}
}
static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
{
unsigned int i;
for (i = 0; i < pdata->rx_q_count; i++) {
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
pdata->rx_rfa[i]);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
pdata->rx_rfd[i]);
}
}
static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	/* The configured value may not be the actual amount of fifo RAM */
	return min_t(unsigned int, pdata->tx_max_fifo_size,
		     pdata->hw_feat.tx_fifo_size);
}
static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
{
/* The configured value may not be the actual amount of fifo RAM */
return min_t(unsigned int, pdata->rx_max_fifo_size,
pdata->hw_feat.rx_fifo_size);
}
static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
unsigned int queue_count,
unsigned int *fifo)
{
unsigned int q_fifo_size;
unsigned int p_fifo;
unsigned int i;
q_fifo_size = fifo_size / queue_count;
	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result by 1).
	 */
	p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;
/* Distribute the fifo equally amongst the queues */
for (i = 0; i < queue_count; i++)
fifo[i] = p_fifo;
}
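For instance, assuming XGMAC_FIFO_UNIT is the 256-byte increment the old
comment described: a 64 KiB fifo shared by 8 queues gives each queue 8 KiB,
which encodes as 31:

	/* Illustration only, values assumed */
	q_fifo_size = 65536 / 8;	/* 8192 bytes per queue */
	p_fifo = 8192 / 256 - 1;	/* 31: 0 means one 256-byte unit */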
static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size,
unsigned int queue_count,
unsigned int *fifo)
{
unsigned int i;
BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC);
if (queue_count <= IEEE_8021QAZ_MAX_TCS)
return fifo_size;
/* Rx queues 9 and up are for specialized packets,
* such as PTP or DCB control packets, etc. and
* don't require a large fifo
*/
for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
fifo_size -= XGMAC_FIFO_MIN_ALLOC;
}
return fifo_size;
}
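An assumed example: with 16 Rx queues and IEEE_8021QAZ_MAX_TCS of 8, queues 8
through 15 each get only the minimum allocation, and what remains is returned
for the 8 VLAN-priority queues:

	/* Illustration, assuming XGMAC_FIFO_MIN_ALLOC = 2048 and
	 * XGMAC_FIFO_UNIT = 256 (values not confirmed by this patch)
	 */
	for (i = 8; i < 16; i++)
		fifo[i] = (2048 / 256) - 1;	/* encoded minimum: 7 */
	/* returned fifo_size = fifo_size - 8 * 2048 */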
static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata)
{
unsigned int delay;
/* If a delay has been provided, use that */
if (pdata->pfc->delay)
return pdata->pfc->delay / 8;
/* Allow for two maximum size frames */
delay = xgbe_get_max_frame(pdata);
delay += XGMAC_ETH_PREAMBLE;
delay *= 2;
/* Allow for PFC frame */
delay += XGMAC_PFC_DATA_LEN;
delay += ETH_HLEN + ETH_FCS_LEN;
delay += XGMAC_ETH_PREAMBLE;
/* Allow for miscellaneous delays (LPI exit, cable, etc.) */
delay += XGMAC_PFC_DELAYS;
return delay;
}
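For a sense of the arithmetic (MTU assumed, constants left symbolic): a
1,500-byte MTU gives a 1,522-byte max frame (MTU + 14 header + 4 FCS +
4 VLAN), so the auto-computed delay is:

	/* Illustration only */
	delay  = (1522 + XGMAC_ETH_PREAMBLE) * 2;	/* two max-size frames */
	delay += XGMAC_PFC_DATA_LEN + ETH_HLEN + ETH_FCS_LEN
		 + XGMAC_ETH_PREAMBLE;			/* one PFC frame */
	delay += XGMAC_PFC_DELAYS;			/* misc slack (LPI exit, cable) */

Note that a caller-provided pdata->pfc->delay is specified in bits, hence the
divide by 8 in the code above.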
static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata)
{
unsigned int count, prio_queues;
unsigned int i;
if (!pdata->pfc->pfc_en)
return 0;
count = 0;
prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
for (i = 0; i < prio_queues; i++) {
if (!xgbe_is_pfc_queue(pdata, i))
continue;
pdata->pfcq[i] = 1;
count++;
}
return count;
}
static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata,
unsigned int fifo_size,
unsigned int *fifo)
{
unsigned int q_fifo_size, rem_fifo, addn_fifo;
unsigned int prio_queues;
unsigned int pfc_count;
unsigned int i;
q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata));
prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
pfc_count = xgbe_get_pfc_queues(pdata);
if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) {
/* No traffic classes with PFC enabled or can't do lossless */
xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
return;
}
/* Calculate how much fifo we have to play with */
rem_fifo = fifo_size - (q_fifo_size * prio_queues);
/* Calculate how much more than base fifo PFC needs, which also
* becomes the threshold activation point (RFA)
*/
pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);
if (pdata->pfc_rfa > q_fifo_size) {
addn_fifo = pdata->pfc_rfa - q_fifo_size;
addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo);
} else {
addn_fifo = 0;
}
/* Calculate DCB fifo settings:
* - distribute remaining fifo between the VLAN priority
* queues based on traffic class PFC enablement and overall
* priority (0 is lowest priority, so start at highest)
*/
i = prio_queues;
while (i > 0) {
i--;
fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1;
if (!pdata->pfcq[i] || !addn_fifo)
continue;
if (addn_fifo > rem_fifo) {
netdev_warn(pdata->netdev,
"RXq%u cannot set needed fifo size\n", i);
if (!rem_fifo)
continue;
addn_fifo = rem_fifo;
}
fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT);
rem_fifo -= addn_fifo;
}
if (rem_fifo) {
unsigned int inc_fifo = rem_fifo / prio_queues;
/* Distribute remaining fifo across queues */
for (i = 0; i < prio_queues; i++)
fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT);
}
}
static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
unsigned int fifo_size;
unsigned int fifo[XGBE_MAX_QUEUES];
unsigned int i;
	fifo_size = xgbe_get_tx_fifo_size(pdata);

	xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);

	netif_info(pdata, drv, pdata->netdev,
		   "%d Tx hardware queues, %d byte fifo per queue\n",
		   pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}
static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
unsigned int fifo_size;
unsigned int fifo[XGBE_MAX_QUEUES];
unsigned int prio_queues;
unsigned int i;
	/* Clear any DCB related fifo/queue information */
	memset(pdata->pfcq, 0, sizeof(pdata->pfcq));
	pdata->pfc_rfa = 0;

	fifo_size = xgbe_get_rx_fifo_size(pdata);
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);

	/* Assign a minimum fifo to the non-VLAN priority queues */
	fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);

	if (pdata->pfc && pdata->ets)
		xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo);
	else
		xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
xgbe_calculate_flow_control_threshold(pdata, fifo);
xgbe_config_flow_control_threshold(pdata);
if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) {
netif_info(pdata, drv, pdata->netdev,
"%u Rx hardware queues\n", pdata->rx_q_count);
for (i = 0; i < pdata->rx_q_count; i++)
netif_info(pdata, drv, pdata->netdev,
"RxQ%u, %u byte fifo queue\n", i,
((fifo[i] + 1) * XGMAC_FIFO_UNIT));
} else {
netif_info(pdata, drv, pdata->netdev,
"%u Rx hardware queues, %u byte fifo per queue\n",
pdata->rx_q_count,
((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}
}
static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2090,8 +2356,7 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
}
/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
@@ -2139,16 +2404,120 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
}
}
static void xgbe_config_tc(struct xgbe_prv_data *pdata)
{
	unsigned int offset, queue, prio;
	u8 i;
netdev_reset_tc(pdata->netdev);
if (!pdata->num_tcs)
return;
netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
while ((queue < pdata->tx_q_count) &&
(pdata->q2tc_map[queue] == i))
queue++;
netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
i, offset, queue - 1);
netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
offset = queue;
}
if (!pdata->ets)
return;
for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
netdev_set_prio_tc_map(pdata->netdev, prio,
pdata->ets->prio_tc[prio]);
}
static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
{
struct ieee_ets *ets = pdata->ets;
unsigned int total_weight, min_weight, weight;
unsigned int mask, reg, reg_val;
unsigned int i, prio;
if (!ets)
return;
/* Set Tx to deficit weighted round robin scheduling algorithm (when
* traffic class is using ETS algorithm)
*/
XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
/* Set Traffic Class algorithms */
total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
min_weight = total_weight / 100;
if (!min_weight)
min_weight = 1;
for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
/* Map the priorities to the traffic class */
mask = 0;
for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
if (ets->prio_tc[prio] == i)
mask |= (1 << prio);
}
mask &= 0xff;
netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
i, mask);
reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
reg_val = XGMAC_IOREAD(pdata, reg);
reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
XGMAC_IOWRITE(pdata, reg, reg_val);
/* Set the traffic class algorithm */
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
netif_dbg(pdata, drv, pdata->netdev,
"TC%u using SP\n", i);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
MTL_TSA_SP);
break;
case IEEE_8021QAZ_TSA_ETS:
weight = total_weight * ets->tc_tx_bw[i] / 100;
weight = clamp(weight, min_weight, total_weight);
netif_dbg(pdata, drv, pdata->netdev,
"TC%u using DWRR (weight %u)\n", i, weight);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
MTL_TSA_ETS);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
weight);
break;
}
}
xgbe_config_tc(pdata);
}
static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
{
if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
/* Just stop the Tx queues while Rx fifo is changed */
netif_tx_stop_all_queues(pdata->netdev);
/* Suspend Rx so that fifo's can be adjusted */
pdata->hw_if.disable_rx(pdata);
}
xgbe_config_rx_fifo_size(pdata);
xgbe_config_flow_control(pdata);
if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
/* Resume Rx */
pdata->hw_if.enable_rx(pdata);
/* Resume Tx queues */
netif_tx_start_all_queues(pdata->netdev);
}
}
@@ -2175,19 +2544,7 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
{
xgbe_set_speed(pdata, pdata->phy_speed);
}
static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
@@ -2223,17 +2580,33 @@ static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
bool read_hi;
u64 val;
	if (pdata->vdata->mmc_64bit) {
		switch (reg_lo) {
		/* These registers are always 32 bit */
		case MMC_RXRUNTERROR:
		case MMC_RXJABBERERROR:
		case MMC_RXUNDERSIZE_G:
		case MMC_RXOVERSIZE_G:
		case MMC_RXWATCHDOGERROR:
			read_hi = false;
			break;

		default:
			read_hi = true;
		}
} else {
switch (reg_lo) {
/* These registers are always 64 bit */
case MMC_TXOCTETCOUNT_GB_LO:
case MMC_TXOCTETCOUNT_G_LO:
case MMC_RXOCTETCOUNT_GB_LO:
case MMC_RXOCTETCOUNT_G_LO:
read_hi = true;
break;
default:
read_hi = false;
}
}
val = XGMAC_IOREAD(pdata, reg_lo);
@@ -2563,20 +2936,48 @@ static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}
static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata,
unsigned int queue)
{
unsigned int tx_status;
unsigned long tx_timeout;
/* The Tx engine cannot be stopped if it is actively processing
* packets. Wait for the Tx queue to empty the Tx fifo. Don't
* wait forever though...
*/
tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
while (time_before(jiffies, tx_timeout)) {
tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
(XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
break;
usleep_range(500, 1000);
}
if (!time_before(jiffies, tx_timeout))
netdev_info(pdata->netdev,
"timed out waiting for Tx queue %u to empty\n",
queue);
}
static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
				 unsigned int queue)
{
unsigned int tx_dsr, tx_pos, tx_qidx;
unsigned int tx_status;
unsigned long tx_timeout;
if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
return xgbe_txq_prepare_tx_stop(pdata, queue);
/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
@@ -2601,7 +3002,7 @@ static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
if (!time_before(jiffies, tx_timeout))
netdev_info(pdata->netdev,
"timed out waiting for Tx DMA channel %u to stop\n",
			    queue);
}
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
@@ -2633,13 +3034,8 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
unsigned int i;
/* Prepare for Tx DMA channel stop */
for (i = 0; i < pdata->tx_q_count; i++)
xgbe_prepare_tx_stop(pdata, i);
/* Disable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
@@ -2763,13 +3159,8 @@ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
unsigned int i;
/* Prepare for Tx DMA channel stop */
for (i = 0; i < pdata->tx_q_count; i++)
xgbe_prepare_tx_stop(pdata, i);
/* Disable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
@@ -2856,12 +3247,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
xgbe_config_tx_fifo_size(pdata);
xgbe_config_rx_fifo_size(pdata);
/*TODO: Error Packet and undersized good Packet forwarding enable
(FEP and FUP)
*/
xgbe_config_dcb_tc(pdata);
xgbe_config_dcb_pfc(pdata);
xgbe_enable_mtl_interrupts(pdata);
/*
@@ -2903,9 +3292,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->read_mmd_regs = xgbe_read_mmd_regs;
hw_if->write_mmd_regs = xgbe_write_mmd_regs;
hw_if->set_speed = xgbe_set_speed;
hw_if->enable_tx = xgbe_enable_tx;
hw_if->disable_tx = xgbe_disable_tx;
......
@@ -114,7 +114,6 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
@@ -160,18 +159,8 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
(DMA_CH_INC * i);
if (pdata->per_channel_irq)
channel->dma_irq = pdata->channel_irq[i];
if (i < pdata->tx_ring_count) {
spin_lock_init(&tx_ring->lock);
@@ -194,9 +183,6 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
return 0;
err_rx_ring:
kfree(tx_ring);
@@ -590,6 +576,10 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->tx_ch_cnt++;
hw_feat->tc_cnt++;
/* Translate the fifo sizes into actual numbers */
hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
DBGPR("<--xgbe_get_all_hw_features\n");
}
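The translation is a biased power of two: a raw field value n corresponds to
1 << (n + 7) bytes, so 0 means 128 bytes and 7 means 16 KiB:

	/* Illustrative mapping of the encoded field to bytes */
	/* n = 0 -> 1 << 7  = 128   */
	/* n = 7 -> 1 << 14 = 16384 */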
@@ -778,7 +768,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_free_rx_data\n");
}
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
pdata->phy_link = -1;
pdata->phy_speed = SPEED_UNKNOWN;
@@ -1292,8 +1282,8 @@ static int xgbe_open(struct net_device *netdev)
DBGPR("-->xgbe_open\n");
/* Reset the phy settings */
ret = xgbe_phy_reset(pdata);
if (ret)
return ret;
......
@@ -316,24 +316,7 @@ static int xgbe_set_settings(struct net_device *netdev,
}
if (cmd->autoneg == AUTONEG_DISABLE) {
if (!pdata->phy_if.phy_valid_speed(pdata, speed)) {
netdev_err(netdev, "unsupported speed %u\n", speed);
return -EINVAL;
}
......
@@ -116,19 +116,10 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
#include <linux/mdio.h>
#include "xgbe.h"
#include "xgbe-common.h"
@@ -145,42 +136,6 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP);
static const u32 xgbe_serdes_blwc[] = {
XGBE_SPEED_1000_BLWC,
XGBE_SPEED_2500_BLWC,
XGBE_SPEED_10000_BLWC,
};
static const u32 xgbe_serdes_cdr_rate[] = {
XGBE_SPEED_1000_CDR,
XGBE_SPEED_2500_CDR,
XGBE_SPEED_10000_CDR,
};
static const u32 xgbe_serdes_pq_skew[] = {
XGBE_SPEED_1000_PQ,
XGBE_SPEED_2500_PQ,
XGBE_SPEED_10000_PQ,
};
static const u32 xgbe_serdes_tx_amp[] = {
XGBE_SPEED_1000_TXAMP,
XGBE_SPEED_2500_TXAMP,
XGBE_SPEED_10000_TXAMP,
};
static const u32 xgbe_serdes_dfe_tap_cfg[] = {
XGBE_SPEED_1000_DFE_TAP_CONFIG,
XGBE_SPEED_2500_DFE_TAP_CONFIG,
XGBE_SPEED_10000_DFE_TAP_CONFIG,
};
static const u32 xgbe_serdes_dfe_tap_ena[] = {
XGBE_SPEED_1000_DFE_TAP_ENABLE,
XGBE_SPEED_2500_DFE_TAP_ENABLE,
XGBE_SPEED_10000_DFE_TAP_ENABLE,
};
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
@@ -207,160 +162,25 @@ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
xgbe_init_function_ptrs_dev(&pdata->hw_if);
xgbe_init_function_ptrs_phy(&pdata->phy_if);
xgbe_init_function_ptrs_desc(&pdata->desc_if);
}
#ifdef CONFIG_ACPI
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
u32 property;
int ret;
/* Obtain the system clock setting */
ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
if (ret) {
dev_err(dev, "unable to obtain %s property\n",
XGBE_ACPI_DMA_FREQ);
return ret;
}
pdata->sysclk_rate = property;
/* Obtain the PTP clock setting */
ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
if (ret) {
dev_err(dev, "unable to obtain %s property\n",
XGBE_ACPI_PTP_FREQ);
return ret;
}
pdata->ptpclk_rate = property;
return 0;
}
#else /* CONFIG_ACPI */
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
}
#endif /* CONFIG_ACPI */
#ifdef CONFIG_OF
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
/* Obtain the system clock setting */
pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
if (IS_ERR(pdata->sysclk)) {
dev_err(dev, "dma devm_clk_get failed\n");
return PTR_ERR(pdata->sysclk);
}
pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
/* Obtain the PTP clock setting */
pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
if (IS_ERR(pdata->ptpclk)) {
dev_err(dev, "ptp devm_clk_get failed\n");
return PTR_ERR(pdata->ptpclk);
}
pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
return 0;
}
static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
struct device_node *phy_node;
struct platform_device *phy_pdev;
phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
if (phy_node) {
/* Old style device tree:
* The XGBE and PHY resources are separate
*/
phy_pdev = of_find_device_by_node(phy_node);
of_node_put(phy_node);
} else {
/* New style device tree:
* The XGBE and PHY resources are grouped together with
* the PHY resources listed last
*/
get_device(dev);
phy_pdev = pdata->pdev;
}
return phy_pdev;
}
#else /* CONFIG_OF */
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
}
static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
return NULL;
}
#endif /* CONFIG_OF */
static unsigned int xgbe_resource_count(struct platform_device *pdev,
unsigned int type)
{
unsigned int count;
int i;
for (i = 0, count = 0; i < pdev->num_resources; i++) {
struct resource *res = &pdev->resource[i];
if (type == resource_type(res))
count++;
}
return count;
}
static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
{
struct platform_device *phy_pdev;
if (pdata->use_acpi) {
get_device(pdata->dev);
phy_pdev = pdata->pdev;
} else {
phy_pdev = xgbe_of_get_phy_pdev(pdata);
}
return phy_pdev;
pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}
struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
{
struct xgbe_prv_data *pdata;
struct net_device *netdev;
netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
XGBE_MAX_DMA_CHANNELS);
if (!netdev) {
dev_err(dev, "alloc_etherdev failed\n");
ret = -ENOMEM;
goto err_alloc;
dev_err(dev, "alloc_etherdev_mq failed\n");
return ERR_PTR(-ENOMEM);
}
SET_NETDEV_DEV(netdev, dev);
pdata = netdev_priv(netdev);
pdata->netdev = netdev;
pdata->dev = dev;
spin_lock_init(&pdata->lock);
spin_lock_init(&pdata->xpcs_lock);
@@ -371,291 +191,82 @@ static int xgbe_probe(struct platform_device *pdev)
set_bit(XGBE_DOWN, &pdata->dev_state);
/* Check if we should use ACPI or DT */
pdata->use_acpi = dev->of_node ? 0 : 1;
phy_pdev = xgbe_get_phy_pdev(pdata);
if (!phy_pdev) {
dev_err(dev, "unable to obtain phy device\n");
ret = -EINVAL;
goto err_phydev;
}
phy_dev = &phy_pdev->dev;
if (pdev == phy_pdev) {
/* New style device tree or ACPI:
* The XGBE and PHY resources are grouped together with
* the PHY resources listed last
*/
phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
} else {
/* Old style device tree:
* The XGBE and PHY resources are separate
*/
phy_memnum = 0;
phy_irqnum = 0;
}
/* Set and validate the number of descriptors for a ring */
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
pdata->tx_desc_count = XGBE_TX_DESC_CNT;
if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
dev_err(dev, "tx descriptor count (%d) is not valid\n",
pdata->tx_desc_count);
ret = -EINVAL;
goto err_io;
}
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
pdata->rx_desc_count = XGBE_RX_DESC_CNT;
if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
dev_err(dev, "rx descriptor count (%d) is not valid\n",
pdata->rx_desc_count);
ret = -EINVAL;
goto err_io;
}
/* Obtain the mmio areas for the device */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pdata->xgmac_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->xgmac_regs)) {
dev_err(dev, "xgmac ioremap failed\n");
ret = PTR_ERR(pdata->xgmac_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
pdata->xpcs_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->xpcs_regs)) {
dev_err(dev, "xpcs ioremap failed\n");
ret = PTR_ERR(pdata->xpcs_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->rxtx_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->rxtx_regs)) {
dev_err(dev, "rxtx ioremap failed\n");
ret = PTR_ERR(pdata->rxtx_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->sir0_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->sir0_regs)) {
dev_err(dev, "sir0 ioremap failed\n");
ret = PTR_ERR(pdata->sir0_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->sir1_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->sir1_regs)) {
dev_err(dev, "sir1 ioremap failed\n");
ret = PTR_ERR(pdata->sir1_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
/* Retrieve the MAC address */
ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
pdata->mac_addr,
sizeof(pdata->mac_addr));
if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
if (!ret)
ret = -EINVAL;
goto err_io;
}
/* Retrieve the PHY mode - it must be "xgmii" */
ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
&phy_mode);
if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
if (!ret)
ret = -EINVAL;
goto err_io;
}
pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
return pdata;
}
/* Check for per channel interrupt support */
if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
pdata->per_channel_irq = 1;
void xgbe_free_pdata(struct xgbe_prv_data *pdata)
{
struct net_device *netdev = pdata->netdev;
free_netdev(netdev);
}
switch (pdata->speed_set) {
case XGBE_SPEEDSET_1000_10000:
case XGBE_SPEEDSET_2500_10000:
break;
default:
dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
ret = -EINVAL;
goto err_io;
}
void xgbe_set_counts(struct xgbe_prv_data *pdata)
{
/* Set all the function pointers */
xgbe_init_all_fptrs(pdata);
/* Retrieve the PHY configuration properties */
if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_BLWC_PROPERTY,
pdata->serdes_blwc,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_BLWC_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
sizeof(pdata->serdes_blwc));
}
/* Populate the hardware features */
xgbe_get_all_hw_features(pdata);
if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_CDR_RATE_PROPERTY,
pdata->serdes_cdr_rate,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_CDR_RATE_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
sizeof(pdata->serdes_cdr_rate));
}
/* Set default max values if not provided */
if (!pdata->tx_max_channel_count)
pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
if (!pdata->rx_max_channel_count)
pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_PQ_SKEW_PROPERTY,
pdata->serdes_pq_skew,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_PQ_SKEW_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
sizeof(pdata->serdes_pq_skew));
}
if (!pdata->tx_max_q_count)
pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
if (!pdata->rx_max_q_count)
pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_TX_AMP_PROPERTY,
pdata->serdes_tx_amp,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_TX_AMP_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
sizeof(pdata->serdes_tx_amp));
}
/* Calculate the number of Tx and Rx rings to be created
* -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
* the number of Tx queues to the number of Tx channels
* enabled
* -Rx (DMA) Channels do not map 1-to-1 so use the actual
* number of Rx queues or maximum allowed
*/
pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
pdata->hw_feat.tx_ch_cnt);
pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
pdata->tx_max_channel_count);
pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
pdata->tx_max_q_count);
if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_DFE_CFG_PROPERTY,
pdata->serdes_dfe_tap_cfg,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_DFE_CFG_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
sizeof(pdata->serdes_dfe_tap_cfg));
}
pdata->tx_q_count = pdata->tx_ring_count;
if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_DFE_ENA_PROPERTY,
pdata->serdes_dfe_tap_ena,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_DFE_ENA_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
sizeof(pdata->serdes_dfe_tap_ena));
}
pdata->rx_ring_count = min_t(unsigned int,
netif_get_num_default_rss_queues(),
pdata->hw_feat.rx_ch_cnt);
pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
pdata->rx_max_channel_count);
/* Obtain device settings unique to ACPI/OF */
if (pdata->use_acpi)
ret = xgbe_acpi_support(pdata);
else
ret = xgbe_of_support(pdata);
if (ret)
goto err_io;
/* Set the DMA coherency values */
attr = device_get_dma_attr(dev);
if (attr == DEV_DMA_NOT_SUPPORTED) {
dev_err(dev, "DMA is not supported");
ret = -ENODEV;
goto err_io;
}
pdata->coherent = (attr == DEV_DMA_COHERENT);
if (pdata->coherent) {
pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
pdata->arcache = XGBE_DMA_OS_ARCACHE;
pdata->awcache = XGBE_DMA_OS_AWCACHE;
} else {
pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
pdata->arcache = XGBE_DMA_SYS_ARCACHE;
pdata->awcache = XGBE_DMA_SYS_AWCACHE;
}
pdata->rx_q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt,
pdata->rx_max_q_count);
if (netif_msg_probe(pdata)) {
dev_dbg(pdata->dev, "TX/RX DMA channel count = %u/%u\n",
pdata->tx_ring_count, pdata->rx_ring_count);
dev_dbg(pdata->dev, "TX/RX hardware queue count = %u/%u\n",
pdata->tx_q_count, pdata->rx_q_count);
}
}
int xgbe_config_netdev(struct xgbe_prv_data *pdata)
{
struct net_device *netdev = pdata->netdev;
struct device *dev = pdata->dev;
unsigned int i;
int ret;
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
/* Issue software reset to device */
pdata->hw_if.exit(pdata);
/* Set default configuration data */
xgbe_default_config(pdata);
@@ -664,33 +275,33 @@ static int xgbe_probe(struct platform_device *pdev)
DMA_BIT_MASK(pdata->hw_feat.dma_width));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed\n");
goto err_io;
return ret;
}
/* Set default max values if not provided */
if (!pdata->tx_max_fifo_size)
pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
if (!pdata->rx_max_fifo_size)
pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
/* Set and validate the number of descriptors for a ring */
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
pdata->tx_desc_count = XGBE_TX_DESC_CNT;
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
pdata->rx_desc_count = XGBE_RX_DESC_CNT;
/* Set the number of queues */
ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
if (ret) {
dev_err(dev, "error setting real tx queue count\n");
goto err_io;
return ret;
}
ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
if (ret) {
dev_err(dev, "error setting real rx queue count\n");
goto err_io;
return ret;
}
/* Initialize RSS hash key and lookup table */
@@ -705,7 +316,9 @@ static int xgbe_probe(struct platform_device *pdev)
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
/* Call MDIO/PHY initialization routine */
ret = pdata->phy_if.phy_init(pdata);
if (ret)
return ret;
/* Set device operations */
netdev->netdev_ops = xgbe_get_netdev_ops();
@@ -752,7 +365,7 @@ static int xgbe_probe(struct platform_device *pdev)
ret = register_netdev(netdev);
if (ret) {
dev_err(dev, "net device registration failed\n");
goto err_io;
return ret;
}
/* Create the PHY/ANEG name based on netdev name */
@@ -780,12 +393,6 @@ static int xgbe_probe(struct platform_device *pdev)
xgbe_debugfs_init(pdata);
netdev_notice(netdev, "net device enabled\n");
return 0;
err_wq:
@@ -794,29 +401,19 @@ static int xgbe_probe(struct platform_device *pdev)
err_netdev:
unregister_netdev(netdev);
return ret;
}
void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
{
struct net_device *netdev = pdata->netdev;
xgbe_debugfs_exit(pdata);
xgbe_ptp_unregister(pdata);
pdata->phy_if.phy_exit(pdata);
flush_workqueue(pdata->an_workqueue);
destroy_workqueue(pdata->an_workqueue);
@@ -824,94 +421,23 @@ static int xgbe_remove(struct platform_device *pdev)
destroy_workqueue(pdata->dev_workqueue);
unregister_netdev(netdev);
}
static int __init xgbe_mod_init(void)
{
	int ret;

	ret = xgbe_platform_init();
	if (ret)
		return ret;

return 0;
}
static void __exit xgbe_mod_exit(void)
{
xgbe_platform_exit();
}
#endif /* CONFIG_PM */
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
{ "AMDI8001", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
#endif
#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[] = {
{ .compatible = "amd,xgbe-seattle-v1a", },
{},
};
MODULE_DEVICE_TABLE(of, xgbe_of_match);
#endif
static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
static struct platform_driver xgbe_driver = {
.driver = {
.name = "amd-xgbe",
#ifdef CONFIG_ACPI
.acpi_match_table = xgbe_acpi_match,
#endif
#ifdef CONFIG_OF
.of_match_table = xgbe_of_match,
#endif
.pm = &xgbe_pm_ops,
},
.probe = xgbe_probe,
.remove = xgbe_remove,
};
module_platform_driver(xgbe_driver);
module_init(xgbe_mod_init);
module_exit(xgbe_mod_exit);
@@ -125,303 +125,219 @@
#include "xgbe.h"
#include "xgbe-common.h"
static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata)
{
	int reg;

reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
reg &= ~XGBE_AN_CL37_INT_MASK;
XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
}
static void xgbe_an37_disable_interrupts(struct xgbe_prv_data *pdata)
{
	int reg;

	reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
	reg &= ~XGBE_AN_CL37_INT_MASK;
	XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);

reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
reg &= ~XGBE_PCS_CL37_BP;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
}
static void xgbe_an37_enable_interrupts(struct xgbe_prv_data *pdata)
{
	int reg;

reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
reg |= XGBE_PCS_CL37_BP;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
usleep_range(75, 100);
reg &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
reg |= XGBE_AN_CL37_INT_MASK;
XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
}
static void xgbe_serdes_start_ratechange(struct xgbe_prv_data *pdata)
static void xgbe_an73_clear_interrupts(struct xgbe_prv_data *pdata)
{
/* Assert Rx and Tx ratechange */
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
}
static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata)
static void xgbe_an73_disable_interrupts(struct xgbe_prv_data *pdata)
{
unsigned int wait;
u16 status;
/* Release Rx and Tx ratechange */
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
}
/* Wait for Rx and Tx ready */
wait = XGBE_RATECHANGE_COUNT;
while (wait--) {
usleep_range(50, 75);
static void xgbe_an73_enable_interrupts(struct xgbe_prv_data *pdata)
{
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_CL73_INT_MASK);
}
status = XSIR0_IOREAD(pdata, SIR0_STATUS);
if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
goto rx_reset;
static void xgbe_an_enable_interrupts(struct xgbe_prv_data *pdata)
{
switch (pdata->an_mode) {
case XGBE_AN_MODE_CL73:
xgbe_an73_enable_interrupts(pdata);
break;
case XGBE_AN_MODE_CL37:
case XGBE_AN_MODE_CL37_SGMII:
xgbe_an37_enable_interrupts(pdata);
break;
default:
break;
}
}
netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
status);
rx_reset:
/* Perform Rx reset for the DFE changes */
XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
static void xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata)
{
xgbe_an73_clear_interrupts(pdata);
xgbe_an37_clear_interrupts(pdata);
}
static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata)
static void xgbe_an73_enable_kr_training(struct xgbe_prv_data *pdata)
{
unsigned int reg;
/* Enable KR training */
xgbe_an_enable_kr_training(pdata);
/* Set MAC to 10G speed */
pdata->hw_if.set_xgmii_speed(pdata);
/* Set PCS to KR/10G speed */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= ~MDIO_PCS_CTRL2_TYPE;
reg |= MDIO_PCS_CTRL2_10GBR;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg &= ~MDIO_CTRL1_SPEEDSEL;
reg |= MDIO_CTRL1_SPEED10G;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
reg |= XGBE_KR_TRAINING_ENABLE;
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
}
xgbe_pcs_power_cycle(pdata);
static void xgbe_an73_disable_kr_training(struct xgbe_prv_data *pdata)
{
unsigned int reg;
/* Set SerDes to 10G speed */
xgbe_serdes_start_ratechange(pdata);
reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
reg &= ~XGBE_KR_TRAINING_ENABLE;
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
}
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
pdata->serdes_cdr_rate[XGBE_SPEED_10000]);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
pdata->serdes_tx_amp[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
pdata->serdes_blwc[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
pdata->serdes_pq_skew[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
pdata->serdes_dfe_tap_cfg[XGBE_SPEED_10000]);
XRXTX_IOWRITE(pdata, RXTX_REG22,
pdata->serdes_dfe_tap_ena[XGBE_SPEED_10000]);
static void xgbe_kr_mode(struct xgbe_prv_data *pdata)
{
/* Enable KR training */
xgbe_an73_enable_kr_training(pdata);
xgbe_serdes_complete_ratechange(pdata);
/* Set MAC to 10G speed */
pdata->hw_if.set_speed(pdata, SPEED_10000);
netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
/* Call PHY implementation support to complete rate change */
pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KR);
}
static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata)
{
unsigned int reg;
/* Disable KR training */
xgbe_an_disable_kr_training(pdata);
xgbe_an73_disable_kr_training(pdata);
/* Set MAC to 2.5G speed */
pdata->hw_if.set_gmii_2500_speed(pdata);
/* Set PCS to KX/1G speed */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= ~MDIO_PCS_CTRL2_TYPE;
reg |= MDIO_PCS_CTRL2_10GBX;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg &= ~MDIO_CTRL1_SPEEDSEL;
reg |= MDIO_CTRL1_SPEED1G;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
xgbe_pcs_power_cycle(pdata);
pdata->hw_if.set_speed(pdata, SPEED_2500);
/* Set SerDes to 2.5G speed */
xgbe_serdes_start_ratechange(pdata);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
pdata->serdes_cdr_rate[XGBE_SPEED_2500]);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
pdata->serdes_tx_amp[XGBE_SPEED_2500]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
pdata->serdes_blwc[XGBE_SPEED_2500]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
pdata->serdes_pq_skew[XGBE_SPEED_2500]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
pdata->serdes_dfe_tap_cfg[XGBE_SPEED_2500]);
XRXTX_IOWRITE(pdata, RXTX_REG22,
pdata->serdes_dfe_tap_ena[XGBE_SPEED_2500]);
xgbe_serdes_complete_ratechange(pdata);
netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
/* Call PHY implementation support to complete rate change */
pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_2500);
}
static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
static void xgbe_kx_1000_mode(struct xgbe_prv_data *pdata)
{
unsigned int reg;
/* Disable KR training */
xgbe_an_disable_kr_training(pdata);
xgbe_an73_disable_kr_training(pdata);
/* Set MAC to 1G speed */
pdata->hw_if.set_gmii_speed(pdata);
/* Set PCS to KX/1G speed */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= ~MDIO_PCS_CTRL2_TYPE;
reg |= MDIO_PCS_CTRL2_10GBX;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg &= ~MDIO_CTRL1_SPEEDSEL;
reg |= MDIO_CTRL1_SPEED1G;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
xgbe_pcs_power_cycle(pdata);
/* Set SerDes to 1G speed */
xgbe_serdes_start_ratechange(pdata);
pdata->hw_if.set_speed(pdata, SPEED_1000);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
pdata->serdes_cdr_rate[XGBE_SPEED_1000]);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
pdata->serdes_tx_amp[XGBE_SPEED_1000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
pdata->serdes_blwc[XGBE_SPEED_1000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
pdata->serdes_pq_skew[XGBE_SPEED_1000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
pdata->serdes_dfe_tap_cfg[XGBE_SPEED_1000]);
XRXTX_IOWRITE(pdata, RXTX_REG22,
pdata->serdes_dfe_tap_ena[XGBE_SPEED_1000]);
xgbe_serdes_complete_ratechange(pdata);
netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
/* Call PHY implementation support to complete rate change */
pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_1000);
}
static void xgbe_cur_mode(struct xgbe_prv_data *pdata,
enum xgbe_mode *mode)
static enum xgbe_mode xgbe_cur_mode(struct xgbe_prv_data *pdata)
{
unsigned int reg;
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
if ((reg & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
*mode = XGBE_MODE_KR;
else
*mode = XGBE_MODE_KX;
return pdata->phy_if.phy_impl.cur_mode(pdata);
}
static bool xgbe_in_kr_mode(struct xgbe_prv_data *pdata)
{
enum xgbe_mode mode;
xgbe_cur_mode(pdata, &mode);
return (xgbe_cur_mode(pdata) == XGBE_MODE_KR);
}
return (mode == XGBE_MODE_KR);
static void xgbe_change_mode(struct xgbe_prv_data *pdata,
enum xgbe_mode mode)
{
switch (mode) {
case XGBE_MODE_KX_1000:
xgbe_kx_1000_mode(pdata);
break;
case XGBE_MODE_KX_2500:
xgbe_kx_2500_mode(pdata);
break;
case XGBE_MODE_KR:
xgbe_kr_mode(pdata);
break;
case XGBE_MODE_UNKNOWN:
break;
default:
netif_dbg(pdata, link, pdata->netdev,
"invalid operation mode requested (%u)\n", mode);
}
}
static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
{
/* If we are in KR, switch to KX, and vice-versa */
if (xgbe_in_kr_mode(pdata)) {
if (pdata->speed_set == XGBE_SPEEDSET_1000_10000)
xgbe_gmii_mode(pdata);
else
xgbe_gmii_2500_mode(pdata);
} else {
xgbe_xgmii_mode(pdata);
}
xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
}
static void xgbe_set_mode(struct xgbe_prv_data *pdata,
enum xgbe_mode mode)
{
enum xgbe_mode cur_mode;
if (mode == xgbe_cur_mode(pdata))
return;
xgbe_cur_mode(pdata, &cur_mode);
if (mode != cur_mode)
xgbe_switch_mode(pdata);
xgbe_change_mode(pdata, mode);
}
static bool xgbe_use_xgmii_mode(struct xgbe_prv_data *pdata)
static bool xgbe_use_mode(struct xgbe_prv_data *pdata,
enum xgbe_mode mode)
{
if (pdata->phy.autoneg == AUTONEG_ENABLE) {
if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
return true;
} else {
if (pdata->phy.speed == SPEED_10000)
return true;
}
return pdata->phy_if.phy_impl.use_mode(pdata, mode);
}
return false;
static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable,
bool restart)
{
unsigned int reg;
reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1);
reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE;
if (enable)
reg |= MDIO_VEND2_CTRL1_AN_ENABLE;
if (restart)
reg |= MDIO_VEND2_CTRL1_AN_RESTART;
XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
}
static bool xgbe_use_gmii_2500_mode(struct xgbe_prv_data *pdata)
static void xgbe_an37_restart(struct xgbe_prv_data *pdata)
{
if (pdata->phy.autoneg == AUTONEG_ENABLE) {
if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
return true;
} else {
if (pdata->phy.speed == SPEED_2500)
return true;
}
xgbe_an37_enable_interrupts(pdata);
xgbe_an37_set(pdata, true, true);
return false;
netif_dbg(pdata, link, pdata->netdev, "CL37 AN enabled/restarted\n");
}
static bool xgbe_use_gmii_mode(struct xgbe_prv_data *pdata)
static void xgbe_an37_disable(struct xgbe_prv_data *pdata)
{
if (pdata->phy.autoneg == AUTONEG_ENABLE) {
if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
return true;
} else {
if (pdata->phy.speed == SPEED_1000)
return true;
}
xgbe_an37_set(pdata, false, false);
xgbe_an37_disable_interrupts(pdata);
return false;
netif_dbg(pdata, link, pdata->netdev, "CL37 AN disabled\n");
}
static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
static void xgbe_an73_set(struct xgbe_prv_data *pdata, bool enable,
bool restart)
{
unsigned int reg;
......@@ -437,22 +353,60 @@ static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
}
static void xgbe_restart_an(struct xgbe_prv_data *pdata)
static void xgbe_an73_restart(struct xgbe_prv_data *pdata)
{
xgbe_set_an(pdata, true, true);
xgbe_an73_enable_interrupts(pdata);
xgbe_an73_set(pdata, true, true);
netif_dbg(pdata, link, pdata->netdev, "AN enabled/restarted\n");
netif_dbg(pdata, link, pdata->netdev, "CL73 AN enabled/restarted\n");
}
static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
{
xgbe_an73_set(pdata, false, false);
xgbe_an73_disable_interrupts(pdata);
netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
}
static void xgbe_an_restart(struct xgbe_prv_data *pdata)
{
switch (pdata->an_mode) {
case XGBE_AN_MODE_CL73:
xgbe_an73_restart(pdata);
break;
case XGBE_AN_MODE_CL37:
case XGBE_AN_MODE_CL37_SGMII:
xgbe_an37_restart(pdata);
break;
default:
break;
}
}
static void xgbe_disable_an(struct xgbe_prv_data *pdata)
static void xgbe_an_disable(struct xgbe_prv_data *pdata)
{
xgbe_set_an(pdata, false, false);
switch (pdata->an_mode) {
case XGBE_AN_MODE_CL73:
xgbe_an73_disable(pdata);
break;
case XGBE_AN_MODE_CL37:
case XGBE_AN_MODE_CL37_SGMII:
xgbe_an37_disable(pdata);
break;
default:
break;
}
}
netif_dbg(pdata, link, pdata->netdev, "AN disabled\n");
static void xgbe_an_disable_all(struct xgbe_prv_data *pdata)
{
xgbe_an73_disable(pdata);
xgbe_an37_disable(pdata);
}
static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
enum xgbe_rx *state)
static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
enum xgbe_rx *state)
{
unsigned int ad_reg, lp_reg, reg;
......@@ -476,13 +430,15 @@ static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
/* Start KR training */
reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
if (reg & XGBE_KR_TRAINING_ENABLE) {
XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
if (pdata->phy_if.phy_impl.kr_training_pre)
pdata->phy_if.phy_impl.kr_training_pre(pdata);
reg |= XGBE_KR_TRAINING_START;
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
reg);
XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
if (pdata->phy_if.phy_impl.kr_training_post)
pdata->phy_if.phy_impl.kr_training_post(pdata);
netif_dbg(pdata, link, pdata->netdev,
"KR training initiated\n");
......@@ -491,8 +447,8 @@ static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
return XGBE_AN_PAGE_RECEIVED;
}
static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata,
enum xgbe_rx *state)
static enum xgbe_an xgbe_an73_tx_xnp(struct xgbe_prv_data *pdata,
enum xgbe_rx *state)
{
u16 msg;
......@@ -508,8 +464,8 @@ static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata,
return XGBE_AN_PAGE_RECEIVED;
}
static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata,
enum xgbe_rx *state)
static enum xgbe_an xgbe_an73_rx_bpa(struct xgbe_prv_data *pdata,
enum xgbe_rx *state)
{
unsigned int link_support;
unsigned int reg, ad_reg, lp_reg;
......@@ -528,12 +484,12 @@ static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata,
return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
(lp_reg & XGBE_XNP_NP_EXCHANGE))
? xgbe_an_tx_xnp(pdata, state)
: xgbe_an_tx_training(pdata, state);
? xgbe_an73_tx_xnp(pdata, state)
: xgbe_an73_tx_training(pdata, state);
}
static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata,
enum xgbe_rx *state)
static enum xgbe_an xgbe_an73_rx_xnp(struct xgbe_prv_data *pdata,
enum xgbe_rx *state)
{
unsigned int ad_reg, lp_reg;
......@@ -543,11 +499,11 @@ static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata,
return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
(lp_reg & XGBE_XNP_NP_EXCHANGE))
? xgbe_an_tx_xnp(pdata, state)
: xgbe_an_tx_training(pdata, state);
? xgbe_an73_tx_xnp(pdata, state)
: xgbe_an73_tx_training(pdata, state);
}
static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
static enum xgbe_an xgbe_an73_page_received(struct xgbe_prv_data *pdata)
{
enum xgbe_rx *state;
unsigned long an_timeout;
......@@ -566,20 +522,20 @@ static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
pdata->an_start = jiffies;
netif_dbg(pdata, link, pdata->netdev,
"AN timed out, resetting state\n");
"CL73 AN timed out, resetting state\n");
}
}
state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state
: &pdata->kx_state;
: &pdata->kx_state;
switch (*state) {
case XGBE_RX_BPA:
ret = xgbe_an_rx_bpa(pdata, state);
ret = xgbe_an73_rx_bpa(pdata, state);
break;
case XGBE_RX_XNP:
ret = xgbe_an_rx_xnp(pdata, state);
ret = xgbe_an73_rx_xnp(pdata, state);
break;
default:
......@@ -589,7 +545,7 @@ static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
return ret;
}
static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
{
/* Be sure we aren't looping trying to negotiate */
if (xgbe_in_kr_mode(pdata)) {
......@@ -611,23 +567,43 @@ static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
return XGBE_AN_NO_LINK;
}
xgbe_disable_an(pdata);
xgbe_an73_disable(pdata);
xgbe_switch_mode(pdata);
xgbe_restart_an(pdata);
xgbe_an73_restart(pdata);
return XGBE_AN_INCOMPAT_LINK;
}
static irqreturn_t xgbe_an_isr(int irq, void *data)
static void xgbe_an37_isr(struct xgbe_prv_data *pdata)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
unsigned int reg;
netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
/* Disable AN interrupts */
xgbe_an37_disable_interrupts(pdata);
/* Save the interrupt(s) that fired */
reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
pdata->an_int = reg & XGBE_AN_CL37_INT_MASK;
pdata->an_status = reg & ~XGBE_AN_CL37_INT_MASK;
if (pdata->an_int) {
/* Clear the interrupt(s) that fired and process them */
reg &= ~XGBE_AN_CL37_INT_MASK;
XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
queue_work(pdata->an_workqueue, &pdata->an_irq_work);
} else {
/* Enable AN interrupts */
xgbe_an37_enable_interrupts(pdata);
}
}
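This save/ack/defer shape is shared with the CL73 ISR below: mask the source, latch which bits fired, clear them, and push the real processing to a workqueue, re-enabling interrupts directly only on the spurious path. A generic sketch of that top-half/bottom-half split (all "example_*" names and the read-and-ack helper are assumptions, not driver code):
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
struct example_dev {
	u32 pending;			/* interrupt bits saved for the worker */
	struct workqueue_struct *wq;
	struct work_struct work;
};
static u32 example_read_and_ack(struct example_dev *ed);	/* assumed helper */
static irqreturn_t example_isr(int irq, void *data)
{
	struct example_dev *ed = data;
	ed->pending = example_read_and_ack(ed);	/* latch and clear in hard-IRQ context */
	if (ed->pending)
		queue_work(ed->wq, &ed->work);	/* process in sleepable context */
	return IRQ_HANDLED;
}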
static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
{
/* Disable AN interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
xgbe_an73_disable_interrupts(pdata);
/* Save the interrupt(s) that fired */
pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
......@@ -639,8 +615,26 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
queue_work(pdata->an_workqueue, &pdata->an_irq_work);
} else {
/* Enable AN interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
XGBE_AN_INT_MASK);
xgbe_an73_enable_interrupts(pdata);
}
}
static irqreturn_t xgbe_an_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
switch (pdata->an_mode) {
case XGBE_AN_MODE_CL73:
xgbe_an73_isr(pdata);
break;
case XGBE_AN_MODE_CL37:
case XGBE_AN_MODE_CL37_SGMII:
xgbe_an37_isr(pdata);
break;
default:
break;
}
return IRQ_HANDLED;
......@@ -679,36 +673,87 @@ static const char *xgbe_state_as_string(enum xgbe_an state)
}
}
static void xgbe_an_state_machine(struct work_struct *work)
static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
{
struct xgbe_prv_data *pdata = container_of(work,
struct xgbe_prv_data,
an_work);
enum xgbe_an cur_state = pdata->an_state;
mutex_lock(&pdata->an_mutex);
if (!pdata->an_int)
return;
if (pdata->an_int & XGBE_AN_CL37_INT_CMPLT) {
pdata->an_state = XGBE_AN_COMPLETE;
pdata->an_int &= ~XGBE_AN_CL37_INT_CMPLT;
/* If SGMII is enabled, check the link status */
if ((pdata->an_mode == XGBE_AN_MODE_CL37_SGMII) &&
!(pdata->an_status & XGBE_SGMII_AN_LINK_STATUS))
pdata->an_state = XGBE_AN_NO_LINK;
}
netif_dbg(pdata, link, pdata->netdev, "CL37 AN %s\n",
xgbe_state_as_string(pdata->an_state));
cur_state = pdata->an_state;
switch (pdata->an_state) {
case XGBE_AN_READY:
break;
case XGBE_AN_COMPLETE:
netif_dbg(pdata, link, pdata->netdev,
"Auto negotiation successful\n");
break;
case XGBE_AN_NO_LINK:
break;
default:
pdata->an_state = XGBE_AN_ERROR;
}
if (pdata->an_state == XGBE_AN_ERROR) {
netdev_err(pdata->netdev,
"error during auto-negotiation, state=%u\n",
cur_state);
pdata->an_int = 0;
xgbe_an37_clear_interrupts(pdata);
}
if (pdata->an_state >= XGBE_AN_COMPLETE) {
pdata->an_result = pdata->an_state;
pdata->an_state = XGBE_AN_READY;
netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
xgbe_state_as_string(pdata->an_result));
}
xgbe_an37_enable_interrupts(pdata);
}
static void xgbe_an73_state_machine(struct xgbe_prv_data *pdata)
{
enum xgbe_an cur_state = pdata->an_state;
if (!pdata->an_int)
goto out;
return;
next_int:
if (pdata->an_int & XGBE_AN_PG_RCV) {
if (pdata->an_int & XGBE_AN_CL73_PG_RCV) {
pdata->an_state = XGBE_AN_PAGE_RECEIVED;
pdata->an_int &= ~XGBE_AN_PG_RCV;
} else if (pdata->an_int & XGBE_AN_INC_LINK) {
pdata->an_int &= ~XGBE_AN_CL73_PG_RCV;
} else if (pdata->an_int & XGBE_AN_CL73_INC_LINK) {
pdata->an_state = XGBE_AN_INCOMPAT_LINK;
pdata->an_int &= ~XGBE_AN_INC_LINK;
} else if (pdata->an_int & XGBE_AN_INT_CMPLT) {
pdata->an_int &= ~XGBE_AN_CL73_INC_LINK;
} else if (pdata->an_int & XGBE_AN_CL73_INT_CMPLT) {
pdata->an_state = XGBE_AN_COMPLETE;
pdata->an_int &= ~XGBE_AN_INT_CMPLT;
pdata->an_int &= ~XGBE_AN_CL73_INT_CMPLT;
} else {
pdata->an_state = XGBE_AN_ERROR;
}
pdata->an_result = pdata->an_state;
again:
netif_dbg(pdata, link, pdata->netdev, "AN %s\n",
netif_dbg(pdata, link, pdata->netdev, "CL73 AN %s\n",
xgbe_state_as_string(pdata->an_state));
cur_state = pdata->an_state;
......@@ -719,14 +764,14 @@ static void xgbe_an_state_machine(struct work_struct *work)
break;
case XGBE_AN_PAGE_RECEIVED:
pdata->an_state = xgbe_an_page_received(pdata);
pdata->an_state = xgbe_an73_page_received(pdata);
pdata->an_supported++;
break;
case XGBE_AN_INCOMPAT_LINK:
pdata->an_supported = 0;
pdata->parallel_detect = 0;
pdata->an_state = xgbe_an_incompat_link(pdata);
pdata->an_state = xgbe_an73_incompat_link(pdata);
break;
case XGBE_AN_COMPLETE:
......@@ -745,14 +790,14 @@ static void xgbe_an_state_machine(struct work_struct *work)
if (pdata->an_state == XGBE_AN_NO_LINK) {
pdata->an_int = 0;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
xgbe_an73_clear_interrupts(pdata);
} else if (pdata->an_state == XGBE_AN_ERROR) {
netdev_err(pdata->netdev,
"error during auto-negotiation, state=%u\n",
cur_state);
pdata->an_int = 0;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
xgbe_an73_clear_interrupts(pdata);
}
if (pdata->an_state >= XGBE_AN_COMPLETE) {
......@@ -762,7 +807,7 @@ static void xgbe_an_state_machine(struct work_struct *work)
pdata->kx_state = XGBE_RX_BPA;
pdata->an_start = 0;
netif_dbg(pdata, link, pdata->netdev, "AN result: %s\n",
netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
xgbe_state_as_string(pdata->an_result));
}
......@@ -772,14 +817,77 @@ static void xgbe_an_state_machine(struct work_struct *work)
if (pdata->an_int)
goto next_int;
out:
/* Enable AN interrupts on the way out */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_INT_MASK);
xgbe_an73_enable_interrupts(pdata);
}
static void xgbe_an_state_machine(struct work_struct *work)
{
struct xgbe_prv_data *pdata = container_of(work,
struct xgbe_prv_data,
an_work);
mutex_lock(&pdata->an_mutex);
switch (pdata->an_mode) {
case XGBE_AN_MODE_CL73:
xgbe_an73_state_machine(pdata);
break;
case XGBE_AN_MODE_CL37:
case XGBE_AN_MODE_CL37_SGMII:
xgbe_an37_state_machine(pdata);
break;
default:
break;
}
mutex_unlock(&pdata->an_mutex);
}
static void xgbe_an_init(struct xgbe_prv_data *pdata)
static void xgbe_an37_init(struct xgbe_prv_data *pdata)
{
unsigned int reg;
/* Set up Advertisement register */
reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE);
if (pdata->phy.advertising & ADVERTISED_Pause)
reg |= 0x100;
else
reg &= ~0x100;
if (pdata->phy.advertising & ADVERTISED_Asym_Pause)
reg |= 0x80;
else
reg &= ~0x80;
/* Full duplex, but not half */
reg |= XGBE_AN_CL37_FD_MASK;
reg &= ~XGBE_AN_CL37_HD_MASK;
XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE, reg);
/* Set up the Control register */
reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
reg &= ~XGBE_AN_CL37_TX_CONFIG_MASK;
reg &= ~XGBE_AN_CL37_PCS_MODE_MASK;
switch (pdata->an_mode) {
case XGBE_AN_MODE_CL37:
reg |= XGBE_AN_CL37_PCS_MODE_BASEX;
break;
case XGBE_AN_MODE_CL37_SGMII:
reg |= XGBE_AN_CL37_PCS_MODE_SGMII;
break;
default:
break;
}
XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n",
(pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII");
}
static void xgbe_an73_init(struct xgbe_prv_data *pdata)
{
unsigned int reg;
......@@ -824,7 +932,24 @@ static void xgbe_an_init(struct xgbe_prv_data *pdata)
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
netif_dbg(pdata, link, pdata->netdev, "AN initialized\n");
netif_dbg(pdata, link, pdata->netdev, "CL73 AN initialized\n");
}
static void xgbe_an_init(struct xgbe_prv_data *pdata)
{
/* Set up advertisement registers based on current settings */
pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata);
switch (pdata->an_mode) {
case XGBE_AN_MODE_CL73:
xgbe_an73_init(pdata);
break;
case XGBE_AN_MODE_CL37:
case XGBE_AN_MODE_CL37_SGMII:
xgbe_an37_init(pdata);
break;
default:
break;
}
}
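The same three-way switch on pdata->an_mode now appears in the init, restart, disable, and ISR paths. An alternative worth noting (a sketch, not what this driver does) is a per-mode ops table, trading the repeated switches for one indirection:
/* Hypothetical alternative structure; not part of the driver */
struct example_an_ops {
	void (*init)(struct xgbe_prv_data *pdata);
	void (*restart)(struct xgbe_prv_data *pdata);
	void (*disable)(struct xgbe_prv_data *pdata);
};
static const struct example_an_ops example_an73_ops = {
	.init		= xgbe_an73_init,
	.restart	= xgbe_an73_restart,
	.disable	= xgbe_an73_disable,
};
The switch form chosen here keeps the CL37/CL37_SGMII pairing explicit at every call site, which is arguably clearer for only two mode families.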
static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
......@@ -907,24 +1032,28 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
xgbe_phy_print_status(pdata);
}
static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
{
return pdata->phy_if.phy_impl.valid_speed(pdata, speed);
}
static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
{
enum xgbe_mode mode;
netif_dbg(pdata, link, pdata->netdev, "fixed PHY configuration\n");
/* Disable auto-negotiation */
xgbe_disable_an(pdata);
/* Validate/Set specified speed */
switch (pdata->phy.speed) {
case SPEED_10000:
xgbe_set_mode(pdata, XGBE_MODE_KR);
xgbe_an_disable(pdata);
/* Set specified mode for specified speed */
mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed);
switch (mode) {
case XGBE_MODE_KX_1000:
case XGBE_MODE_KX_2500:
case XGBE_MODE_KR:
break;
case SPEED_2500:
case SPEED_1000:
xgbe_set_mode(pdata, XGBE_MODE_KX);
break;
case XGBE_MODE_UNKNOWN:
default:
return -EINVAL;
}
......@@ -933,6 +1062,8 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
if (pdata->phy.duplex != DUPLEX_FULL)
return -EINVAL;
xgbe_set_mode(pdata, mode);
return 0;
}
......@@ -950,21 +1081,22 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
disable_irq(pdata->an_irq);
/* Start auto-negotiation in a supported mode */
if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) {
if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
xgbe_set_mode(pdata, XGBE_MODE_KR);
} else if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
(pdata->phy.advertising & ADVERTISED_2500baseX_Full)) {
xgbe_set_mode(pdata, XGBE_MODE_KX);
} else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
xgbe_set_mode(pdata, XGBE_MODE_KX_2500);
} else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
xgbe_set_mode(pdata, XGBE_MODE_KX_1000);
} else {
enable_irq(pdata->an_irq);
return -EINVAL;
}
/* Disable and stop any in progress auto-negotiation */
xgbe_disable_an(pdata);
xgbe_an_disable_all(pdata);
/* Clear any auto-negotiation interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
xgbe_an_clear_interrupts_all(pdata);
pdata->an_result = XGBE_AN_READY;
pdata->an_state = XGBE_AN_READY;
......@@ -974,11 +1106,8 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
/* Re-enable auto-negotiation interrupt */
enable_irq(pdata->an_irq);
/* Set up advertisement registers based on current settings */
xgbe_an_init(pdata);
/* Enable and start auto-negotiation */
xgbe_restart_an(pdata);
xgbe_an_restart(pdata);
return 0;
}
......@@ -1016,108 +1145,45 @@ static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
}
}
static void xgbe_phy_status_force(struct xgbe_prv_data *pdata)
static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
{
if (xgbe_in_kr_mode(pdata)) {
pdata->phy.speed = SPEED_10000;
} else {
switch (pdata->speed_set) {
case XGBE_SPEEDSET_1000_10000:
pdata->phy.speed = SPEED_1000;
break;
case XGBE_SPEEDSET_2500_10000:
pdata->phy.speed = SPEED_2500;
break;
}
}
pdata->phy.duplex = DUPLEX_FULL;
return pdata->phy_if.phy_impl.an_outcome(pdata);
}
static void xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
{
unsigned int ad_reg, lp_reg;
enum xgbe_mode mode;
pdata->phy.lp_advertising = 0;
if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
return xgbe_phy_status_force(pdata);
pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
pdata->phy.lp_advertising |= ADVERTISED_Backplane;
/* Compare Advertisement and Link Partner register 1 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
if (lp_reg & 0x400)
pdata->phy.lp_advertising |= ADVERTISED_Pause;
if (lp_reg & 0x800)
pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
if (pdata->phy.pause_autoneg) {
/* Set flow control based on auto-negotiation result */
pdata->phy.tx_pause = 0;
pdata->phy.rx_pause = 0;
if (ad_reg & lp_reg & 0x400) {
pdata->phy.tx_pause = 1;
pdata->phy.rx_pause = 1;
} else if (ad_reg & lp_reg & 0x800) {
if (ad_reg & 0x400)
pdata->phy.rx_pause = 1;
else if (lp_reg & 0x400)
pdata->phy.tx_pause = 1;
}
}
/* Compare Advertisement and Link Partner register 2 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
if (lp_reg & 0x80)
pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
if (lp_reg & 0x20) {
switch (pdata->speed_set) {
case XGBE_SPEEDSET_1000_10000:
pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
break;
case XGBE_SPEEDSET_2500_10000:
pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
break;
}
}
mode = xgbe_cur_mode(pdata);
else
mode = xgbe_phy_status_aneg(pdata);
ad_reg &= lp_reg;
if (ad_reg & 0x80) {
switch (mode) {
case XGBE_MODE_KX_1000:
pdata->phy.speed = SPEED_1000;
break;
case XGBE_MODE_KX_2500:
pdata->phy.speed = SPEED_2500;
break;
case XGBE_MODE_KR:
pdata->phy.speed = SPEED_10000;
xgbe_set_mode(pdata, XGBE_MODE_KR);
} else if (ad_reg & 0x20) {
switch (pdata->speed_set) {
case XGBE_SPEEDSET_1000_10000:
pdata->phy.speed = SPEED_1000;
break;
case XGBE_SPEEDSET_2500_10000:
pdata->phy.speed = SPEED_2500;
break;
}
xgbe_set_mode(pdata, XGBE_MODE_KX);
} else {
break;
case XGBE_MODE_UNKNOWN:
default:
pdata->phy.speed = SPEED_UNKNOWN;
}
/* Compare Advertisement and Link Partner register 3 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
if (lp_reg & 0xc000)
pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
pdata->phy.duplex = DUPLEX_FULL;
xgbe_set_mode(pdata, mode);
}
static void xgbe_phy_status(struct xgbe_prv_data *pdata)
{
unsigned int reg, link_aneg;
unsigned int link_aneg;
if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
netif_carrier_off(pdata->netdev);
......@@ -1128,20 +1194,14 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
/* Get the link status. Link status is latched low, so read
* once to clear and then read again to get current state
*/
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
pdata->phy.link = (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata);
if (pdata->phy.link) {
if (link_aneg && !xgbe_phy_aneg_done(pdata)) {
xgbe_check_link_timeout(pdata);
return;
}
xgbe_phy_status_aneg(pdata);
xgbe_phy_status_result(pdata);
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
......@@ -1155,7 +1215,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
return;
}
xgbe_phy_status_aneg(pdata);
xgbe_phy_status_result(pdata);
netif_carrier_off(pdata->netdev);
}
......@@ -1168,14 +1228,19 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
{
netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
/* Disable auto-negotiation */
xgbe_disable_an(pdata);
if (!pdata->phy_started)
return;
/* Disable auto-negotiation interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
/* Indicate the PHY is down */
pdata->phy_started = 0;
/* Disable auto-negotiation */
xgbe_an_disable_all(pdata);
devm_free_irq(pdata->dev, pdata->an_irq, pdata);
pdata->phy_if.phy_impl.stop(pdata);
pdata->phy.link = 0;
netif_carrier_off(pdata->netdev);
......@@ -1189,64 +1254,62 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
netif_dbg(pdata, link, pdata->netdev, "starting PHY\n");
ret = pdata->phy_if.phy_impl.start(pdata);
if (ret)
return ret;
ret = devm_request_irq(pdata->dev, pdata->an_irq,
xgbe_an_isr, 0, pdata->an_name,
pdata);
if (ret) {
netdev_err(netdev, "phy irq request failed\n");
return ret;
goto err_stop;
}
/* Set initial mode - call the mode setting routines
* directly to ensure we are properly configured
*/
if (xgbe_use_xgmii_mode(pdata)) {
xgbe_xgmii_mode(pdata);
} else if (xgbe_use_gmii_mode(pdata)) {
xgbe_gmii_mode(pdata);
} else if (xgbe_use_gmii_2500_mode(pdata)) {
xgbe_gmii_2500_mode(pdata);
if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
xgbe_kr_mode(pdata);
} else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
xgbe_kx_2500_mode(pdata);
} else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
xgbe_kx_1000_mode(pdata);
} else {
ret = -EINVAL;
goto err_irq;
}
/* Set up advertisement registers based on current settings */
xgbe_an_init(pdata);
/* Indicate the PHY is up and running */
pdata->phy_started = 1;
/* Enable auto-negotiation interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
xgbe_an_init(pdata);
xgbe_an_enable_interrupts(pdata);
return xgbe_phy_config_aneg(pdata);
err_irq:
devm_free_irq(pdata->dev, pdata->an_irq, pdata);
err_stop:
pdata->phy_if.phy_impl.stop(pdata);
return ret;
}
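The unwind labels above run in reverse order of setup: a failed IRQ request stops the PHY implementation, and a failed mode selection frees the IRQ first and then stops. The general goto-ladder shape, as a standalone sketch (the helpers are assumed, for illustration only):
static int example_setup_a(void);	/* assumed helpers, illustration only */
static int example_setup_b(void);
static void example_undo_a(void);
static int example_start(void)
{
	int ret;
	ret = example_setup_a();
	if (ret)
		return ret;		/* nothing to undo yet */
	ret = example_setup_b();
	if (ret)
		goto err_undo_a;	/* undo only what succeeded */
	return 0;
err_undo_a:
	example_undo_a();
	return ret;
}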
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
unsigned int count, reg;
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg |= MDIO_CTRL1_RESET;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
count = 50;
do {
msleep(20);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
} while ((reg & MDIO_CTRL1_RESET) && --count);
int ret;
if (reg & MDIO_CTRL1_RESET)
return -ETIMEDOUT;
ret = pdata->phy_if.phy_impl.reset(pdata);
if (ret)
return ret;
/* Disable auto-negotiation for now */
xgbe_disable_an(pdata);
xgbe_an_disable_all(pdata);
/* Clear auto-negotiation interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
xgbe_an_clear_interrupts_all(pdata);
return 0;
}
......@@ -1257,74 +1320,90 @@ static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
dev_dbg(dev, "\n************* PHY Reg dump **********************\n");
dev_dbg(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
dev_dbg(dev, "PCS Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
dev_dbg(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
dev_dbg(dev, "PCS Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
dev_dbg(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
dev_dbg(dev, "Phy Id (PHYS ID 1 %#06x)= %#06x\n", MDIO_DEVID1,
XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
dev_dbg(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
dev_dbg(dev, "Phy Id (PHYS ID 2 %#06x)= %#06x\n", MDIO_DEVID2,
XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
dev_dbg(dev, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS1,
XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
dev_dbg(dev, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS2,
XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
dev_dbg(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
dev_dbg(dev, "Auto-Neg Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
dev_dbg(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
dev_dbg(dev, "Auto-Neg Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#06x) = %#06x\n",
MDIO_AN_ADVERTISE,
XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#06x) = %#06x\n",
MDIO_AN_ADVERTISE + 1,
XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#06x) = %#06x\n",
MDIO_AN_ADVERTISE + 2,
XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
dev_dbg(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
dev_dbg(dev, "Auto-Neg Completion Reg (%#06x) = %#06x\n",
MDIO_AN_COMP_STAT,
XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
dev_dbg(dev, "\n*************************************************\n");
}
static void xgbe_phy_init(struct xgbe_prv_data *pdata)
static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata)
{
if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
return SPEED_10000;
else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
return SPEED_2500;
else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
return SPEED_1000;
return SPEED_UNKNOWN;
}
static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
{
xgbe_phy_stop(pdata);
pdata->phy_if.phy_impl.exit(pdata);
}
static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
int ret;
mutex_init(&pdata->an_mutex);
INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
pdata->mdio_mmd = MDIO_MMD_PCS;
/* Initialize supported features */
pdata->phy.supported = SUPPORTED_Autoneg;
pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
pdata->phy.supported |= SUPPORTED_Backplane;
pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
switch (pdata->speed_set) {
case XGBE_SPEEDSET_1000_10000:
pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
break;
case XGBE_SPEEDSET_2500_10000:
pdata->phy.supported |= SUPPORTED_2500baseX_Full;
break;
}
/* Check for FEC support */
pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
MDIO_PMA_10GBR_FECABLE);
pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
MDIO_PMA_10GBR_FECABLE_ERRABLE);
if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
/* Setup the phy (including supported features) */
ret = pdata->phy_if.phy_impl.init(pdata);
if (ret)
return ret;
pdata->phy.advertising = pdata->phy.supported;
pdata->phy.address = 0;
pdata->phy.autoneg = AUTONEG_ENABLE;
pdata->phy.speed = SPEED_UNKNOWN;
pdata->phy.duplex = DUPLEX_UNKNOWN;
if (pdata->phy.advertising & ADVERTISED_Autoneg) {
pdata->phy.autoneg = AUTONEG_ENABLE;
pdata->phy.speed = SPEED_UNKNOWN;
pdata->phy.duplex = DUPLEX_UNKNOWN;
} else {
pdata->phy.autoneg = AUTONEG_DISABLE;
pdata->phy.speed = xgbe_phy_best_advertised_speed(pdata);
pdata->phy.duplex = DUPLEX_FULL;
}
pdata->phy.link = 0;
......@@ -1346,11 +1425,14 @@ static void xgbe_phy_init(struct xgbe_prv_data *pdata)
if (netif_msg_drv(pdata))
xgbe_dump_phy_registers(pdata);
return 0;
}
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
{
phy_if->phy_init = xgbe_phy_init;
phy_if->phy_exit = xgbe_phy_exit;
phy_if->phy_reset = xgbe_phy_reset;
phy_if->phy_start = xgbe_phy_start;
......@@ -1358,4 +1440,6 @@ void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
phy_if->phy_status = xgbe_phy_status;
phy_if->phy_config_aneg = xgbe_phy_config_aneg;
phy_if->phy_valid_speed = xgbe_phy_valid_speed;
}
/*
* AMD 10Gb Ethernet driver
*
* This file is available to you under your choice of the following two
* licenses:
*
* License 1: GPLv2
*
* Copyright (c) 2016 Advanced Micro Devices, Inc.
*
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or (at
* your option) any later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* The Synopsys DWC ETHER XGMAC Software Driver and documentation
* (hereinafter "Software") is an unsupported proprietary work of Synopsys,
* Inc. unless otherwise expressly agreed to in writing between Synopsys
* and you.
*
* The Software IS NOT an item of Licensed Software or Licensed Product
* under any End User Software License Agreement or Agreement for Licensed
* Product with Synopsys or any supplement thereto. Permission is hereby
* granted, free of charge, to any person obtaining a copy of this software
* annotated with this license and the Software, to deal in the Software
* without restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
* BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
*
* License 2: Modified BSD
*
* Copyright (c) 2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Advanced Micro Devices, Inc. nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* The Synopsys DWC ETHER XGMAC Software Driver and documentation
* (hereinafter "Software") is an unsupported proprietary work of Synopsys,
* Inc. unless otherwise expressly agreed to in writing between Synopsys
* and you.
*
* The Software IS NOT an item of Licensed Software or Licensed Product
* under any End User Software License Agreement or Agreement for Licensed
* Product with Synopsys or any supplement thereto. Permission is hereby
* granted, free of charge, to any person obtaining a copy of this software
* annotated with this license and the Software, to deal in the Software
* without restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
* BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/property.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include "xgbe.h"
#include "xgbe-common.h"
#define XGBE_BLWC_PROPERTY "amd,serdes-blwc"
#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
#define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
#define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp"
#define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
#define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
/* Default SerDes settings */
#define XGBE_SPEED_1000_BLWC 1
#define XGBE_SPEED_1000_CDR 0x2
#define XGBE_SPEED_1000_PLL 0x0
#define XGBE_SPEED_1000_PQ 0xa
#define XGBE_SPEED_1000_RATE 0x3
#define XGBE_SPEED_1000_TXAMP 0xf
#define XGBE_SPEED_1000_WORD 0x1
#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
#define XGBE_SPEED_2500_BLWC 1
#define XGBE_SPEED_2500_CDR 0x2
#define XGBE_SPEED_2500_PLL 0x0
#define XGBE_SPEED_2500_PQ 0xa
#define XGBE_SPEED_2500_RATE 0x1
#define XGBE_SPEED_2500_TXAMP 0xf
#define XGBE_SPEED_2500_WORD 0x1
#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0
#define XGBE_SPEED_10000_BLWC 0
#define XGBE_SPEED_10000_CDR 0x7
#define XGBE_SPEED_10000_PLL 0x1
#define XGBE_SPEED_10000_PQ 0x12
#define XGBE_SPEED_10000_RATE 0x0
#define XGBE_SPEED_10000_TXAMP 0xa
#define XGBE_SPEED_10000_WORD 0x7
#define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1
#define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f
/* Rate-change complete wait/retry count */
#define XGBE_RATECHANGE_COUNT 500
static const u32 xgbe_phy_blwc[] = {
XGBE_SPEED_1000_BLWC,
XGBE_SPEED_2500_BLWC,
XGBE_SPEED_10000_BLWC,
};
static const u32 xgbe_phy_cdr_rate[] = {
XGBE_SPEED_1000_CDR,
XGBE_SPEED_2500_CDR,
XGBE_SPEED_10000_CDR,
};
static const u32 xgbe_phy_pq_skew[] = {
XGBE_SPEED_1000_PQ,
XGBE_SPEED_2500_PQ,
XGBE_SPEED_10000_PQ,
};
static const u32 xgbe_phy_tx_amp[] = {
XGBE_SPEED_1000_TXAMP,
XGBE_SPEED_2500_TXAMP,
XGBE_SPEED_10000_TXAMP,
};
static const u32 xgbe_phy_dfe_tap_cfg[] = {
XGBE_SPEED_1000_DFE_TAP_CONFIG,
XGBE_SPEED_2500_DFE_TAP_CONFIG,
XGBE_SPEED_10000_DFE_TAP_CONFIG,
};
static const u32 xgbe_phy_dfe_tap_ena[] = {
XGBE_SPEED_1000_DFE_TAP_ENABLE,
XGBE_SPEED_2500_DFE_TAP_ENABLE,
XGBE_SPEED_10000_DFE_TAP_ENABLE,
};
struct xgbe_phy_data {
/* 1000/10000 vs 2500/10000 indicator */
unsigned int speed_set;
/* SerDes UEFI configurable settings.
* Switching between modes/speeds requires new values for some
* SerDes settings. The values can be supplied as device
* properties in array format. The first array entry is for
* 1GbE, second for 2.5GbE and third for 10GbE
*/
u32 blwc[XGBE_SPEEDS];
u32 cdr_rate[XGBE_SPEEDS];
u32 pq_skew[XGBE_SPEEDS];
u32 tx_amp[XGBE_SPEEDS];
u32 dfe_tap_cfg[XGBE_SPEEDS];
u32 dfe_tap_ena[XGBE_SPEEDS];
};
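Each array is indexed by speed (entry 0 for 1GbE, 1 for 2.5GbE, 2 for 10GbE, matching XGBE_SPEEDS). xgbe_phy_init() below fills them with the optional-property pattern: take the firmware-supplied array when present, otherwise fall back to the driver defaults. A sketch of that pattern (the example_* wrapper is hypothetical; device_property_present() and device_property_read_u32_array() are the kernel APIs it wraps):
#include <linux/types.h>
#include <linux/string.h>
#include <linux/property.h>
static int example_get_serdes_array(struct device *dev, const char *prop,
				    u32 *vals, const u32 *defaults,
				    size_t count)
{
	if (!device_property_present(dev, prop)) {
		memcpy(vals, defaults, count * sizeof(*vals));
		return 0;	/* no property: keep the built-in defaults */
	}
	return device_property_read_u32_array(dev, prop, vals, count);
}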
static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
{
XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
}
static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
{
XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
}
static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
enum xgbe_mode mode;
unsigned int ad_reg, lp_reg;
pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
pdata->phy.lp_advertising |= ADVERTISED_Backplane;
/* Compare Advertisement and Link Partner register 1 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
if (lp_reg & 0x400)
pdata->phy.lp_advertising |= ADVERTISED_Pause;
if (lp_reg & 0x800)
pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
if (pdata->phy.pause_autoneg) {
/* Set flow control based on auto-negotiation result */
pdata->phy.tx_pause = 0;
pdata->phy.rx_pause = 0;
if (ad_reg & lp_reg & 0x400) {
pdata->phy.tx_pause = 1;
pdata->phy.rx_pause = 1;
} else if (ad_reg & lp_reg & 0x800) {
if (ad_reg & 0x400)
pdata->phy.rx_pause = 1;
else if (lp_reg & 0x400)
pdata->phy.tx_pause = 1;
}
}
/* Compare Advertisement and Link Partner register 2 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
if (lp_reg & 0x80)
pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
if (lp_reg & 0x20) {
if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
else
pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
}
ad_reg &= lp_reg;
if (ad_reg & 0x80) {
mode = XGBE_MODE_KR;
} else if (ad_reg & 0x20) {
if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
mode = XGBE_MODE_KX_2500;
else
mode = XGBE_MODE_KX_1000;
} else {
mode = XGBE_MODE_UNKNOWN;
}
/* Compare Advertisement and Link Partner register 3 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
if (lp_reg & 0xc000)
pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
return mode;
}
static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata)
{
return XGBE_AN_MODE_CL73;
}
static void xgbe_phy_pcs_power_cycle(struct xgbe_prv_data *pdata)
{
unsigned int reg;
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg |= MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
usleep_range(75, 100);
reg &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
}
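The power cycle is a plain read-modify-write toggle with a settle delay between entering and leaving the low-power state. Its generic shape, with made-up register accessors standing in for the MDIO reads and writes:
#include <linux/types.h>
#include <linux/delay.h>
static void example_pulse_bit(u32 (*rd)(void), void (*wr)(u32), u32 bit)
{
	u32 val = rd();
	wr(val | bit);			/* assert, e.g. enter low power */
	usleep_range(75, 100);		/* let the hardware settle */
	wr(val & ~bit);			/* release */
}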
static void xgbe_phy_start_ratechange(struct xgbe_prv_data *pdata)
{
/* Assert Rx and Tx ratechange */
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
}
static void xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
{
unsigned int wait;
u16 status;
/* Release Rx and Tx ratechange */
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
/* Wait for Rx and Tx ready */
wait = XGBE_RATECHANGE_COUNT;
while (wait--) {
usleep_range(50, 75);
status = XSIR0_IOREAD(pdata, SIR0_STATUS);
if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
goto rx_reset;
}
netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
status);
rx_reset:
/* Perform Rx reset for the DFE changes */
XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
}
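The wait loop above is a bounded poll: retry XGBE_RATECHANGE_COUNT times with a short sleep, and fall through to a debug message rather than blocking forever if the SerDes never reports ready. Reduced to its skeleton (example_ready() is an assumed hardware predicate):
#include <linux/delay.h>
static bool example_ready(void);	/* assumed hardware predicate */
static bool example_wait_ready(unsigned int retries)
{
	while (retries--) {
		usleep_range(50, 75);	/* poll interval */
		if (example_ready())
			return true;
	}
	return false;			/* caller logs and carries on */
}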
static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int reg;
/* Set PCS to KR/10G speed */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= ~MDIO_PCS_CTRL2_TYPE;
reg |= MDIO_PCS_CTRL2_10GBR;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg &= ~MDIO_CTRL1_SPEEDSEL;
reg |= MDIO_CTRL1_SPEED10G;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
xgbe_phy_pcs_power_cycle(pdata);
/* Set SerDes to 10G speed */
xgbe_phy_start_ratechange(pdata);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
phy_data->cdr_rate[XGBE_SPEED_10000]);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
phy_data->tx_amp[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
phy_data->blwc[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
phy_data->pq_skew[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
phy_data->dfe_tap_cfg[XGBE_SPEED_10000]);
XRXTX_IOWRITE(pdata, RXTX_REG22,
phy_data->dfe_tap_ena[XGBE_SPEED_10000]);
xgbe_phy_complete_ratechange(pdata);
netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
}
static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int reg;
/* Set PCS to KX/1G speed */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= ~MDIO_PCS_CTRL2_TYPE;
reg |= MDIO_PCS_CTRL2_10GBX;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg &= ~MDIO_CTRL1_SPEEDSEL;
reg |= MDIO_CTRL1_SPEED1G;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
xgbe_phy_pcs_power_cycle(pdata);
/* Set SerDes to 2.5G speed */
xgbe_phy_start_ratechange(pdata);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
phy_data->cdr_rate[XGBE_SPEED_2500]);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
phy_data->tx_amp[XGBE_SPEED_2500]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
phy_data->blwc[XGBE_SPEED_2500]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
phy_data->pq_skew[XGBE_SPEED_2500]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
phy_data->dfe_tap_cfg[XGBE_SPEED_2500]);
XRXTX_IOWRITE(pdata, RXTX_REG22,
phy_data->dfe_tap_ena[XGBE_SPEED_2500]);
xgbe_phy_complete_ratechange(pdata);
netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
}
static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int reg;
/* Set PCS to KX/1G speed */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= ~MDIO_PCS_CTRL2_TYPE;
reg |= MDIO_PCS_CTRL2_10GBX;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg &= ~MDIO_CTRL1_SPEEDSEL;
reg |= MDIO_CTRL1_SPEED1G;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
xgbe_phy_pcs_power_cycle(pdata);
/* Set SerDes to 1G speed */
xgbe_phy_start_ratechange(pdata);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
phy_data->cdr_rate[XGBE_SPEED_1000]);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
phy_data->tx_amp[XGBE_SPEED_1000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
phy_data->blwc[XGBE_SPEED_1000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
phy_data->pq_skew[XGBE_SPEED_1000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
phy_data->dfe_tap_cfg[XGBE_SPEED_1000]);
XRXTX_IOWRITE(pdata, RXTX_REG22,
phy_data->dfe_tap_ena[XGBE_SPEED_1000]);
xgbe_phy_complete_ratechange(pdata);
netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
}
static enum xgbe_mode xgbe_phy_cur_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
enum xgbe_mode mode;
unsigned int reg;
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= MDIO_PCS_CTRL2_TYPE;
if (reg == MDIO_PCS_CTRL2_10GBR) {
mode = XGBE_MODE_KR;
} else {
if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
mode = XGBE_MODE_KX_2500;
else
mode = XGBE_MODE_KX_1000;
}
return mode;
}
static enum xgbe_mode xgbe_phy_switch_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
enum xgbe_mode mode;
/* If we are in KR, switch to KX, and vice-versa */
if (xgbe_phy_cur_mode(pdata) == XGBE_MODE_KR) {
if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
mode = XGBE_MODE_KX_2500;
else
mode = XGBE_MODE_KX_1000;
} else {
mode = XGBE_MODE_KR;
}
return mode;
}
static enum xgbe_mode xgbe_phy_get_mode(struct xgbe_prv_data *pdata,
int speed)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
switch (speed) {
case SPEED_1000:
return (phy_data->speed_set == XGBE_SPEEDSET_1000_10000)
? XGBE_MODE_KX_1000 : XGBE_MODE_UNKNOWN;
case SPEED_2500:
return (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
? XGBE_MODE_KX_2500 : XGBE_MODE_UNKNOWN;
case SPEED_10000:
return XGBE_MODE_KR;
default:
return XGBE_MODE_UNKNOWN;
}
}
static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
{
switch (mode) {
case XGBE_MODE_KX_1000:
xgbe_phy_kx_1000_mode(pdata);
break;
case XGBE_MODE_KX_2500:
xgbe_phy_kx_2500_mode(pdata);
break;
case XGBE_MODE_KR:
xgbe_phy_kr_mode(pdata);
break;
default:
break;
}
}
static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata,
enum xgbe_mode mode, u32 advert)
{
if (pdata->phy.autoneg == AUTONEG_ENABLE) {
if (pdata->phy.advertising & advert)
return true;
} else {
enum xgbe_mode cur_mode;
cur_mode = xgbe_phy_get_mode(pdata, pdata->phy.speed);
if (cur_mode == mode)
return true;
}
return false;
}
static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
{
switch (mode) {
case XGBE_MODE_KX_1000:
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_1000baseKX_Full);
case XGBE_MODE_KX_2500:
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_2500baseX_Full);
case XGBE_MODE_KR:
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_10000baseKR_Full);
default:
return false;
}
}
static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
switch (speed) {
case SPEED_1000:
if (phy_data->speed_set != XGBE_SPEEDSET_1000_10000)
return false;
return true;
case SPEED_2500:
if (phy_data->speed_set != XGBE_SPEEDSET_2500_10000)
return false;
return true;
case SPEED_10000:
return true;
default:
return false;
}
}
static int xgbe_phy_link_status(struct xgbe_prv_data *pdata)
{
unsigned int reg;
/* Link status is latched low, so read once to clear
* and then read again to get current state
*/
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
return (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
}
static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
{
/* Nothing uniquely required for stop */
}
static int xgbe_phy_start(struct xgbe_prv_data *pdata)
{
/* Nothing uniquely required for start */
return 0;
}
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
unsigned int reg, count;
/* Perform a software reset of the PCS */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg |= MDIO_CTRL1_RESET;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
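/* Wait up to ~1 second (50 iterations * 20ms) for the self-clearing
 * reset bit
 */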
count = 50;
do {
msleep(20);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
} while ((reg & MDIO_CTRL1_RESET) && --count);
if (reg & MDIO_CTRL1_RESET)
return -ETIMEDOUT;
return 0;
}
static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
{
/* Nothing uniquely required for exit */
}
static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data;
int ret;
phy_data = devm_kzalloc(pdata->dev, sizeof(*phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
/* Retrieve the PHY speedset */
ret = device_property_read_u32(pdata->phy_dev, XGBE_SPEEDSET_PROPERTY,
&phy_data->speed_set);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_SPEEDSET_PROPERTY);
return ret;
}
switch (phy_data->speed_set) {
case XGBE_SPEEDSET_1000_10000:
case XGBE_SPEEDSET_2500_10000:
break;
default:
dev_err(pdata->dev, "invalid %s property\n",
XGBE_SPEEDSET_PROPERTY);
return -EINVAL;
}
/* Retrieve the PHY configuration properties */
if (device_property_present(pdata->phy_dev, XGBE_BLWC_PROPERTY)) {
ret = device_property_read_u32_array(pdata->phy_dev,
XGBE_BLWC_PROPERTY,
phy_data->blwc,
XGBE_SPEEDS);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_BLWC_PROPERTY);
return ret;
}
} else {
memcpy(phy_data->blwc, xgbe_phy_blwc,
sizeof(phy_data->blwc));
}
if (device_property_present(pdata->phy_dev, XGBE_CDR_RATE_PROPERTY)) {
ret = device_property_read_u32_array(pdata->phy_dev,
XGBE_CDR_RATE_PROPERTY,
phy_data->cdr_rate,
XGBE_SPEEDS);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_CDR_RATE_PROPERTY);
return ret;
}
} else {
memcpy(phy_data->cdr_rate, xgbe_phy_cdr_rate,
sizeof(phy_data->cdr_rate));
}
if (device_property_present(pdata->phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
ret = device_property_read_u32_array(pdata->phy_dev,
XGBE_PQ_SKEW_PROPERTY,
phy_data->pq_skew,
XGBE_SPEEDS);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_PQ_SKEW_PROPERTY);
return ret;
}
} else {
memcpy(phy_data->pq_skew, xgbe_phy_pq_skew,
sizeof(phy_data->pq_skew));
}
if (device_property_present(pdata->phy_dev, XGBE_TX_AMP_PROPERTY)) {
ret = device_property_read_u32_array(pdata->phy_dev,
XGBE_TX_AMP_PROPERTY,
phy_data->tx_amp,
XGBE_SPEEDS);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_TX_AMP_PROPERTY);
return ret;
}
} else {
memcpy(phy_data->tx_amp, xgbe_phy_tx_amp,
sizeof(phy_data->tx_amp));
}
if (device_property_present(pdata->phy_dev, XGBE_DFE_CFG_PROPERTY)) {
ret = device_property_read_u32_array(pdata->phy_dev,
XGBE_DFE_CFG_PROPERTY,
phy_data->dfe_tap_cfg,
XGBE_SPEEDS);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_DFE_CFG_PROPERTY);
return ret;
}
} else {
memcpy(phy_data->dfe_tap_cfg, xgbe_phy_dfe_tap_cfg,
sizeof(phy_data->dfe_tap_cfg));
}
if (device_property_present(pdata->phy_dev, XGBE_DFE_ENA_PROPERTY)) {
ret = device_property_read_u32_array(pdata->phy_dev,
XGBE_DFE_ENA_PROPERTY,
phy_data->dfe_tap_ena,
XGBE_SPEEDS);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_DFE_ENA_PROPERTY);
return ret;
}
} else {
memcpy(phy_data->dfe_tap_ena, xgbe_phy_dfe_tap_ena,
sizeof(phy_data->dfe_tap_ena));
}
/* Initialize supported features */
pdata->phy.supported = SUPPORTED_Autoneg;
pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
pdata->phy.supported |= SUPPORTED_Backplane;
pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
switch (phy_data->speed_set) {
case XGBE_SPEEDSET_1000_10000:
pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
break;
case XGBE_SPEEDSET_2500_10000:
pdata->phy.supported |= SUPPORTED_2500baseX_Full;
break;
}
if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
pdata->phy_data = phy_data;
return 0;
}
void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *phy_if)
{
struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
phy_impl->init = xgbe_phy_init;
phy_impl->exit = xgbe_phy_exit;
phy_impl->reset = xgbe_phy_reset;
phy_impl->start = xgbe_phy_start;
phy_impl->stop = xgbe_phy_stop;
phy_impl->link_status = xgbe_phy_link_status;
phy_impl->valid_speed = xgbe_phy_valid_speed;
phy_impl->use_mode = xgbe_phy_use_mode;
phy_impl->set_mode = xgbe_phy_set_mode;
phy_impl->get_mode = xgbe_phy_get_mode;
phy_impl->switch_mode = xgbe_phy_switch_mode;
phy_impl->cur_mode = xgbe_phy_cur_mode;
phy_impl->an_mode = xgbe_phy_an_mode;
phy_impl->an_outcome = xgbe_phy_an_outcome;
phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
phy_impl->kr_training_post = xgbe_phy_kr_training_post;
}
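/* For illustration, the common code is expected to consume these
 * callbacks through the phy_impl indirection roughly as follows (a
 * hedged sketch; the actual call sites live in xgbe-mdio.c and are not
 * shown in this diff):
 *
 *	if (pdata->phy_if.phy_impl.valid_speed(pdata, SPEED_10000))
 *		pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KR);
 */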
/*
* AMD 10Gb Ethernet driver
*
* This file is available to you under your choice of the following two
* licenses:
*
* License 1: GPLv2
*
* Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
*
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or (at
* your option) any later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* The Synopsys DWC ETHER XGMAC Software Driver and documentation
* (hereinafter "Software") is an unsupported proprietary work of Synopsys,
* Inc. unless otherwise expressly agreed to in writing between Synopsys
* and you.
*
* The Software IS NOT an item of Licensed Software or Licensed Product
* under any End User Software License Agreement or Agreement for Licensed
* Product with Synopsys or any supplement thereto. Permission is hereby
* granted, free of charge, to any person obtaining a copy of this software
* annotated with this license and the Software, to deal in the Software
* without restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
* BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
*
* License 2: Modified BSD
*
* Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Advanced Micro Devices, Inc. nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* The Synopsys DWC ETHER XGMAC Software Driver and documentation
* (hereinafter "Software") is an unsupported proprietary work of Synopsys,
* Inc. unless otherwise expressly agreed to in writing between Synopsys
* and you.
*
* The Software IS NOT an item of Licensed Software or Licensed Product
* under any End User Software License Agreement or Agreement for Licensed
* Product with Synopsys or any supplement thereto. Permission is hereby
* granted, free of charge, to any person obtaining a copy of this software
* annotated with this license and the Software, to deal in the Software
* without restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
* BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
#include <linux/mdio.h>
#include "xgbe.h"
#include "xgbe-common.h"
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[];
static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
{
const struct acpi_device_id *id;
id = acpi_match_device(xgbe_acpi_match, pdata->dev);
return id ? (struct xgbe_version_data *)id->driver_data : NULL;
}
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
u32 property;
int ret;
/* Obtain the system clock setting */
ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
if (ret) {
dev_err(dev, "unable to obtain %s property\n",
XGBE_ACPI_DMA_FREQ);
return ret;
}
pdata->sysclk_rate = property;
/* Obtain the PTP clock setting */
ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
if (ret) {
dev_err(dev, "unable to obtain %s property\n",
XGBE_ACPI_PTP_FREQ);
return ret;
}
pdata->ptpclk_rate = property;
return 0;
}
#else /* CONFIG_ACPI */
static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
{
return NULL;
}
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
}
#endif /* CONFIG_ACPI */
#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[];
static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
{
const struct of_device_id *id;
id = of_match_device(xgbe_of_match, pdata->dev);
return id ? (struct xgbe_version_data *)id->data : NULL;
}
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
/* Obtain the system clock setting */
pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
if (IS_ERR(pdata->sysclk)) {
dev_err(dev, "dma devm_clk_get failed\n");
return PTR_ERR(pdata->sysclk);
}
pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
/* Obtain the PTP clock setting */
pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
if (IS_ERR(pdata->ptpclk)) {
dev_err(dev, "ptp devm_clk_get failed\n");
return PTR_ERR(pdata->ptpclk);
}
pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
return 0;
}
static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
struct device_node *phy_node;
struct platform_device *phy_pdev;
phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
if (phy_node) {
/* Old style device tree:
* The XGBE and PHY resources are separate
*/
phy_pdev = of_find_device_by_node(phy_node);
of_node_put(phy_node);
} else {
/* New style device tree:
* The XGBE and PHY resources are grouped together with
* the PHY resources listed last
*/
get_device(dev);
phy_pdev = pdata->platdev;
}
return phy_pdev;
}
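/* For reference: in the old-style binding the XGBE node carries a
 * "phy-handle" phandle pointing at a separate PHY node that owns the
 * rxtx/sir0/sir1 regions and the auto-negotiation interrupt; in the
 * new-style binding a single node lists the PHY MEM/IRQ resources after
 * the XGBE ones, so phy_pdev ends up equal to pdata->platdev. (This is a
 * paraphrase of the probe logic, not a binding document.)
 */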
#else /* CONFIG_OF */
static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
{
return NULL;
}
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
}
static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
return NULL;
}
#endif /* CONFIG_OF */
static unsigned int xgbe_resource_count(struct platform_device *pdev,
unsigned int type)
{
unsigned int count;
int i;
for (i = 0, count = 0; i < pdev->num_resources; i++) {
struct resource *res = &pdev->resource[i];
if (type == resource_type(res))
count++;
}
return count;
}
static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
{
struct platform_device *phy_pdev;
if (pdata->use_acpi) {
get_device(pdata->dev);
phy_pdev = pdata->platdev;
} else {
phy_pdev = xgbe_of_get_phy_pdev(pdata);
}
return phy_pdev;
}
static struct xgbe_version_data *xgbe_get_vdata(struct xgbe_prv_data *pdata)
{
return pdata->use_acpi ? xgbe_acpi_vdata(pdata)
: xgbe_of_vdata(pdata);
}
static int xgbe_platform_probe(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata;
struct device *dev = &pdev->dev;
struct platform_device *phy_pdev;
struct resource *res;
const char *phy_mode;
unsigned int phy_memnum, phy_irqnum;
unsigned int dma_irqnum, dma_irqend;
enum dev_dma_attr attr;
int ret;
pdata = xgbe_alloc_pdata(dev);
if (IS_ERR(pdata)) {
ret = PTR_ERR(pdata);
goto err_alloc;
}
pdata->platdev = pdev;
pdata->adev = ACPI_COMPANION(dev);
platform_set_drvdata(pdev, pdata);
/* Check if we should use ACPI or DT */
pdata->use_acpi = dev->of_node ? 0 : 1;
/* Get the version data */
pdata->vdata = xgbe_get_vdata(pdata);
phy_pdev = xgbe_get_phy_pdev(pdata);
if (!phy_pdev) {
dev_err(dev, "unable to obtain phy device\n");
ret = -EINVAL;
goto err_phydev;
}
pdata->phy_platdev = phy_pdev;
pdata->phy_dev = &phy_pdev->dev;
if (pdev == phy_pdev) {
/* New style device tree or ACPI:
* The XGBE and PHY resources are grouped together with
* the PHY resources listed last
*/
phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
dma_irqnum = 1;
dma_irqend = phy_irqnum;
} else {
/* Old style device tree:
* The XGBE and PHY resources are separate
*/
phy_memnum = 0;
phy_irqnum = 0;
dma_irqnum = 1;
dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
}
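/* Worked example of the index math above for the grouped layout,
 * assuming illustrative counts of 5 MEM and 6 IRQ resources:
 *	MEM:  0 = xgmac, 1 = xpcs, 2 = rxtx, 3 = sir0, 4 = sir1
 *	      -> phy_memnum = 5 - 3 = 2
 *	IRQ:  0 = device, 1..4 = per-channel DMA, 5 = auto-negotiation
 *	      -> phy_irqnum = 6 - 1 = 5, dma_irqnum = 1, dma_irqend = 5
 */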
/* Obtain the mmio areas for the device */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pdata->xgmac_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->xgmac_regs)) {
dev_err(dev, "xgmac ioremap failed\n");
ret = PTR_ERR(pdata->xgmac_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
pdata->xpcs_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->xpcs_regs)) {
dev_err(dev, "xpcs ioremap failed\n");
ret = PTR_ERR(pdata->xpcs_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->rxtx_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->rxtx_regs)) {
dev_err(dev, "rxtx ioremap failed\n");
ret = PTR_ERR(pdata->rxtx_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->sir0_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->sir0_regs)) {
dev_err(dev, "sir0 ioremap failed\n");
ret = PTR_ERR(pdata->sir0_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->sir1_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->sir1_regs)) {
dev_err(dev, "sir1 ioremap failed\n");
ret = PTR_ERR(pdata->sir1_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
/* Retrieve the MAC address */
ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
pdata->mac_addr,
sizeof(pdata->mac_addr));
if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
if (!ret)
ret = -EINVAL;
goto err_io;
}
/* Retrieve the PHY mode - it must be "xgmii" */
ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
&phy_mode);
if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
if (!ret)
ret = -EINVAL;
goto err_io;
}
pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
/* Check for per channel interrupt support */
if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
pdata->per_channel_irq = 1;
/* Obtain device settings unique to ACPI/OF */
if (pdata->use_acpi)
ret = xgbe_acpi_support(pdata);
else
ret = xgbe_of_support(pdata);
if (ret)
goto err_io;
/* Set the DMA coherency values */
attr = device_get_dma_attr(dev);
if (attr == DEV_DMA_NOT_SUPPORTED) {
dev_err(dev, "DMA is not supported");
ret = -ENODEV;
goto err_io;
}
pdata->coherent = (attr == DEV_DMA_COHERENT);
if (pdata->coherent) {
pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
pdata->arcache = XGBE_DMA_OS_ARCACHE;
pdata->awcache = XGBE_DMA_OS_AWCACHE;
} else {
pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
pdata->arcache = XGBE_DMA_SYS_ARCACHE;
pdata->awcache = XGBE_DMA_SYS_AWCACHE;
}
/* Set the maximum FIFO sizes */
pdata->tx_max_fifo_size = pdata->vdata->tx_max_fifo_size;
pdata->rx_max_fifo_size = pdata->vdata->rx_max_fifo_size;
/* Set the hardware channel and queue counts */
xgbe_set_counts(pdata);
/* Get the device interrupt */
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(dev, "platform_get_irq 0 failed\n");
goto err_io;
}
pdata->dev_irq = ret;
/* Get the per channel DMA interrupts */
if (pdata->per_channel_irq) {
unsigned int i, max = ARRAY_SIZE(pdata->channel_irq);
for (i = 0; (i < max) && (dma_irqnum < dma_irqend); i++) {
ret = platform_get_irq(pdata->platdev, dma_irqnum++);
if (ret < 0) {
netdev_err(pdata->netdev,
"platform_get_irq %u failed\n",
dma_irqnum - 1);
goto err_io;
}
pdata->channel_irq[i] = ret;
}
}
/* Get the auto-negotiation interrupt */
ret = platform_get_irq(phy_pdev, phy_irqnum++);
if (ret < 0) {
dev_err(dev, "platform_get_irq phy 0 failed\n");
goto err_io;
}
pdata->an_irq = ret;
/* Configure the netdev resource */
ret = xgbe_config_netdev(pdata);
if (ret)
goto err_io;
netdev_notice(pdata->netdev, "net device enabled\n");
return 0;
err_io:
platform_device_put(phy_pdev);
err_phydev:
xgbe_free_pdata(pdata);
err_alloc:
dev_notice(dev, "net device not enabled\n");
return ret;
}
static int xgbe_platform_remove(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata = platform_get_drvdata(pdev);
xgbe_deconfig_netdev(pdata);
platform_device_put(pdata->phy_platdev);
xgbe_free_pdata(pdata);
return 0;
}
#ifdef CONFIG_PM
static int xgbe_platform_suspend(struct device *dev)
{
struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
struct net_device *netdev = pdata->netdev;
int ret = 0;
DBGPR("-->xgbe_suspend\n");
if (netif_running(netdev))
ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
DBGPR("<--xgbe_suspend\n");
return ret;
}
static int xgbe_platform_resume(struct device *dev)
{
struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
struct net_device *netdev = pdata->netdev;
int ret = 0;
DBGPR("-->xgbe_resume\n");
pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
if (netif_running(netdev)) {
ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
/* Schedule a restart in case the link or phy state changed
* while we were powered down.
*/
schedule_work(&pdata->restart_work);
}
DBGPR("<--xgbe_resume\n");
return ret;
}
#endif /* CONFIG_PM */
static const struct xgbe_version_data xgbe_v1 = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v1,
.xpcs_access = XGBE_XPCS_ACCESS_V1,
.tx_max_fifo_size = 81920,
.rx_max_fifo_size = 81920,
};
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
{ .id = "AMDI8001",
.driver_data = (kernel_ulong_t)&xgbe_v1 },
{},
};
MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
#endif
#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[] = {
{ .compatible = "amd,xgbe-seattle-v1a",
.data = &xgbe_v1 },
{},
};
MODULE_DEVICE_TABLE(of, xgbe_of_match);
#endif
static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
xgbe_platform_suspend, xgbe_platform_resume);
static struct platform_driver xgbe_driver = {
.driver = {
.name = "amd-xgbe",
#ifdef CONFIG_ACPI
.acpi_match_table = xgbe_acpi_match,
#endif
#ifdef CONFIG_OF
.of_match_table = xgbe_of_match,
#endif
.pm = &xgbe_platform_pm_ops,
},
.probe = xgbe_platform_probe,
.remove = xgbe_platform_remove,
};
int xgbe_platform_init(void)
{
return platform_driver_register(&xgbe_driver);
}
void xgbe_platform_exit(void)
{
platform_driver_unregister(&xgbe_driver);
}
......@@ -129,7 +129,7 @@
#include <net/dcbnl.h>
#define XGBE_DRV_NAME "amd-xgbe"
#define XGBE_DRV_VERSION "1.0.2"
#define XGBE_DRV_VERSION "1.0.3"
#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
/* Descriptor related defines */
......@@ -158,7 +158,8 @@
#define XGBE_MAX_DMA_CHANNELS 16
#define XGBE_MAX_QUEUES 16
#define XGBE_DMA_STOP_TIMEOUT 5
#define XGBE_PRIORITY_QUEUES 8
#define XGBE_DMA_STOP_TIMEOUT 1
/* DMA cache settings - Outer sharable, write-back, write-allocate */
#define XGBE_DMA_OS_AXDOMAIN 0x2
......@@ -177,18 +178,19 @@
#define XGMAC_MAX_STD_PACKET 1518
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018
#define XGMAC_ETH_PREAMBLE (12 + 8) /* Inter-frame gap + preamble */
#define XGMAC_PFC_DATA_LEN 46
#define XGMAC_PFC_DELAYS 14000
#define XGMAC_PRIO_QUEUES(_cnt) \
min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, (_cnt))
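/* e.g. XGMAC_PRIO_QUEUES(16) == 8, since the priority queue count is
 * capped at IEEE_8021QAZ_MAX_TCS, while XGMAC_PRIO_QUEUES(4) == 4
 */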
/* Common property names */
#define XGBE_MAC_ADDR_PROPERTY "mac-address"
#define XGBE_PHY_MODE_PROPERTY "phy-mode"
#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
#define XGBE_SPEEDSET_PROPERTY "amd,speed-set"
#define XGBE_BLWC_PROPERTY "amd,serdes-blwc"
#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
#define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
#define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp"
#define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
#define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
/* Device-tree clock names */
#define XGBE_DMA_CLOCK "dma_clk"
......@@ -208,7 +210,12 @@
#define XGMAC_DRIVER_CONTEXT 1
#define XGMAC_IOCTL_CONTEXT 2
#define XGBE_FIFO_MAX 81920
#define XGMAC_FIFO_MIN_ALLOC 2048
#define XGMAC_FIFO_UNIT 256
#define XGMAC_FIFO_ALIGN(_x) \
(((_x) + XGMAC_FIFO_UNIT - 1) & ~(XGMAC_FIFO_UNIT - 1))
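/* FIFO space is carved out in XGMAC_FIFO_UNIT (256 byte) blocks, so for
 * example XGMAC_FIFO_ALIGN(1000) == 1024 while XGMAC_FIFO_ALIGN(2048)
 * == 2048 (already aligned)
 */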
#define XGMAC_FIFO_FC_OFF 2048
#define XGMAC_FIFO_FC_MIN 4096
#define XGBE_TC_MIN_QUANTUM 10
......@@ -233,6 +240,14 @@
/* Flow control queue count */
#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
/* Flow control threshold units */
#define XGMAC_FLOW_CONTROL_UNIT 512
#define XGMAC_FLOW_CONTROL_ALIGN(_x) \
(((_x) + XGMAC_FLOW_CONTROL_UNIT - 1) & ~(XGMAC_FLOW_CONTROL_UNIT - 1))
#define XGMAC_FLOW_CONTROL_VALUE(_x) \
(((_x) < 1024) ? 0 : ((_x) / XGMAC_FLOW_CONTROL_UNIT) - 2)
#define XGMAC_FLOW_CONTROL_MAX 33280
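/* Thresholds are encoded in XGMAC_FLOW_CONTROL_UNIT (512 byte) steps
 * minus a bias of two units: XGMAC_FLOW_CONTROL_VALUE(4096) ==
 * 4096/512 - 2 == 6, anything below 1024 encodes as 0, and
 * XGMAC_FLOW_CONTROL_MAX (33280) encodes as 63
 */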
/* Maximum MAC address hash table size (256 bits = 8 bytes) */
#define XGBE_MAC_HASH_TABLE_SIZE 8
......@@ -244,46 +259,13 @@
/* Auto-negotiation */
#define XGBE_AN_MS_TIMEOUT 500
#define XGBE_LINK_TIMEOUT 10
#define XGBE_AN_INT_CMPLT 0x01
#define XGBE_AN_INC_LINK 0x02
#define XGBE_AN_PG_RCV 0x04
#define XGBE_AN_INT_MASK 0x07
/* Rate-change complete wait/retry count */
#define XGBE_RATECHANGE_COUNT 500
/* Default SerDes settings */
#define XGBE_SPEED_10000_BLWC 0
#define XGBE_SPEED_10000_CDR 0x7
#define XGBE_SPEED_10000_PLL 0x1
#define XGBE_SPEED_10000_PQ 0x12
#define XGBE_SPEED_10000_RATE 0x0
#define XGBE_SPEED_10000_TXAMP 0xa
#define XGBE_SPEED_10000_WORD 0x7
#define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1
#define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f
#define XGBE_SPEED_2500_BLWC 1
#define XGBE_SPEED_2500_CDR 0x2
#define XGBE_SPEED_2500_PLL 0x0
#define XGBE_SPEED_2500_PQ 0xa
#define XGBE_SPEED_2500_RATE 0x1
#define XGBE_SPEED_2500_TXAMP 0xf
#define XGBE_SPEED_2500_WORD 0x1
#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0
#define XGBE_SPEED_1000_BLWC 1
#define XGBE_SPEED_1000_CDR 0x2
#define XGBE_SPEED_1000_PLL 0x0
#define XGBE_SPEED_1000_PQ 0xa
#define XGBE_SPEED_1000_RATE 0x3
#define XGBE_SPEED_1000_TXAMP 0xf
#define XGBE_SPEED_1000_WORD 0x1
#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
#define XGBE_LINK_TIMEOUT 5
#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
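/* Worked decode of the bits above: an AN status value of 0x1a has BIT(1)
 * set (link up), 0x08 in the speed field (1000Mbps) and BIT(4) set
 * (full duplex)
 */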
struct xgbe_prv_data;
......@@ -487,6 +469,18 @@ enum xgbe_speed {
XGBE_SPEEDS,
};
enum xgbe_xpcs_access {
XGBE_XPCS_ACCESS_V1 = 0,
XGBE_XPCS_ACCESS_V2,
};
enum xgbe_an_mode {
XGBE_AN_MODE_CL73 = 0,
XGBE_AN_MODE_CL37,
XGBE_AN_MODE_CL37_SGMII,
XGBE_AN_MODE_NONE,
};
enum xgbe_an {
XGBE_AN_READY = 0,
XGBE_AN_PAGE_RECEIVED,
......@@ -504,8 +498,10 @@ enum xgbe_rx {
};
enum xgbe_mode {
XGBE_MODE_KR = 0,
XGBE_MODE_KX,
XGBE_MODE_KX_1000 = 0,
XGBE_MODE_KX_2500,
XGBE_MODE_KR,
XGBE_MODE_UNKNOWN,
};
enum xgbe_speedset {
......@@ -601,9 +597,7 @@ struct xgbe_hw_if {
int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
int (*set_gmii_speed)(struct xgbe_prv_data *);
int (*set_gmii_2500_speed)(struct xgbe_prv_data *);
int (*set_xgmii_speed)(struct xgbe_prv_data *);
int (*set_speed)(struct xgbe_prv_data *, int);
void (*enable_tx)(struct xgbe_prv_data *);
void (*disable_tx)(struct xgbe_prv_data *);
......@@ -684,9 +678,53 @@ struct xgbe_hw_if {
int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
};
/* This structure represents the implementation-specific routines for a
 * given PHY implementation. All routines are required unless noted below.
* Optional routines:
* kr_training_pre, kr_training_post
*/
struct xgbe_phy_impl_if {
/* Perform setup/teardown actions */
int (*init)(struct xgbe_prv_data *);
void (*exit)(struct xgbe_prv_data *);
/* Perform start/stop specific actions */
int (*reset)(struct xgbe_prv_data *);
int (*start)(struct xgbe_prv_data *);
void (*stop)(struct xgbe_prv_data *);
/* Return the link status */
int (*link_status)(struct xgbe_prv_data *);
/* Indicate if a particular speed is valid */
bool (*valid_speed)(struct xgbe_prv_data *, int);
/* Check if the specified mode can/should be used */
bool (*use_mode)(struct xgbe_prv_data *, enum xgbe_mode);
/* Switch the PHY into various modes */
void (*set_mode)(struct xgbe_prv_data *, enum xgbe_mode);
/* Retrieve mode needed for a specific speed */
enum xgbe_mode (*get_mode)(struct xgbe_prv_data *, int);
/* Retrieve new/next mode when trying to auto-negotiate */
enum xgbe_mode (*switch_mode)(struct xgbe_prv_data *);
/* Retrieve current mode */
enum xgbe_mode (*cur_mode)(struct xgbe_prv_data *);
/* Retrieve current auto-negotiation mode */
enum xgbe_an_mode (*an_mode)(struct xgbe_prv_data *);
/* Process results of auto-negotiation */
enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
/* Pre/Post KR training enablement support */
void (*kr_training_pre)(struct xgbe_prv_data *);
void (*kr_training_post)(struct xgbe_prv_data *);
};
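/* A minimal, hypothetical sketch of wiring up a second implementation of
 * this interface (the "v2" name and callbacks below are illustrative; see
 * xgbe_init_function_ptrs_phy_v1() earlier for the real v1 wiring). The
 * optional KR training hooks may simply be left NULL:
 *
 * void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
 * {
 *	struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
 *
 *	phy_impl->init = xgbe_phy_v2_init;
 *	phy_impl->exit = xgbe_phy_v2_exit;
 *	phy_impl->link_status = xgbe_phy_v2_link_status;
 *	(assign the remaining required callbacks the same way;
 *	 kr_training_pre and kr_training_post stay NULL)
 * }
 */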
struct xgbe_phy_if {
/* For initial PHY setup */
void (*phy_init)(struct xgbe_prv_data *);
/* For PHY setup/teardown */
int (*phy_init)(struct xgbe_prv_data *);
void (*phy_exit)(struct xgbe_prv_data *);
/* For PHY support when setting device up/down */
int (*phy_reset)(struct xgbe_prv_data *);
......@@ -696,6 +734,12 @@ struct xgbe_phy_if {
/* For PHY support while device is up */
void (*phy_status)(struct xgbe_prv_data *);
int (*phy_config_aneg)(struct xgbe_prv_data *);
/* For PHY settings validation */
bool (*phy_valid_speed)(struct xgbe_prv_data *, int);
/* PHY implementation specific services */
struct xgbe_phy_impl_if phy_impl;
};
struct xgbe_desc_if {
......@@ -755,11 +799,24 @@ struct xgbe_hw_features {
unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
};
struct xgbe_version_data {
void (*init_function_ptrs_phy_impl)(struct xgbe_phy_if *);
enum xgbe_xpcs_access xpcs_access;
unsigned int mmc_64bit;
unsigned int tx_max_fifo_size;
unsigned int rx_max_fifo_size;
};
struct xgbe_prv_data {
struct net_device *netdev;
struct platform_device *pdev;
struct platform_device *platdev;
struct acpi_device *adev;
struct device *dev;
struct platform_device *phy_platdev;
struct device *phy_dev;
/* Version related data */
struct xgbe_version_data *vdata;
/* ACPI or DT flag */
unsigned int use_acpi;
......@@ -776,6 +833,9 @@ struct xgbe_prv_data {
/* XPCS indirect addressing lock */
spinlock_t xpcs_lock;
unsigned int xpcs_window;
unsigned int xpcs_window_size;
unsigned int xpcs_window_mask;
/* RSS addressing mutex */
struct mutex rss_mutex;
......@@ -785,6 +845,7 @@ struct xgbe_prv_data {
int dev_irq;
unsigned int per_channel_irq;
int channel_irq[XGBE_MAX_DMA_CHANNELS];
struct xgbe_hw_if hw_if;
struct xgbe_phy_if phy_if;
......@@ -803,12 +864,16 @@ struct xgbe_prv_data {
/* Rings for Tx/Rx on a DMA channel */
struct xgbe_channel *channel;
unsigned int tx_max_channel_count;
unsigned int rx_max_channel_count;
unsigned int channel_count;
unsigned int tx_ring_count;
unsigned int tx_desc_count;
unsigned int rx_ring_count;
unsigned int rx_desc_count;
unsigned int tx_max_q_count;
unsigned int rx_max_q_count;
unsigned int tx_q_count;
unsigned int rx_q_count;
......@@ -820,11 +885,13 @@ struct xgbe_prv_data {
unsigned int tx_threshold;
unsigned int tx_pbl;
unsigned int tx_osp_mode;
unsigned int tx_max_fifo_size;
/* Rx settings */
unsigned int rx_sf_mode;
unsigned int rx_threshold;
unsigned int rx_pbl;
unsigned int rx_max_fifo_size;
/* Tx coalescing settings */
unsigned int tx_usecs;
......@@ -842,6 +909,8 @@ struct xgbe_prv_data {
unsigned int pause_autoneg;
unsigned int tx_pause;
unsigned int rx_pause;
unsigned int rx_rfa[XGBE_MAX_QUEUES];
unsigned int rx_rfd[XGBE_MAX_QUEUES];
/* Receive Side Scaling settings */
u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
......@@ -881,6 +950,8 @@ struct xgbe_prv_data {
struct ieee_pfc *pfc;
unsigned int q2tc_map[XGBE_MAX_QUEUES];
unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
unsigned int pfcq[XGBE_MAX_QUEUES];
unsigned int pfc_rfa;
u8 num_tcs;
/* Hardware features of the device */
......@@ -901,6 +972,8 @@ struct xgbe_prv_data {
int phy_speed;
/* MDIO/PHY related settings */
unsigned int phy_started;
void *phy_data;
struct xgbe_phy phy;
int mdio_mmd;
unsigned long link_check;
......@@ -911,23 +984,9 @@ struct xgbe_prv_data {
int an_irq;
struct work_struct an_irq_work;
unsigned int speed_set;
/* SerDes UEFI configurable settings.
* Switching between modes/speeds requires new values for some
* SerDes settings. The values can be supplied as device
* properties in array format. The first array entry is for
* 1GbE, second for 2.5GbE and third for 10GbE
*/
u32 serdes_blwc[XGBE_SPEEDS];
u32 serdes_cdr_rate[XGBE_SPEEDS];
u32 serdes_pq_skew[XGBE_SPEEDS];
u32 serdes_tx_amp[XGBE_SPEEDS];
u32 serdes_dfe_tap_cfg[XGBE_SPEEDS];
u32 serdes_dfe_tap_ena[XGBE_SPEEDS];
/* Auto-negotiation state machine support */
unsigned int an_int;
unsigned int an_status;
struct mutex an_mutex;
enum xgbe_an an_result;
enum xgbe_an an_state;
......@@ -938,6 +997,7 @@ struct xgbe_prv_data {
unsigned int parallel_detect;
unsigned int fec_ability;
unsigned long an_start;
enum xgbe_an_mode an_mode;
unsigned int lpm_ctrl; /* CTRL1 for resume */
......@@ -952,9 +1012,18 @@ struct xgbe_prv_data {
};
/* Function prototypes */
struct xgbe_prv_data *xgbe_alloc_pdata(struct device *);
void xgbe_free_pdata(struct xgbe_prv_data *);
void xgbe_set_counts(struct xgbe_prv_data *);
int xgbe_config_netdev(struct xgbe_prv_data *);
void xgbe_deconfig_netdev(struct xgbe_prv_data *);
int xgbe_platform_init(void);
void xgbe_platform_exit(void);
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
const struct net_device_ops *xgbe_get_netdev_ops(void);
const struct ethtool_ops *xgbe_get_ethtool_ops(void);
......