Commit 298bfe27 authored by Jakub Kicinski

Merge branch 'mlxbf_gige-add-bluefield-3-support'

David Thompson says:

====================
mlxbf_gige: add BlueField-3 support

This patch series extends the "mlxbf_gige" Ethernet
driver to support the third generation BlueField
SoC (BF3).  The existing "mlxbf_gige" driver gains
BF3-specific logic, and run-time decisions are made
by the driver based on the SoC generation
(BF2 vs. BF3).

The BF3 SoC is similar to the BF2 SoC with regard to
transmit and receive packet processing:
       * Driver rings usage; consumer & producer indices
       * Single queue for receive and transmit
       * DMA operation

The differences between the BF3 and BF2 SoCs are:
       * In addition to supporting 1Gbps interface speed, the BF3 SoC
         adds support for 10Mbps and 100Mbps interface speeds
       * BF3 requires SerDes config logic to support its SGMII interface
       * BF3 adds support for "ethtool -s" for interface speed config
       * BF3 utilizes different MDIO logic for accessing the
         board-level PHY device

Testing
  - Successful build of kernel for ARM64, ARM32, X86_64
  - Tested ARM64 build on FastModels, Palladium, SoC
====================

Link: https://lore.kernel.org/r/20230112202609.21331-1-davthompson@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents e2a95750 e1cc8ce4
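The recurring pattern in this series is a single run-time dispatch: the probe path reads the silicon version register once into priv->hw_version, and every BF2/BF3 difference (PHY link configuration, MDIO gateway register layout, core-PLL resource) is then resolved by indexing a per-SoC table with that value. The condensed sketch below pulls those steps together from the hunks that follow; the wrapper function name is illustrative only, and error handling plus unrelated probe work are omitted.

#include <linux/io.h>
#include <linux/phy.h>

#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"

/* Illustrative-only wrapper that condenses the per-SoC dispatch added by
 * this series; every identifier except the function name comes from the
 * diff below.
 */
static int mlxbf_gige_hw_version_dispatch(struct mlxbf_gige *priv,
					  struct net_device *netdev,
					  struct phy_device *phydev)
{
	int err;

	/* MLXBF_GIGE_VERSION reads 0x0 on BlueField-2, 0x1 on BlueField-3 */
	priv->hw_version = readq(priv->base + MLXBF_GIGE_VERSION);

	/* The MDIO code picks the BF2 or BF3 gateway register layout; in the
	 * real driver this assignment happens in mlxbf_gige_mdio_probe(),
	 * where the mlxbf_gige_mdio_gw_t table is defined.
	 */
	priv->mdio_gw = &mlxbf_gige_mdio_gw_t[priv->hw_version];

	/* PHY handling picks GMII (BF2) or SGMII (BF3) along with the
	 * matching adjust_link and link-mode callbacks.
	 */
	err = phy_connect_direct(netdev, phydev,
				 mlxbf_gige_link_cfgs[priv->hw_version].adjust_link,
				 mlxbf_gige_link_cfgs[priv->hw_version].phy_mode);
	if (err)
		return err;

	mlxbf_gige_link_cfgs[priv->hw_version].set_phy_link_mode(phydev);
	return 0;
}

On BF3 the interface speed can then be changed from user space, e.g. "ethtool -s <iface> speed 100 duplex full": the request enters through the new .set_link_ksettings ethtool op, and once the link comes back up mlxbf_gige_bf3_adjust_link() reprograms the PLU IPG size and SGMII mode for the new rate.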
@@ -14,6 +14,7 @@
#include <linux/irqreturn.h>
#include <linux/netdevice.h>
#include <linux/irq.h>
#include <linux/phy.h>
/* The silicon design supports a maximum RX ring size of
* 32K entries. Based on current testing this maximum size
@@ -67,6 +68,29 @@ struct mlxbf_gige_stats {
u64 rx_filter_discard_pkts;
};
struct mlxbf_gige_reg_param {
u32 mask;
u32 shift;
};
struct mlxbf_gige_mdio_gw {
u32 gw_address;
u32 read_data_address;
struct mlxbf_gige_reg_param busy;
struct mlxbf_gige_reg_param write_data;
struct mlxbf_gige_reg_param read_data;
struct mlxbf_gige_reg_param devad;
struct mlxbf_gige_reg_param partad;
struct mlxbf_gige_reg_param opcode;
struct mlxbf_gige_reg_param st1;
};
struct mlxbf_gige_link_cfg {
void (*set_phy_link_mode)(struct phy_device *phydev);
void (*adjust_link)(struct net_device *netdev);
phy_interface_t phy_mode;
};
struct mlxbf_gige {
void __iomem *base;
void __iomem *llu_base;
@@ -102,6 +126,9 @@ struct mlxbf_gige {
u8 valid_polarity;
struct napi_struct napi;
struct mlxbf_gige_stats stats;
u8 hw_version;
struct mlxbf_gige_mdio_gw *mdio_gw;
int prev_speed;
};
/* Rx Work Queue Element definitions */
@@ -135,4 +135,5 @@ const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.nway_reset = phy_ethtool_nway_reset,
.get_pauseparam = mlxbf_gige_get_pauseparam,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
@@ -205,7 +205,7 @@ static int mlxbf_gige_stop(struct net_device *netdev)
}
static int mlxbf_gige_eth_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
if (!(netif_running(netdev)))
return -EINVAL;
@@ -263,13 +263,99 @@ static const struct net_device_ops mlxbf_gige_netdev_ops = {
.ndo_get_stats64 = mlxbf_gige_get_stats64,
};
static void mlxbf_gige_adjust_link(struct net_device *netdev)
static void mlxbf_gige_bf2_adjust_link(struct net_device *netdev)
{
struct phy_device *phydev = netdev->phydev;
phy_print_status(phydev);
}
static void mlxbf_gige_bf3_adjust_link(struct net_device *netdev)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
u8 sgmii_mode;
u16 ipg_size;
u32 val;
if (phydev->link && phydev->speed != priv->prev_speed) {
switch (phydev->speed) {
case 1000:
ipg_size = MLXBF_GIGE_1G_IPG_SIZE;
sgmii_mode = MLXBF_GIGE_1G_SGMII_MODE;
break;
case 100:
ipg_size = MLXBF_GIGE_100M_IPG_SIZE;
sgmii_mode = MLXBF_GIGE_100M_SGMII_MODE;
break;
case 10:
ipg_size = MLXBF_GIGE_10M_IPG_SIZE;
sgmii_mode = MLXBF_GIGE_10M_SGMII_MODE;
break;
default:
return;
}
val = readl(priv->plu_base + MLXBF_GIGE_PLU_TX_REG0);
val &= ~(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK | MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK);
val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK, ipg_size);
val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK, sgmii_mode);
writel(val, priv->plu_base + MLXBF_GIGE_PLU_TX_REG0);
val = readl(priv->plu_base + MLXBF_GIGE_PLU_RX_REG0);
val &= ~MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK;
val |= FIELD_PREP(MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK, sgmii_mode);
writel(val, priv->plu_base + MLXBF_GIGE_PLU_RX_REG0);
priv->prev_speed = phydev->speed;
}
phy_print_status(phydev);
}
static void mlxbf_gige_bf2_set_phy_link_mode(struct phy_device *phydev)
{
/* MAC only supports 1000T full duplex mode */
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
/* Only symmetric pause with flow control enabled is supported so no
* need to negotiate pause.
*/
linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
}
static void mlxbf_gige_bf3_set_phy_link_mode(struct phy_device *phydev)
{
/* MAC only supports full duplex mode */
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
/* Only symmetric pause with flow control enabled is supported so no
* need to negotiate pause.
*/
linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
}
static struct mlxbf_gige_link_cfg mlxbf_gige_link_cfgs[] = {
[MLXBF_GIGE_VERSION_BF2] = {
.set_phy_link_mode = mlxbf_gige_bf2_set_phy_link_mode,
.adjust_link = mlxbf_gige_bf2_adjust_link,
.phy_mode = PHY_INTERFACE_MODE_GMII
},
[MLXBF_GIGE_VERSION_BF3] = {
.set_phy_link_mode = mlxbf_gige_bf3_set_phy_link_mode,
.adjust_link = mlxbf_gige_bf3_adjust_link,
.phy_mode = PHY_INTERFACE_MODE_SGMII
}
};
static int mlxbf_gige_probe(struct platform_device *pdev)
{
struct phy_device *phydev;
@@ -315,6 +401,8 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
priv->hw_version = readq(base + MLXBF_GIGE_VERSION);
/* Attach MDIO device */
err = mlxbf_gige_mdio_probe(pdev, priv);
if (err)
@@ -357,25 +445,14 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
phydev->irq = phy_irq;
err = phy_connect_direct(netdev, phydev,
mlxbf_gige_adjust_link,
PHY_INTERFACE_MODE_GMII);
mlxbf_gige_link_cfgs[priv->hw_version].adjust_link,
mlxbf_gige_link_cfgs[priv->hw_version].phy_mode);
if (err) {
dev_err(&pdev->dev, "Could not attach to PHY\n");
goto out;
}
/* MAC only supports 1000T full duplex mode */
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
/* Only symmetric pause with flow control enabled is supported so no
* need to negotiate pause.
*/
linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
mlxbf_gige_link_cfgs[priv->hw_version].set_phy_link_mode(phydev);
/* Display information about attached PHY device */
phy_attached_info(phydev);
@@ -23,9 +23,75 @@
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
#include "mlxbf_gige_mdio_bf2.h"
#include "mlxbf_gige_mdio_bf3.h"
#define MLXBF_GIGE_MDIO_GW_OFFSET 0x0
#define MLXBF_GIGE_MDIO_CFG_OFFSET 0x4
static struct mlxbf_gige_mdio_gw mlxbf_gige_mdio_gw_t[] = {
[MLXBF_GIGE_VERSION_BF2] = {
.gw_address = MLXBF2_GIGE_MDIO_GW_OFFSET,
.read_data_address = MLXBF2_GIGE_MDIO_GW_OFFSET,
.busy = {
.mask = MLXBF2_GIGE_MDIO_GW_BUSY_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_BUSY_SHIFT,
},
.read_data = {
.mask = MLXBF2_GIGE_MDIO_GW_AD_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_AD_SHIFT,
},
.write_data = {
.mask = MLXBF2_GIGE_MDIO_GW_AD_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_AD_SHIFT,
},
.devad = {
.mask = MLXBF2_GIGE_MDIO_GW_DEVAD_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_DEVAD_SHIFT,
},
.partad = {
.mask = MLXBF2_GIGE_MDIO_GW_PARTAD_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_PARTAD_SHIFT,
},
.opcode = {
.mask = MLXBF2_GIGE_MDIO_GW_OPCODE_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_OPCODE_SHIFT,
},
.st1 = {
.mask = MLXBF2_GIGE_MDIO_GW_ST1_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_ST1_SHIFT,
},
},
[MLXBF_GIGE_VERSION_BF3] = {
.gw_address = MLXBF3_GIGE_MDIO_GW_OFFSET,
.read_data_address = MLXBF3_GIGE_MDIO_DATA_READ,
.busy = {
.mask = MLXBF3_GIGE_MDIO_GW_BUSY_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_BUSY_SHIFT,
},
.read_data = {
.mask = MLXBF3_GIGE_MDIO_GW_DATA_READ_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_DATA_READ_SHIFT,
},
.write_data = {
.mask = MLXBF3_GIGE_MDIO_GW_DATA_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_DATA_SHIFT,
},
.devad = {
.mask = MLXBF3_GIGE_MDIO_GW_DEVAD_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_DEVAD_SHIFT,
},
.partad = {
.mask = MLXBF3_GIGE_MDIO_GW_PARTAD_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_PARTAD_SHIFT,
},
.opcode = {
.mask = MLXBF3_GIGE_MDIO_GW_OPCODE_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_OPCODE_SHIFT,
},
.st1 = {
.mask = MLXBF3_GIGE_MDIO_GW_ST1_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_ST1_SHIFT,
},
},
};
#define MLXBF_GIGE_MDIO_FREQ_REFERENCE 156250000ULL
#define MLXBF_GIGE_MDIO_COREPLL_CONST 16384ULL
@@ -47,30 +113,10 @@
/* Busy bit is set by software and cleared by hardware */
#define MLXBF_GIGE_MDIO_SET_BUSY 0x1
/* MDIO GW register bits */
#define MLXBF_GIGE_MDIO_GW_AD_MASK GENMASK(15, 0)
#define MLXBF_GIGE_MDIO_GW_DEVAD_MASK GENMASK(20, 16)
#define MLXBF_GIGE_MDIO_GW_PARTAD_MASK GENMASK(25, 21)
#define MLXBF_GIGE_MDIO_GW_OPCODE_MASK GENMASK(27, 26)
#define MLXBF_GIGE_MDIO_GW_ST1_MASK GENMASK(28, 28)
#define MLXBF_GIGE_MDIO_GW_BUSY_MASK GENMASK(30, 30)
/* MDIO config register bits */
#define MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK GENMASK(1, 0)
#define MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK GENMASK(2, 2)
#define MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK GENMASK(4, 4)
#define MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK GENMASK(15, 8)
#define MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16)
#define MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24)
#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
#define MLXBF_GIGE_BF2_COREPLL_ADDR 0x02800c30
#define MLXBF_GIGE_BF2_COREPLL_SIZE 0x0000000c
#define MLXBF_GIGE_BF3_COREPLL_ADDR 0x13409824
#define MLXBF_GIGE_BF3_COREPLL_SIZE 0x00000010
static struct resource corepll_params[] = {
[MLXBF_GIGE_VERSION_BF2] = {
@@ -78,6 +124,11 @@ static struct resource corepll_params[] = {
.end = MLXBF_GIGE_BF2_COREPLL_ADDR + MLXBF_GIGE_BF2_COREPLL_SIZE - 1,
.name = "COREPLL_RES"
},
[MLXBF_GIGE_VERSION_BF3] = {
.start = MLXBF_GIGE_BF3_COREPLL_ADDR,
.end = MLXBF_GIGE_BF3_COREPLL_ADDR + MLXBF_GIGE_BF3_COREPLL_SIZE - 1,
.name = "COREPLL_RES"
}
};
/* Returns core clock i1clk in Hz */
@@ -134,19 +185,23 @@ static u8 mdio_period_map(struct mlxbf_gige *priv)
return mdio_period;
}
static u32 mlxbf_gige_mdio_create_cmd(u16 data, int phy_add,
static u32 mlxbf_gige_mdio_create_cmd(struct mlxbf_gige_mdio_gw *mdio_gw, u16 data, int phy_add,
int phy_reg, u32 opcode)
{
u32 gw_reg = 0;
gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_AD_MASK, data);
gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_DEVAD_MASK, phy_reg);
gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_PARTAD_MASK, phy_add);
gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_OPCODE_MASK, opcode);
gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_ST1_MASK,
MLXBF_GIGE_MDIO_CL22_ST1);
gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_BUSY_MASK,
MLXBF_GIGE_MDIO_SET_BUSY);
gw_reg |= ((data << mdio_gw->write_data.shift) &
mdio_gw->write_data.mask);
gw_reg |= ((phy_reg << mdio_gw->devad.shift) &
mdio_gw->devad.mask);
gw_reg |= ((phy_add << mdio_gw->partad.shift) &
mdio_gw->partad.mask);
gw_reg |= ((opcode << mdio_gw->opcode.shift) &
mdio_gw->opcode.mask);
gw_reg |= ((MLXBF_GIGE_MDIO_CL22_ST1 << mdio_gw->st1.shift) &
mdio_gw->st1.mask);
gw_reg |= ((MLXBF_GIGE_MDIO_SET_BUSY << mdio_gw->busy.shift) &
mdio_gw->busy.mask);
return gw_reg;
}
@@ -162,25 +217,26 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
return -EOPNOTSUPP;
/* Send mdio read request */
cmd = mlxbf_gige_mdio_create_cmd(0, phy_add, phy_reg, MLXBF_GIGE_MDIO_CL22_READ);
cmd = mlxbf_gige_mdio_create_cmd(priv->mdio_gw, 0, phy_add, phy_reg,
MLXBF_GIGE_MDIO_CL22_READ);
writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
writel(cmd, priv->mdio_io + priv->mdio_gw->gw_address);
ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
val, !(val & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
ret = readl_poll_timeout_atomic(priv->mdio_io + priv->mdio_gw->gw_address,
val, !(val & priv->mdio_gw->busy.mask),
5, 1000000);
if (ret) {
writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
writel(0, priv->mdio_io + priv->mdio_gw->gw_address);
return ret;
}
ret = readl(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
ret = readl(priv->mdio_io + priv->mdio_gw->read_data_address);
/* Only return ad bits of the gw register */
ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
ret &= priv->mdio_gw->read_data.mask;
/* The MDIO lock is set on read. To release it, clear gw register */
writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
writel(0, priv->mdio_io + priv->mdio_gw->gw_address);
return ret;
}
@@ -197,17 +253,17 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
return -EOPNOTSUPP;
/* Send mdio write request */
cmd = mlxbf_gige_mdio_create_cmd(val, phy_add, phy_reg,
cmd = mlxbf_gige_mdio_create_cmd(priv->mdio_gw, val, phy_add, phy_reg,
MLXBF_GIGE_MDIO_CL22_WRITE);
writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
writel(cmd, priv->mdio_io + priv->mdio_gw->gw_address);
/* If the poll timed out, drop the request */
ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
ret = readl_poll_timeout_atomic(priv->mdio_io + priv->mdio_gw->gw_address,
temp, !(temp & priv->mdio_gw->busy.mask),
5, 1000000);
/* The MDIO lock is set on read. To release it, clear gw register */
writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
writel(0, priv->mdio_io + priv->mdio_gw->gw_address);
return ret;
}
@@ -219,9 +275,20 @@ static void mlxbf_gige_mdio_cfg(struct mlxbf_gige *priv)
mdio_period = mdio_period_map(priv);
val = MLXBF_GIGE_MDIO_CFG_VAL;
val |= FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
writel(val, priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
if (priv->hw_version == MLXBF_GIGE_VERSION_BF2) {
val = MLXBF2_GIGE_MDIO_CFG_VAL;
val |= FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
writel(val, priv->mdio_io + MLXBF2_GIGE_MDIO_CFG_OFFSET);
} else {
val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) |
FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1);
writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG0);
val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG1);
val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) |
FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13);
writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG2);
}
}
int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
@@ -230,6 +297,9 @@ int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
struct resource *res;
int ret;
if (priv->hw_version > MLXBF_GIGE_VERSION_BF3)
return -ENODEV;
priv->mdio_io = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MDIO9);
if (IS_ERR(priv->mdio_io))
return PTR_ERR(priv->mdio_io);
@@ -242,13 +312,15 @@ int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
/* For backward compatibility with older ACPI tables, also keep
* CLK resource internal to the driver.
*/
res = &corepll_params[MLXBF_GIGE_VERSION_BF2];
res = &corepll_params[priv->hw_version];
}
priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
if (!priv->clk_io)
return -ENOMEM;
priv->mdio_gw = &mlxbf_gige_mdio_gw_t[priv->hw_version];
mlxbf_gige_mdio_cfg(priv);
priv->mdiobus = devm_mdiobus_alloc(dev);
/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
/* MDIO support for Mellanox Gigabit Ethernet driver
*
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, ALL RIGHTS RESERVED.
*
* This software product is a proprietary product of NVIDIA CORPORATION &
* AFFILIATES (the "Company") and all right, title, and interest in and to the
* software product, including all associated intellectual property rights, are
* and shall remain exclusively with the Company.
*
* This software product is governed by the End User License Agreement
* provided with the software product.
*/
#ifndef __MLXBF_GIGE_MDIO_BF2_H__
#define __MLXBF_GIGE_MDIO_BF2_H__
#include <linux/bitfield.h>
#define MLXBF2_GIGE_MDIO_GW_OFFSET 0x0
#define MLXBF2_GIGE_MDIO_CFG_OFFSET 0x4
/* MDIO GW register bits */
#define MLXBF2_GIGE_MDIO_GW_AD_MASK GENMASK(15, 0)
#define MLXBF2_GIGE_MDIO_GW_DEVAD_MASK GENMASK(20, 16)
#define MLXBF2_GIGE_MDIO_GW_PARTAD_MASK GENMASK(25, 21)
#define MLXBF2_GIGE_MDIO_GW_OPCODE_MASK GENMASK(27, 26)
#define MLXBF2_GIGE_MDIO_GW_ST1_MASK GENMASK(28, 28)
#define MLXBF2_GIGE_MDIO_GW_BUSY_MASK GENMASK(30, 30)
#define MLXBF2_GIGE_MDIO_GW_AD_SHIFT 0
#define MLXBF2_GIGE_MDIO_GW_DEVAD_SHIFT 16
#define MLXBF2_GIGE_MDIO_GW_PARTAD_SHIFT 21
#define MLXBF2_GIGE_MDIO_GW_OPCODE_SHIFT 26
#define MLXBF2_GIGE_MDIO_GW_ST1_SHIFT 28
#define MLXBF2_GIGE_MDIO_GW_BUSY_SHIFT 30
/* MDIO config register bits */
#define MLXBF2_GIGE_MDIO_CFG_MDIO_MODE_MASK GENMASK(1, 0)
#define MLXBF2_GIGE_MDIO_CFG_MDIO3_3_MASK GENMASK(2, 2)
#define MLXBF2_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK GENMASK(4, 4)
#define MLXBF2_GIGE_MDIO_CFG_MDC_PERIOD_MASK GENMASK(15, 8)
#define MLXBF2_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16)
#define MLXBF2_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24)
#define MLXBF2_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
#endif /* __MLXBF_GIGE_MDIO_BF2_H__ */
/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
/* MDIO support for Mellanox Gigabit Ethernet driver
*
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, ALL RIGHTS RESERVED.
*
* This software product is a proprietary product of NVIDIA CORPORATION &
* AFFILIATES (the "Company") and all right, title, and interest in and to the
* software product, including all associated intellectual property rights, are
* and shall remain exclusively with the Company.
*
* This software product is governed by the End User License Agreement
* provided with the software product.
*/
#ifndef __MLXBF_GIGE_MDIO_BF3_H__
#define __MLXBF_GIGE_MDIO_BF3_H__
#include <linux/bitfield.h>
#define MLXBF3_GIGE_MDIO_GW_OFFSET 0x80
#define MLXBF3_GIGE_MDIO_DATA_READ 0x8c
#define MLXBF3_GIGE_MDIO_CFG_REG0 0x100
#define MLXBF3_GIGE_MDIO_CFG_REG1 0x104
#define MLXBF3_GIGE_MDIO_CFG_REG2 0x108
/* MDIO GW register bits */
#define MLXBF3_GIGE_MDIO_GW_ST1_MASK GENMASK(1, 1)
#define MLXBF3_GIGE_MDIO_GW_OPCODE_MASK GENMASK(3, 2)
#define MLXBF3_GIGE_MDIO_GW_PARTAD_MASK GENMASK(8, 4)
#define MLXBF3_GIGE_MDIO_GW_DEVAD_MASK GENMASK(13, 9)
/* For BlueField-3, this field is only used for mdio write */
#define MLXBF3_GIGE_MDIO_GW_DATA_MASK GENMASK(29, 14)
#define MLXBF3_GIGE_MDIO_GW_BUSY_MASK GENMASK(30, 30)
#define MLXBF3_GIGE_MDIO_GW_DATA_READ_MASK GENMASK(15, 0)
#define MLXBF3_GIGE_MDIO_GW_ST1_SHIFT 1
#define MLXBF3_GIGE_MDIO_GW_OPCODE_SHIFT 2
#define MLXBF3_GIGE_MDIO_GW_PARTAD_SHIFT 4
#define MLXBF3_GIGE_MDIO_GW_DEVAD_SHIFT 9
#define MLXBF3_GIGE_MDIO_GW_DATA_SHIFT 14
#define MLXBF3_GIGE_MDIO_GW_BUSY_SHIFT 30
#define MLXBF3_GIGE_MDIO_GW_DATA_READ_SHIFT 0
/* MDIO config register bits */
#define MLXBF3_GIGE_MDIO_CFG_MDIO_MODE_MASK GENMASK(1, 0)
#define MLXBF3_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK GENMASK(2, 2)
#define MLXBF3_GIGE_MDIO_CFG_MDC_PERIOD_MASK GENMASK(7, 0)
#define MLXBF3_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(7, 0)
#define MLXBF3_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(15, 8)
#endif /* __MLXBF_GIGE_MDIO_BF3_H__ */
@@ -8,8 +8,11 @@
#ifndef __MLXBF_GIGE_REGS_H__
#define __MLXBF_GIGE_REGS_H__
#include <linux/bitfield.h>
#define MLXBF_GIGE_VERSION 0x0000
#define MLXBF_GIGE_VERSION_BF2 0x0
#define MLXBF_GIGE_VERSION_BF3 0x1
#define MLXBF_GIGE_STATUS 0x0010
#define MLXBF_GIGE_STATUS_READY BIT(0)
#define MLXBF_GIGE_INT_STATUS 0x0028
@@ -77,4 +80,23 @@
*/
#define MLXBF_GIGE_MMIO_REG_SZ (MLXBF_GIGE_MAC_CFG + 8)
#define MLXBF_GIGE_PLU_TX_REG0 0x80
#define MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK GENMASK(11, 0)
#define MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK GENMASK(15, 14)
#define MLXBF_GIGE_PLU_RX_REG0 0x10
#define MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK GENMASK(25, 24)
#define MLXBF_GIGE_1G_SGMII_MODE 0x0
#define MLXBF_GIGE_10M_SGMII_MODE 0x1
#define MLXBF_GIGE_100M_SGMII_MODE 0x2
/* ipg_size default value for 1G is fixed by HW to 11 + End = 12.
* So for 100M it is 12 * 10 - 1 = 119
* For 10M, it is 12 * 100 - 1 = 1199
*/
#define MLXBF_GIGE_1G_IPG_SIZE 11
#define MLXBF_GIGE_100M_IPG_SIZE 119
#define MLXBF_GIGE_10M_IPG_SIZE 1199
#endif /* !defined(__MLXBF_GIGE_REGS_H__) */