Commit 1e8c86a6 authored by David S. Miller

Merge branch 'xgene-jumbo-and-pause-frame'

Iyappan Subramanian says:

====================
drivers: net: xgene: Add Jumbo and Pause frame support

This patch set adds:

1. Jumbo frame support
2. Pause frame based flow control

and fixes RSS for non-TCP/UDP packets.
====================
Signed-off-by: Iyappan Subramanian <isubramanian@apm.com>
parents 397c5ad1 0296fe4d
@@ -52,6 +52,7 @@ static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
{
buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
+SET_VAL(CLE_NFPSEL, dbptr->nxtfpsel) |
SET_VAL(CLE_DSTQIDL, dbptr->dstqid);
buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) |
@@ -346,11 +347,15 @@ static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
idx = i % pdata->rxq_cnt;
pool_id = pdata->rx_ring[idx]->buf_pool->id;
-fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;
+fpsel = xgene_enet_get_fpsel(pool_id);
dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
nfpsel = 0;
-idt_reg = 0;
+if (pdata->rx_ring[idx]->page_pool) {
+pool_id = pdata->rx_ring[idx]->page_pool->id;
+nfpsel = xgene_enet_get_fpsel(pool_id);
+}
+idt_reg = 0;
xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
RSS_IDT, CLE_CMD_WR);
@@ -400,9 +405,9 @@ static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
{
struct xgene_enet_cle *enet_cle = &pdata->cle;
+u32 def_qid, def_fpsel, def_nxtfpsel, pool_id;
struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
struct xgene_cle_ptree_branch *br;
-u32 def_qid, def_fpsel, pool_id;
struct xgene_cle_ptree *ptree;
struct xgene_cle_ptree_kn kn;
int ret;
@@ -480,11 +485,11 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
},
{
.valid = 0,
-.next_packet_pointer = 260,
+.next_packet_pointer = 26,
.jump_bw = JMP_FW,
.jump_rel = JMP_ABS,
.operation = EQT,
-.next_node = LAST_NODE,
+.next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 0,
.data = 0x0,
.mask = 0xffff
@@ -661,6 +666,92 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
}
}
},
{
/* RSS_IPV4_OTHERS_NODE */
.node_type = EWDN,
.last_node = 0,
.hdr_len_store = 1,
.hdr_extn = NO_BYTE,
.byte_store = NO_BYTE,
.search_byte_store = BOTH_BYTES,
.result_pointer = DB_RES_DROP,
.num_branches = 6,
.branch = {
{
/* SRC IPV4 B01 */
.valid = 0,
.next_packet_pointer = 28,
.jump_bw = JMP_FW,
.jump_rel = JMP_ABS,
.operation = EQT,
.next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 1,
.data = 0x0,
.mask = 0xffff
},
{
/* SRC IPV4 B23 */
.valid = 0,
.next_packet_pointer = 30,
.jump_bw = JMP_FW,
.jump_rel = JMP_ABS,
.operation = EQT,
.next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 2,
.data = 0x0,
.mask = 0xffff
},
{
/* DST IPV4 B01 */
.valid = 0,
.next_packet_pointer = 32,
.jump_bw = JMP_FW,
.jump_rel = JMP_ABS,
.operation = EQT,
.next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 3,
.data = 0x0,
.mask = 0xffff
},
{
/* DST IPV4 B23 */
.valid = 0,
.next_packet_pointer = 34,
.jump_bw = JMP_FW,
.jump_rel = JMP_ABS,
.operation = EQT,
.next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 4,
.data = 0x0,
.mask = 0xffff
},
{
/* TCP SRC Port */
.valid = 0,
.next_packet_pointer = 36,
.jump_bw = JMP_FW,
.jump_rel = JMP_ABS,
.operation = EQT,
.next_node = RSS_IPV4_OTHERS_NODE,
.next_branch = 5,
.data = 0x0,
.mask = 0xffff
},
{
/* TCP DST Port */
.valid = 0,
.next_packet_pointer = 260,
.jump_bw = JMP_FW,
.jump_rel = JMP_ABS,
.operation = EQT,
.next_node = LAST_NODE,
.next_branch = 0,
.data = 0x0,
.mask = 0xffff
}
}
},
{
/* LAST NODE */
.node_type = EWDN,
@@ -706,14 +797,21 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
pool_id = pdata->rx_ring[0]->buf_pool->id;
-def_fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;
+def_fpsel = xgene_enet_get_fpsel(pool_id);
+def_nxtfpsel = 0;
+if (pdata->rx_ring[0]->page_pool) {
+pool_id = pdata->rx_ring[0]->page_pool->id;
+def_nxtfpsel = xgene_enet_get_fpsel(pool_id);
+}
memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
dbptr[DB_RES_ACCEPT].fpsel = def_fpsel;
+dbptr[DB_RES_ACCEPT].nxtfpsel = def_nxtfpsel;
dbptr[DB_RES_ACCEPT].dstqid = def_qid;
dbptr[DB_RES_ACCEPT].cle_priority = 1;
dbptr[DB_RES_DEF].fpsel = def_fpsel;
+dbptr[DB_RES_DEF].nxtfpsel = def_nxtfpsel;
dbptr[DB_RES_DEF].dstqid = def_qid;
dbptr[DB_RES_DEF].cle_priority = 7;
xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF],
...
@@ -91,6 +91,8 @@
#define CLE_DSTQIDH_LEN 5
#define CLE_FPSEL_POS 21
#define CLE_FPSEL_LEN 4
+#define CLE_NFPSEL_POS 17
+#define CLE_NFPSEL_LEN 4
#define CLE_PRIORITY_POS 5
#define CLE_PRIORITY_LEN 3
@@ -104,6 +106,7 @@ enum xgene_cle_ptree_nodes {
PKT_PROT_NODE,
RSS_IPV4_TCP_NODE,
RSS_IPV4_UDP_NODE,
+RSS_IPV4_OTHERS_NODE,
LAST_NODE,
MAX_NODES
};
...
@@ -163,6 +163,74 @@ static void xgene_get_ethtool_stats(struct net_device *ndev,
*data++ = *(u64 *)(pdata + gstrings_stats[i].offset);
}
static void xgene_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *pp)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
pp->autoneg = pdata->pause_autoneg;
pp->tx_pause = pdata->tx_pause;
pp->rx_pause = pdata->rx_pause;
}
static int xgene_set_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *pp)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
u32 oldadv, newadv;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
if (!phydev)
return -EINVAL;
if (!(phydev->supported & SUPPORTED_Pause) ||
(!(phydev->supported & SUPPORTED_Asym_Pause) &&
pp->rx_pause != pp->tx_pause))
return -EINVAL;
pdata->pause_autoneg = pp->autoneg;
pdata->tx_pause = pp->tx_pause;
pdata->rx_pause = pp->rx_pause;
oldadv = phydev->advertising;
newadv = oldadv & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
if (pp->rx_pause)
newadv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
if (pp->tx_pause)
newadv ^= ADVERTISED_Asym_Pause;
if (oldadv ^ newadv) {
phydev->advertising = newadv;
if (phydev->autoneg)
return phy_start_aneg(phydev);
if (!pp->autoneg) {
pdata->mac_ops->flowctl_tx(pdata,
pdata->tx_pause);
pdata->mac_ops->flowctl_rx(pdata,
pdata->rx_pause);
}
}
} else {
if (pp->autoneg)
return -EINVAL;
pdata->tx_pause = pp->tx_pause;
pdata->rx_pause = pp->rx_pause;
pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
}
return 0;
}
static const struct ethtool_ops xgene_ethtool_ops = {
.get_drvinfo = xgene_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -171,6 +239,8 @@ static const struct ethtool_ops xgene_ethtool_ops = {
.get_ethtool_stats = xgene_get_ethtool_stats,
.get_link_ksettings = xgene_get_link_ksettings,
.set_link_ksettings = xgene_set_link_ksettings,
+.get_pauseparam = xgene_get_pauseparam,
+.set_pauseparam = xgene_set_pauseparam
};
void xgene_enet_set_ethtool_ops(struct net_device *ndev)
...
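The xgene_set_pauseparam() hunk above folds the requested rx/tx pause settings into the PHY advertisement with an OR/XOR pair. The standalone sketch below is not part of the patch; it only mirrors that mapping to make the four possible outcomes visible. The ADVERTISED_Pause/ADVERTISED_Asym_Pause bit values are copied from the kernel's ethtool UAPI (bits 13 and 14); treat the program itself as illustrative.

#include <stdio.h>

/* Bit values mirror include/uapi/linux/ethtool.h (assumed here, not taken from the patch). */
#define ADVERTISED_Pause	(1 << 13)
#define ADVERTISED_Asym_Pause	(1 << 14)

/* Same OR/XOR mapping used by xgene_set_pauseparam() above. */
static unsigned int pause_advertisement(int rx_pause, int tx_pause)
{
	unsigned int adv = 0;

	if (rx_pause)
		adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (tx_pause)
		adv ^= ADVERTISED_Asym_Pause;

	return adv;
}

int main(void)
{
	int rx, tx;

	for (rx = 0; rx <= 1; rx++)
		for (tx = 0; tx <= 1; tx++)
			printf("rx=%d tx=%d -> adv=0x%04x\n",
			       rx, tx, pause_advertisement(rx, tx));
	return 0;
}

Running it shows the standard 802.3 encoding: rx-only advertises Pause|Asym_Pause, tx-only advertises Asym_Pause alone, and symmetric rx+tx advertises Pause only.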
@@ -504,6 +504,56 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
}
static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
xgene_enet_wr_mcx_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}
static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
bool enable)
{
u32 data;
xgene_enet_rd_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, &data);
if (enable)
data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
else
data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
xgene_enet_wr_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, data);
}
static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
u32 data;
xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
if (enable)
data |= TX_FLOW_EN;
else
data &= ~TX_FLOW_EN;
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
pdata->mac_ops->enable_tx_pause(pdata, enable);
}
static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
u32 data;
xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
if (enable)
data |= RX_FLOW_EN;
else
data &= ~RX_FLOW_EN;
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
}
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
u32 value;
@@ -527,6 +577,17 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
/* Rtype should be copied from FP */
xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
/* Configure HW pause frame generation */
xgene_enet_rd_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, &value);
value = (DEF_QUANTA << 16) | (value & 0xFFFF);
xgene_enet_wr_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, value);
xgene_enet_wr_csr(pdata, RXBUF_PAUSE_THRESH, DEF_PAUSE_THRES);
xgene_enet_wr_csr(pdata, RXBUF_PAUSE_OFF_THRESH, DEF_PAUSE_OFF_THRES);
xgene_gmac_flowctl_tx(pdata, pdata->tx_pause);
xgene_gmac_flowctl_rx(pdata, pdata->rx_pause);
/* Rx-Tx traffic resume */
xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
@@ -550,12 +611,14 @@ static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
}
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
-u32 dst_ring_num, u16 bufpool_id)
+u32 dst_ring_num, u16 bufpool_id,
+u16 nxtbufpool_id)
{
u32 cb;
-u32 fpsel;
+u32 fpsel, nxtfpsel;
-fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
+fpsel = xgene_enet_get_fpsel(bufpool_id);
+nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
cb |= CFG_CLE_BYPASS_EN0;
@@ -565,6 +628,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
CFG_CLE_FPSEL0_SET(&cb, fpsel);
+CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}
@@ -652,16 +716,14 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
struct xgene_enet_desc_ring *ring)
{
-u32 addr, val, data;
-val = xgene_enet_ring_bufnum(ring->id);
+u32 addr, data;
if (xgene_enet_is_bufpool(ring->id)) {
addr = ENET_CFGSSQMIFPRESET_ADDR;
-data = BIT(val - 0x20);
+data = BIT(xgene_enet_get_fpsel(ring->id));
} else {
addr = ENET_CFGSSQMIWQRESET_ADDR;
-data = BIT(val);
+data = BIT(xgene_enet_ring_bufnum(ring->id));
}
xgene_enet_wr_ring_if(pdata, addr, data);
@@ -671,24 +733,24 @@ static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
struct xgene_enet_desc_ring *ring;
-u32 pb, val;
+u32 pb;
int i;
pb = 0;
for (i = 0; i < pdata->rxq_cnt; i++) {
ring = pdata->rx_ring[i]->buf_pool;
-val = xgene_enet_ring_bufnum(ring->id);
-pb |= BIT(val - 0x20);
+pb |= BIT(xgene_enet_get_fpsel(ring->id));
+ring = pdata->rx_ring[i]->page_pool;
+if (ring)
+pb |= BIT(xgene_enet_get_fpsel(ring->id));
}
xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
pb = 0;
for (i = 0; i < pdata->txq_cnt; i++) {
ring = pdata->tx_ring[i];
-val = xgene_enet_ring_bufnum(ring->id);
-pb |= BIT(val);
+pb |= BIT(xgene_enet_ring_bufnum(ring->id));
}
xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
@@ -698,6 +760,48 @@ static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
}
}
static u32 xgene_enet_flowctrl_cfg(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
u16 lcladv, rmtadv = 0;
u32 rx_pause, tx_pause;
u8 flowctl = 0;
if (!phydev->duplex || !pdata->pause_autoneg)
return 0;
if (pdata->tx_pause)
flowctl |= FLOW_CTRL_TX;
if (pdata->rx_pause)
flowctl |= FLOW_CTRL_RX;
lcladv = mii_advertise_flowctrl(flowctl);
if (phydev->pause)
rmtadv = LPA_PAUSE_CAP;
if (phydev->asym_pause)
rmtadv |= LPA_PAUSE_ASYM;
flowctl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
tx_pause = !!(flowctl & FLOW_CTRL_TX);
rx_pause = !!(flowctl & FLOW_CTRL_RX);
if (tx_pause != pdata->tx_pause) {
pdata->tx_pause = tx_pause;
pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
}
if (rx_pause != pdata->rx_pause) {
pdata->rx_pause = rx_pause;
pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
}
return 0;
}
static void xgene_enet_adjust_link(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
@@ -712,6 +816,8 @@ static void xgene_enet_adjust_link(struct net_device *ndev)
mac_ops->tx_enable(pdata);
phy_print_status(phydev);
}
+xgene_enet_flowctrl_cfg(ndev);
} else {
mac_ops->rx_disable(pdata);
mac_ops->tx_disable(pdata);
@@ -785,6 +891,8 @@ int xgene_enet_phy_connect(struct net_device *ndev)
phy_dev->supported &= ~SUPPORTED_10baseT_Half &
~SUPPORTED_100baseT_Half &
~SUPPORTED_1000baseT_Half;
+phy_dev->supported |= SUPPORTED_Pause |
+SUPPORTED_Asym_Pause;
phy_dev->advertising = phy_dev->supported;
return 0;
@@ -902,6 +1010,10 @@ const struct xgene_mac_ops xgene_gmac_ops = {
.tx_disable = xgene_gmac_tx_disable,
.set_speed = xgene_gmac_set_speed,
.set_mac_addr = xgene_gmac_set_mac_addr,
+.set_framesize = xgene_enet_set_frame_size,
+.enable_tx_pause = xgene_gmac_enable_tx_pause,
+.flowctl_tx = xgene_gmac_flowctl_tx,
+.flowctl_rx = xgene_gmac_flowctl_rx,
};
const struct xgene_port_ops xgene_gport_ops = {
...
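xgene_enet_flowctrl_cfg() above leans on the kernel's mii_advertise_flowctrl()/mii_resolve_flowctrl_fdx() helpers to turn the local pause request and the link partner's LPA bits into the final tx/rx enables that get pushed down through flowctl_tx()/flowctl_rx(). The sketch below is only a userspace illustration of that resolution step: the constants are copied from linux/mii.h and the helper body is reimplemented locally, so treat it as a reading aid rather than driver code.

#include <stdio.h>

/* Values mirror linux/mii.h; defined locally so the sketch is self-contained. */
#define ADVERTISE_PAUSE_CAP	0x0400	/* symmetric pause */
#define ADVERTISE_PAUSE_ASYM	0x0800	/* asymmetric pause */
#define LPA_PAUSE_CAP		0x0400
#define LPA_PAUSE_ASYM		0x0800
#define FLOW_CTRL_TX		0x01
#define FLOW_CTRL_RX		0x02

/* Local reimplementation of the IEEE 802.3 resolution done by mii_resolve_flowctrl_fdx(). */
static unsigned char resolve_flowctrl_fdx(unsigned short lcladv, unsigned short rmtadv)
{
	unsigned char cap = 0;

	if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) {
		if (lcladv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_RX;
		else if (rmtadv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_TX;
	}
	return cap;
}

int main(void)
{
	/* Example: we ask for rx+tx pause, the partner advertises symmetric pause only. */
	unsigned short lcladv = ADVERTISE_PAUSE_CAP;	/* what mii_advertise_flowctrl(RX|TX) returns */
	unsigned short rmtadv = LPA_PAUSE_CAP;		/* phydev->pause set, asym_pause clear */
	unsigned char cap = resolve_flowctrl_fdx(lcladv, rmtadv);

	printf("tx_pause=%d rx_pause=%d\n",
	       !!(cap & FLOW_CTRL_TX), !!(cap & FLOW_CTRL_RX));
	return 0;
}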
@@ -165,10 +165,23 @@ enum xgene_enet_rm {
#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4)
+#define CFG_CLE_NXTFPSEL0_SET(dst, val) xgene_set_bits(dst, val, 20, 4)
#define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2)
#define CFG_WAITASYNCRD_SET(dst, val) xgene_set_bits(dst, val, 0, 16)
-#define CFG_CLE_DSTQID0(val) (val & GENMASK(11, 0))
-#define CFG_CLE_FPSEL0(val) ((val << 16) & GENMASK(19, 16))
+#define CFG_CLE_DSTQID0(val) ((val) & GENMASK(11, 0))
+#define CFG_CLE_FPSEL0(val) (((val) << 16) & GENMASK(19, 16))
+#define CSR_ECM_CFG_0_ADDR 0x0220
+#define CSR_ECM_CFG_1_ADDR 0x0224
+#define CSR_MULTI_DPF0_ADDR 0x0230
+#define RXBUF_PAUSE_THRESH 0x0534
+#define RXBUF_PAUSE_OFF_THRESH 0x0540
+#define DEF_PAUSE_THRES 0x7d
+#define DEF_PAUSE_OFF_THRES 0x6d
+#define DEF_QUANTA 0x8000
+#define NORM_PAUSE_OPCODE 0x0001
+#define PAUSE_XON_EN BIT(30)
+#define MULTI_DPF_AUTOCTRL BIT(28)
+#define CFG_CLE_NXTFPSEL0(val) (((val) << 20) & GENMASK(23, 20))
#define ICM_CONFIG0_REG_0_ADDR 0x0400
#define ICM_CONFIG2_REG_0_ADDR 0x0410
#define RX_DV_GATE_REG_0_ADDR 0x05fc
@@ -196,6 +209,8 @@ enum xgene_enet_rm {
#define SOFT_RESET1 BIT(31)
#define TX_EN BIT(0)
#define RX_EN BIT(2)
+#define TX_FLOW_EN BIT(4)
+#define RX_FLOW_EN BIT(5)
#define ENET_LHD_MODE BIT(25)
#define ENET_GHD_MODE BIT(26)
#define FULL_DUPLEX2 BIT(0)
@@ -346,6 +361,14 @@ static inline bool xgene_enet_is_bufpool(u16 id)
return ((id & RING_BUFNUM_MASK) >= 0x20) ? true : false;
}
+static inline u8 xgene_enet_get_fpsel(u16 id)
+{
+if (xgene_enet_is_bufpool(id))
+return xgene_enet_ring_bufnum(id) - RING_BUFNUM_BUFPOOL;
+return 0;
+}
static inline u16 xgene_enet_get_numslots(u16 id, u32 size)
{
bool is_bufpool = xgene_enet_is_bufpool(id);
...
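The new xgene_enet_get_fpsel() helper and the CFG_CLE_DSTQID0/CFG_CLE_FPSEL0/CFG_CLE_NXTFPSEL0 macros above define how buffer-pool ring ids end up as fields of the CLE bypass register (dstqid in bits 0-11, fpsel in 16-19, nxtfpsel in 20-23). The sketch below packs one example value with the same shifts and masks. The ring ids and the 0x3f bufnum mask are illustrative assumptions; only the 0x20 buffer-pool base is visible in the hunks above.

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

/* Assumed ring-id layout: low bits carry the buffer number (mask value is an assumption). */
#define RING_BUFNUM_MASK	0x3f
#define RING_BUFNUM_BUFPOOL	0x20	/* matches the ">= 0x20" check in the header above */

static uint8_t get_fpsel(uint16_t id)
{
	uint8_t bufnum = id & RING_BUFNUM_MASK;

	return (bufnum >= RING_BUFNUM_BUFPOOL) ? bufnum - RING_BUFNUM_BUFPOOL : 0;
}

int main(void)
{
	uint16_t buf_pool_id = 0x0022;	/* hypothetical bufpool ring: bufnum 0x22 -> fpsel 2 */
	uint16_t page_pool_id = 0x0023;	/* hypothetical page pool ring: bufnum 0x23 -> nxtfpsel 3 */
	uint32_t dst_ring_num = 0x0ab;	/* hypothetical destination ring number */
	uint32_t reg;

	/* Same field packing as CFG_CLE_DSTQID0 | CFG_CLE_FPSEL0 | CFG_CLE_NXTFPSEL0. */
	reg  = dst_ring_num & GENMASK(11, 0);
	reg |= ((uint32_t)get_fpsel(buf_pool_id) << 16) & GENMASK(19, 16);
	reg |= ((uint32_t)get_fpsel(page_pool_id) << 20) & GENMASK(23, 20);

	printf("CLE_BYPASS_REG1 = 0x%08x\n", reg);	/* prints 0x003200ab */
	return 0;
}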
@@ -41,11 +41,14 @@
#include "../../../phy/mdio-xgene.h"
#define XGENE_DRV_VERSION "v1.0"
-#define XGENE_ENET_MAX_MTU 1536
-#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
+#define XGENE_ENET_STD_MTU 1536
+#define XGENE_ENET_MAX_MTU 9600
+#define SKB_BUFFER_SIZE (XGENE_ENET_STD_MTU - NET_IP_ALIGN)
#define BUFLEN_16K (16 * 1024)
-#define NUM_PKT_BUF 64
+#define NUM_PKT_BUF 1024
#define NUM_BUFPOOL 32
+#define NUM_NXTBUFPOOL 8
#define MAX_EXP_BUFFS 256
#define NUM_MSS_REG 4
#define XGENE_MIN_ENET_FRAME_SIZE 60
@@ -88,6 +91,12 @@ enum xgene_enet_id {
XGENE_ENET2
};
+enum xgene_enet_buf_len {
+SIZE_2K = 2048,
+SIZE_4K = 4096,
+SIZE_16K = 16384
+};
/* software context of a descriptor ring */
struct xgene_enet_desc_ring {
struct net_device *ndev;
@@ -107,14 +116,18 @@ struct xgene_enet_desc_ring {
dma_addr_t irq_mbox_dma;
void *irq_mbox_addr;
u16 dst_ring_num;
-u8 nbufpool;
+u16 nbufpool;
+int npagepool;
u8 index;
+u32 flags;
struct sk_buff *(*rx_skb);
struct sk_buff *(*cp_skb);
dma_addr_t *frag_dma_addr;
+struct page *(*frag_page);
enum xgene_enet_ring_cfgsize cfgsize;
struct xgene_enet_desc_ring *cp_ring;
struct xgene_enet_desc_ring *buf_pool;
+struct xgene_enet_desc_ring *page_pool;
struct napi_struct napi;
union {
void *desc_addr;
@@ -143,8 +156,12 @@ struct xgene_mac_ops {
void (*rx_disable)(struct xgene_enet_pdata *pdata);
void (*set_speed)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
+void (*set_framesize)(struct xgene_enet_pdata *pdata, int framesize);
void (*set_mss)(struct xgene_enet_pdata *pdata, u16 mss, u8 index);
void (*link_state)(struct work_struct *work);
+void (*enable_tx_pause)(struct xgene_enet_pdata *pdata, bool enable);
+void (*flowctl_rx)(struct xgene_enet_pdata *pdata, bool enable);
+void (*flowctl_tx)(struct xgene_enet_pdata *pdata, bool enable);
};
struct xgene_port_ops {
@@ -152,7 +169,7 @@ struct xgene_port_ops {
void (*clear)(struct xgene_enet_pdata *pdata,
struct xgene_enet_desc_ring *ring);
void (*cle_bypass)(struct xgene_enet_pdata *pdata,
-u32 dst_ring_num, u16 bufpool_id);
+u32 dst_ring_num, u16 bufpool_id, u16 nxtbufpool_id);
void (*shutdown)(struct xgene_enet_pdata *pdata);
};
@@ -220,6 +237,9 @@ struct xgene_enet_pdata {
bool mdio_driver;
struct gpio_desc *sfp_rdy;
bool sfp_gpio_en;
+u32 pause_autoneg;
+bool tx_pause;
+bool rx_pause;
};
struct xgene_indirect_ctl {
...
@@ -119,6 +119,7 @@ static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
ring_id_buf |= PREFETCH_BUF_EN;
if (is_bufpool)
ring_id_buf |= IS_BUFFER_POOL;
...
@@ -343,6 +343,11 @@ static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p)
xgene_enet_wr_mcx_csr(p, icm2_addr, icm2);
}
static void xgene_sgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}
static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
{
u32 data, loop = 10;
@@ -360,11 +365,39 @@ static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
netdev_err(p->ndev, "Auto-negotiation failed\n");
}
static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
{
u32 data;
data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);
if (set)
data |= bits;
else
data &= ~bits;
xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
}
static void xgene_sgmac_flowctl_tx(struct xgene_enet_pdata *p, bool enable)
{
xgene_sgmac_rxtx(p, TX_FLOW_EN, enable);
p->mac_ops->enable_tx_pause(p, enable);
}
static void xgene_sgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
xgene_sgmac_rxtx(pdata, RX_FLOW_EN, enable);
}
static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
+u32 pause_thres_reg, pause_off_thres_reg;
u32 enet_spare_cfg_reg, rsif_config_reg;
u32 cfg_bypass_reg, rx_dv_gate_reg;
-u32 data, offset;
+u32 data, data1, data2, offset;
+u32 multi_dpf_reg;
if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver))
xgene_sgmac_reset(p);
@@ -400,24 +433,50 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
xgene_enet_wr_csr(p, rsif_config_reg, data);
-/* Bypass traffic gating */
-xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
-xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
-xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
-}
-static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
-{
-u32 data;
-data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);
-if (set)
-data |= bits;
-else
-data &= ~bits;
-xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
+/* Configure HW pause frame generation */
+multi_dpf_reg = (p->enet_id == XGENE_ENET1) ? CSR_MULTI_DPF0_ADDR :
+XG_MCX_MULTI_DPF0_ADDR;
+data = xgene_enet_rd_mcx_csr(p, multi_dpf_reg);
+data = (DEF_QUANTA << 16) | (data & 0xffff);
+xgene_enet_wr_mcx_csr(p, multi_dpf_reg, data);
+if (p->enet_id != XGENE_ENET1) {
+data = xgene_enet_rd_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR);
+data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF);
+xgene_enet_wr_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR, data);
+}
+pause_thres_reg = (p->enet_id == XGENE_ENET1) ? RXBUF_PAUSE_THRESH :
+XG_RXBUF_PAUSE_THRESH;
+pause_off_thres_reg = (p->enet_id == XGENE_ENET1) ?
+RXBUF_PAUSE_OFF_THRESH : 0;
+if (p->enet_id == XGENE_ENET1) {
+data1 = xgene_enet_rd_csr(p, pause_thres_reg);
+data2 = xgene_enet_rd_csr(p, pause_off_thres_reg);
+if (!(p->port_id % 2)) {
+data1 = (data1 & 0xffff0000) | DEF_PAUSE_THRES;
+data2 = (data2 & 0xffff0000) | DEF_PAUSE_OFF_THRES;
+} else {
+data1 = (data1 & 0xffff) | (DEF_PAUSE_THRES << 16);
+data2 = (data2 & 0xffff) | (DEF_PAUSE_OFF_THRES << 16);
+}
+xgene_enet_wr_csr(p, pause_thres_reg, data1);
+xgene_enet_wr_csr(p, pause_off_thres_reg, data2);
+} else {
+data = (DEF_PAUSE_OFF_THRES << 16) | DEF_PAUSE_THRES;
+xgene_enet_wr_csr(p, pause_thres_reg, data);
+}
+xgene_sgmac_flowctl_tx(p, p->tx_pause);
+xgene_sgmac_flowctl_rx(p, p->rx_pause);
+/* Bypass traffic gating */
+xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
+xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
+xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
}
static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
@@ -484,11 +543,12 @@ static int xgene_enet_reset(struct xgene_enet_pdata *p)
}
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
-u32 dst_ring_num, u16 bufpool_id)
+u32 dst_ring_num, u16 bufpool_id,
+u16 nxtbufpool_id)
{
-u32 data, fpsel;
u32 cle_bypass_reg0, cle_bypass_reg1;
u32 offset = p->port_id * MAC_OFFSET;
+u32 data, fpsel, nxtfpsel;
if (p->enet_id == XGENE_ENET1) {
cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
@@ -501,24 +561,24 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
data = CFG_CLE_BYPASS_EN0;
xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
-fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
-data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
+fpsel = xgene_enet_get_fpsel(bufpool_id);
+nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
+data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel) |
+CFG_CLE_NXTFPSEL0(nxtfpsel);
xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
}
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
struct xgene_enet_desc_ring *ring)
{
-u32 addr, val, data;
-val = xgene_enet_ring_bufnum(ring->id);
+u32 addr, data;
if (xgene_enet_is_bufpool(ring->id)) {
addr = ENET_CFGSSQMIFPRESET_ADDR;
-data = BIT(val - 0x20);
+data = BIT(xgene_enet_get_fpsel(ring->id));
} else {
addr = ENET_CFGSSQMIWQRESET_ADDR;
-data = BIT(val);
+data = BIT(xgene_enet_ring_bufnum(ring->id));
}
xgene_enet_wr_ring_if(pdata, addr, data);
@@ -528,24 +588,23 @@ static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
struct device *dev = &p->pdev->dev;
struct xgene_enet_desc_ring *ring;
-u32 pb, val;
+u32 pb;
int i;
pb = 0;
for (i = 0; i < p->rxq_cnt; i++) {
ring = p->rx_ring[i]->buf_pool;
-val = xgene_enet_ring_bufnum(ring->id);
-pb |= BIT(val - 0x20);
+pb |= BIT(xgene_enet_get_fpsel(ring->id));
+ring = p->rx_ring[i]->page_pool;
+if (ring)
+pb |= BIT(xgene_enet_get_fpsel(ring->id));
}
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPRESET_ADDR, pb);
pb = 0;
for (i = 0; i < p->txq_cnt; i++) {
ring = p->tx_ring[i];
-val = xgene_enet_ring_bufnum(ring->id);
-pb |= BIT(val);
+pb |= BIT(xgene_enet_ring_bufnum(ring->id));
}
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQRESET_ADDR, pb);
@@ -586,6 +645,25 @@ static void xgene_enet_link_state(struct work_struct *work)
schedule_delayed_work(&p->link_work, poll_interval);
}
static void xgene_sgmac_enable_tx_pause(struct xgene_enet_pdata *p, bool enable)
{
u32 data, ecm_cfg_addr;
if (p->enet_id == XGENE_ENET1) {
ecm_cfg_addr = (!(p->port_id % 2)) ? CSR_ECM_CFG_0_ADDR :
CSR_ECM_CFG_1_ADDR;
} else {
ecm_cfg_addr = XG_MCX_ECM_CFG_0_ADDR;
}
data = xgene_enet_rd_mcx_csr(p, ecm_cfg_addr);
if (enable)
data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
else
data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
xgene_enet_wr_mcx_csr(p, ecm_cfg_addr, data);
}
const struct xgene_mac_ops xgene_sgmac_ops = {
.init = xgene_sgmac_init,
.reset = xgene_sgmac_reset,
@@ -595,7 +673,11 @@ const struct xgene_mac_ops xgene_sgmac_ops = {
.tx_disable = xgene_sgmac_tx_disable,
.set_speed = xgene_sgmac_set_speed,
.set_mac_addr = xgene_sgmac_set_mac_addr,
-.link_state = xgene_enet_link_state
+.set_framesize = xgene_sgmac_set_frame_size,
+.link_state = xgene_enet_link_state,
+.enable_tx_pause = xgene_sgmac_enable_tx_pause,
+.flowctl_tx = xgene_sgmac_flowctl_tx,
+.flowctl_rx = xgene_sgmac_flowctl_rx
};
const struct xgene_port_ops xgene_sgport_ops = {
...
@@ -101,6 +101,14 @@ static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
wr_addr);
}
static void xgene_enet_wr_axg_csr(struct xgene_enet_pdata *pdata,
u32 offset, u32 val)
{
void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
iowrite32(val, addr);
}
static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
u32 offset, u32 *val)
{
@@ -174,6 +182,14 @@ static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
return success;
}
static void xgene_enet_rd_axg_csr(struct xgene_enet_pdata *pdata,
u32 offset, u32 *val)
{
void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
*val = ioread32(addr);
}
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
struct net_device *ndev = pdata->ndev;
@@ -250,6 +266,12 @@ static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata,
xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, data);
}
static void xgene_xgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR,
((((size + 2) >> 2) << 16) | size));
}
static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
{
u32 data;
@@ -259,6 +281,51 @@ static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
return data;
}
static void xgene_xgmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
bool enable)
{
u32 data;
xgene_enet_rd_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, &data);
if (enable)
data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
else
data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
xgene_enet_wr_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, data);
}
static void xgene_xgmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
u32 data;
xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
if (enable)
data |= HSTTCTLEN;
else
data &= ~HSTTCTLEN;
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
pdata->mac_ops->enable_tx_pause(pdata, enable);
}
static void xgene_xgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
u32 data;
xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
if (enable)
data |= HSTRCTLEN;
else
data &= ~HSTRCTLEN;
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
}
static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
{
u32 data;
@@ -282,6 +349,23 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);
/* Configure HW pause frame generation */
xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, &data);
data = (DEF_QUANTA << 16) | (data & 0xFFFF);
xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, data);
if (pdata->enet_id != XGENE_ENET1) {
xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, &data);
data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF);
xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, data);
}
data = (XG_DEF_PAUSE_OFF_THRES << 16) | XG_DEF_PAUSE_THRES;
xgene_enet_wr_csr(pdata, XG_RXBUF_PAUSE_THRESH, data);
xgene_xgmac_flowctl_tx(pdata, pdata->tx_pause);
xgene_xgmac_flowctl_rx(pdata, pdata->rx_pause);
}
static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
@@ -350,44 +434,47 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
}
static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
-u32 dst_ring_num, u16 bufpool_id)
+u32 dst_ring_num, u16 bufpool_id,
+u16 nxtbufpool_id)
{
-u32 cb, fpsel;
+u32 cb, fpsel, nxtfpsel;
xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
cb |= CFG_CLE_BYPASS_EN0;
CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);
-fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
+fpsel = xgene_enet_get_fpsel(bufpool_id);
+nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
CFG_CLE_FPSEL0_SET(&cb, fpsel);
+CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
+pr_info("+ cle_bypass: fpsel: %d nxtfpsel: %d\n", fpsel, nxtfpsel);
}
static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
struct xgene_enet_desc_ring *ring;
-u32 pb, val;
+u32 pb;
int i;
pb = 0;
for (i = 0; i < pdata->rxq_cnt; i++) {
ring = pdata->rx_ring[i]->buf_pool;
-val = xgene_enet_ring_bufnum(ring->id);
-pb |= BIT(val - 0x20);
+pb |= BIT(xgene_enet_get_fpsel(ring->id));
+ring = pdata->rx_ring[i]->page_pool;
+if (ring)
+pb |= BIT(xgene_enet_get_fpsel(ring->id));
}
xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
pb = 0;
for (i = 0; i < pdata->txq_cnt; i++) {
ring = pdata->tx_ring[i];
-val = xgene_enet_ring_bufnum(ring->id);
-pb |= BIT(val);
+pb |= BIT(xgene_enet_ring_bufnum(ring->id));
}
xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
@@ -400,16 +487,14 @@ static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
struct xgene_enet_desc_ring *ring)
{
-u32 addr, val, data;
-val = xgene_enet_ring_bufnum(ring->id);
+u32 addr, data;
if (xgene_enet_is_bufpool(ring->id)) {
addr = ENET_CFGSSQMIFPRESET_ADDR;
-data = BIT(val - 0x20);
+data = BIT(xgene_enet_get_fpsel(ring->id));
} else {
addr = ENET_CFGSSQMIWQRESET_ADDR;
-data = BIT(val);
+data = BIT(xgene_enet_ring_bufnum(ring->id));
}
xgene_enet_wr_ring_if(pdata, addr, data);
@@ -473,8 +558,12 @@ const struct xgene_mac_ops xgene_xgmac_ops = {
.rx_disable = xgene_xgmac_rx_disable,
.tx_disable = xgene_xgmac_tx_disable,
.set_mac_addr = xgene_xgmac_set_mac_addr,
+.set_framesize = xgene_xgmac_set_frame_size,
.set_mss = xgene_xgmac_set_mss,
-.link_state = xgene_enet_link_state
+.link_state = xgene_enet_link_state,
+.enable_tx_pause = xgene_xgmac_enable_tx_pause,
+.flowctl_rx = xgene_xgmac_flowctl_rx,
+.flowctl_tx = xgene_xgmac_flowctl_tx
};
const struct xgene_port_ops xgene_xgport_ops = {
...
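xgene_xgmac_set_frame_size() above programs HSTMAXFRAME_LENGTH_ADDR with ((((size + 2) >> 2) << 16) | size). The patch does not spell out what the upper half-word represents, so the snippet below only evaluates that expression for the new 9600-byte maximum to show the value that ends up in the register; it is a worked example, not driver code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int size = 9600;	/* XGENE_ENET_MAX_MTU from the main.h hunk */
	uint32_t reg = (((size + 2) >> 2) << 16) | size;

	/* (9600 + 2) >> 2 = 2400 (0x960), so the register value is 0x09602580. */
	printf("HSTMAXFRAME_LENGTH = 0x%08x (upper field %u, lower field %u)\n",
	       reg, reg >> 16, reg & 0xffff);
	return 0;
}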
@@ -59,6 +59,11 @@
#define HSTMAXFRAME_LENGTH_ADDR 0x0020
#define XG_MCX_RX_DV_GATE_REG_0_ADDR 0x0004
+#define XG_MCX_ECM_CFG_0_ADDR 0x0074
+#define XG_MCX_MULTI_DPF0_ADDR 0x007c
+#define XG_MCX_MULTI_DPF1_ADDR 0x0080
+#define XG_DEF_PAUSE_THRES 0x390
+#define XG_DEF_PAUSE_OFF_THRES 0x2c0
#define XG_RSIF_CONFIG_REG_ADDR 0x00a0
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164
@@ -70,6 +75,10 @@
#define XG_ENET_SPARE_CFG_REG_ADDR 0x040c
#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410
#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804
+#define XGENET_CSR_ECM_CFG_0_ADDR 0x0880
+#define XGENET_CSR_MULTI_DPF0_ADDR 0x0888
+#define XGENET_CSR_MULTI_DPF1_ADDR 0x088c
+#define XG_RXBUF_PAUSE_THRESH 0x0020
#define XG_MCX_ICM_CONFIG0_REG_0_ADDR 0x00e0
#define XG_MCX_ICM_CONFIG2_REG_0_ADDR 0x00e8
...