Commit 76ccf528 authored by Jakub Kicinski

Merge branch 'stmmac-ETF-support'

Jose Abreu says:

====================
net: stmmac: ETF support

This series adds support for the ETF scheduler in stmmac.

1) Starts adding support by implementing Enhanced Descriptors in the stmmac
main core. These are needed for the ETF feature in the XGMAC and QoS cores.

2) Integrates the ETF logic into the stmmac TC core.

3) and 4) add the HW-specific support for ETF in the XGMAC and QoS cores. The
IP feature is called TBS (Time Based Scheduling).

5) Enables ETF in the GMAC5 IPK PCI entry for all queues except queue 0.

6) Adds the new TBS feature, along with more information, to the debugfs
HW features file.
====================
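
For context (not part of this series): the launch time that the ETF qdisc hands
to the driver in skb->tstamp normally originates in userspace, via the
SO_TXTIME socket option plus an SCM_TXTIME control message per packet. A
minimal sketch of the sender side, with error handling omitted and the clock
choice an assumption:

#include <linux/net_tstamp.h>	/* struct sock_txtime */
#include <sys/socket.h>
#include <string.h>
#include <stdint.h>
#include <time.h>		/* CLOCK_TAI */

/* Send one datagram with a launch time (nanoseconds) attached. */
static ssize_t send_at(int fd, const void *buf, size_t len,
		       uint64_t txtime_ns)
{
	struct sock_txtime cfg = { .clockid = CLOCK_TAI };
	char cbuf[CMSG_SPACE(sizeof(txtime_ns))] = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));

	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SCM_TXTIME;
	cm->cmsg_len = CMSG_LEN(sizeof(txtime_ns));
	memcpy(CMSG_DATA(cm), &txtime_ns, sizeof(txtime_ns));

	return sendmsg(fd, &msg, 0);
}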
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents ab9837b5 28c1cf73
......@@ -368,6 +368,7 @@ struct dma_features {
unsigned int estdep;
unsigned int estsel;
unsigned int fpesel;
unsigned int tbssel;
};
/* RX Buffer size must be multiple of 4/8/16 bytes */
......
......@@ -171,6 +171,15 @@ struct dma_extended_desc {
__le32 des7; /* Tx/Rx Timestamp High */
};
/* Enhanced descriptor for TBS */
struct dma_edesc {
__le32 des4;
__le32 des5;
__le32 des6;
__le32 des7;
struct dma_desc basic;
};
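/* Sketch (variable names assumed from later patches in this series):
 * the TBS words des4..des7 precede the embedded basic descriptor, so a
 * TBS ring is an array of 32-byte entries whose tail is a plain
 * descriptor. Shared code keeps using the dma_desc view, TBS-aware
 * code the enclosing one:
 *
 *	struct dma_edesc *ep = &tx_q->dma_entx[entry];
 *	struct dma_desc *p = &ep->basic;           (des0..des3)
 *	stmmac_set_desc_tbs(priv, ep, sec, nsec);  (des4..des5)
 */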
/* Transmit checksum insertion control */
#define TX_CIC_FULL 3 /* Include IP header and pseudoheader */
......
......@@ -239,6 +239,7 @@ enum power_event {
/* MAC HW features3 bitmap */
#define GMAC_HW_FEAT_ASP GENMASK(29, 28)
#define GMAC_HW_FEAT_TBSSEL BIT(27)
#define GMAC_HW_FEAT_FPESEL BIT(26)
#define GMAC_HW_FEAT_ESTWID GENMASK(21, 20)
#define GMAC_HW_FEAT_ESTDEP GENMASK(19, 17)
......
......@@ -10,6 +10,7 @@
#include <linux/stmmac.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac4_descs.h"
static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
......@@ -505,6 +506,14 @@ static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
p->des3 = cpu_to_le32(upper_32_bits(addr) | RDES3_BUFFER2_VALID_ADDR);
}
static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
{
p->des4 = cpu_to_le32((sec & TDES4_LT) | TDES4_LTV);
p->des5 = cpu_to_le32(nsec & TDES5_LT);
p->des6 = 0;
p->des7 = 0;
}
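/* Note the masks above: only sec[7:0] and nsec[31:8] are kept, so
 * (assuming the fields work as named) the launch time wraps every
 * 256 s and has a 256 ns granularity. Sketch of deriving the two
 * arguments from an skb launch time, as stmmac_xmit() does later in
 * this series:
 *
 *	struct timespec64 ts = ns_to_timespec64(skb->tstamp);
 *	dwmac4_set_tbs(edesc, ts.tv_sec, ts.tv_nsec);
 */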
const struct stmmac_desc_ops dwmac4_desc_ops = {
.tx_status = dwmac4_wrback_get_tx_status,
.rx_status = dwmac4_wrback_get_rx_status,
......@@ -534,6 +543,7 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.set_vlan = dwmac4_set_vlan,
.get_rx_header_len = dwmac4_get_rx_header_len,
.set_sec_addr = dwmac4_set_sec_addr,
.set_tbs = dwmac4_set_tbs,
};
const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
......
......@@ -73,6 +73,13 @@
#define TDES3_CONTEXT_TYPE BIT(30)
#define TDES3_CONTEXT_TYPE_SHIFT 30
/* TDES4 */
#define TDES4_LTV BIT(31)
#define TDES4_LT GENMASK(7, 0)
/* TDES5 */
#define TDES5_LT GENMASK(31, 8)
/* TDES3 used by both formats (read and write-back) */
#define TDES3_OWN BIT(31)
#define TDES3_OWN_SHIFT 31
......
......@@ -404,6 +404,7 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
/* 5.10 Features */
dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
dma_cap->tbssel = (hw_cap & GMAC_HW_FEAT_TBSSEL) >> 27;
dma_cap->fpesel = (hw_cap & GMAC_HW_FEAT_FPESEL) >> 26;
dma_cap->estwid = (hw_cap & GMAC_HW_FEAT_ESTWID) >> 20;
dma_cap->estdep = (hw_cap & GMAC_HW_FEAT_ESTDEP) >> 17;
......@@ -471,6 +472,25 @@ static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
static int dwmac4_enable_tbs(void __iomem *ioaddr, bool en, u32 chan)
{
u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
if (en)
value |= DMA_CONTROL_EDSE;
else
value &= ~DMA_CONTROL_EDSE;
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)) & DMA_CONTROL_EDSE;
if (en && !value)
return -EIO;
writel(DMA_TBS_DEF_FTOS, ioaddr + DMA_TBS_CTRL);
return 0;
}
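/* EDSE may read back as zero on configurations synthesized without
 * TBS, hence the write-then-verify before the fetch-time offset is
 * programmed (DMA_TBS_DEF_FTOS sets all FTOS bits plus FTOV, which,
 * if the field works as named, permits the earliest possible
 * descriptor prefetch). Sketch of the caller pattern, mirrored by
 * stmmac_open() later in this series:
 *
 *	if (stmmac_enable_tbs(priv, priv->ioaddr, true, chan))
 *		tx_q->tbs &= ~STMMAC_TBS_AVAIL;
 */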
const struct stmmac_dma_ops dwmac4_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
......@@ -527,4 +547,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
.qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
.enable_sph = dwmac4_enable_sph,
.enable_tbs = dwmac4_enable_tbs,
};
......@@ -22,6 +22,7 @@
#define DMA_DEBUG_STATUS_1 0x00001010
#define DMA_DEBUG_STATUS_2 0x00001014
#define DMA_AXI_BUS_MODE 0x00001028
#define DMA_TBS_CTRL 0x00001050
/* DMA Bus Mode bitmap */
#define DMA_BUS_MODE_SFT_RESET BIT(0)
......@@ -82,6 +83,11 @@
#define DMA_AXI_BURST_LEN_MASK 0x000000FE
/* DMA TBS Control */
#define DMA_TBS_FTOS GENMASK(31, 8)
#define DMA_TBS_FTOV BIT(0)
#define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV)
/* The following DMA defines are channel-oriented */
#define DMA_CHAN_BASE_ADDR 0x00001100
#define DMA_CHAN_BASE_OFFSET 0x80
......@@ -114,6 +120,7 @@
#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
/* DMA Tx Channel X Control register defines */
#define DMA_CONTROL_EDSE BIT(28)
#define DMA_CONTROL_TSE BIT(12)
#define DMA_CONTROL_OSP BIT(4)
#define DMA_CONTROL_ST BIT(0)
......
......@@ -139,6 +139,7 @@
#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
#define XGMAC_HW_FEATURE3 0x00000128
#define XGMAC_HWFEAT_TBSSEL BIT(27)
#define XGMAC_HWFEAT_FPESEL BIT(26)
#define XGMAC_HWFEAT_ESTWID GENMASK(24, 23)
#define XGMAC_HWFEAT_ESTDEP GENMASK(22, 20)
......@@ -346,6 +347,13 @@
#define XGMAC_TDPS GENMASK(29, 0)
#define XGMAC_RX_EDMA_CTRL 0x00003044
#define XGMAC_RDPS GENMASK(29, 0)
#define XGMAC_DMA_TBS_CTRL0 0x00003054
#define XGMAC_DMA_TBS_CTRL1 0x00003058
#define XGMAC_DMA_TBS_CTRL2 0x0000305c
#define XGMAC_DMA_TBS_CTRL3 0x00003060
#define XGMAC_FTOS GENMASK(31, 8)
#define XGMAC_FTOV BIT(0)
#define XGMAC_DEF_FTOS (XGMAC_FTOS | XGMAC_FTOV)
#define XGMAC_DMA_SAFETY_INT_STATUS 0x00003064
#define XGMAC_MCSIS BIT(31)
#define XGMAC_MSUIS BIT(29)
......@@ -360,6 +368,7 @@
#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
#define XGMAC_EDSE BIT(28)
#define XGMAC_TxPBL GENMASK(21, 16)
#define XGMAC_TxPBL_SHIFT 16
#define XGMAC_TSE BIT(12)
......@@ -404,6 +413,9 @@
#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4)
/* Descriptors */
#define XGMAC_TDES0_LTV BIT(31)
#define XGMAC_TDES0_LT GENMASK(7, 0)
#define XGMAC_TDES1_LT GENMASK(31, 8)
#define XGMAC_TDES2_IVT GENMASK(31, 16)
#define XGMAC_TDES2_IVT_SHIFT 16
#define XGMAC_TDES2_IOC BIT(31)
......@@ -422,6 +434,7 @@
#define XGMAC_TDES3_TCMSSV BIT(26)
#define XGMAC_TDES3_SAIC GENMASK(25, 23)
#define XGMAC_TDES3_SAIC_SHIFT 23
#define XGMAC_TDES3_TBSV BIT(24)
#define XGMAC_TDES3_THL GENMASK(22, 19)
#define XGMAC_TDES3_THL_SHIFT 19
#define XGMAC_TDES3_IVTIR GENMASK(19, 18)
......
......@@ -339,6 +339,14 @@ static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type)
p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR);
}
static void dwxgmac2_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
{
p->des4 = cpu_to_le32((sec & XGMAC_TDES0_LT) | XGMAC_TDES0_LTV);
p->des5 = cpu_to_le32(nsec & XGMAC_TDES1_LT);
p->des6 = 0;
p->des7 = 0;
}
const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.tx_status = dwxgmac2_get_tx_status,
.rx_status = dwxgmac2_get_rx_status,
......@@ -368,4 +376,5 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.set_sarc = dwxgmac2_set_sarc,
.set_vlan_tag = dwxgmac2_set_vlan_tag,
.set_vlan = dwxgmac2_set_vlan,
.set_tbs = dwxgmac2_set_tbs,
};
......@@ -429,6 +429,7 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 3 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27;
dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26;
dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23;
dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20;
......@@ -523,6 +524,28 @@ static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
}
static int dwxgmac2_enable_tbs(void __iomem *ioaddr, bool en, u32 chan)
{
u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
if (en)
value |= XGMAC_EDSE;
else
value &= ~XGMAC_EDSE;
writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)) & XGMAC_EDSE;
if (en && !value)
return -EIO;
writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL0);
writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL1);
writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL2);
writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL3);
return 0;
}
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.reset = dwxgmac2_dma_reset,
.init = dwxgmac2_dma_init,
......@@ -550,4 +573,5 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.qmode = dwxgmac2_qmode,
.set_bfsize = dwxgmac2_set_bfsize,
.enable_sph = dwxgmac2_enable_sph,
.enable_tbs = dwxgmac2_enable_tbs,
};
......@@ -29,6 +29,7 @@ struct stmmac_extra_stats;
struct stmmac_safety_stats;
struct dma_desc;
struct dma_extended_desc;
struct dma_edesc;
/* Descriptors helpers */
struct stmmac_desc_ops {
......@@ -95,6 +96,7 @@ struct stmmac_desc_ops {
void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
u32 inner_type);
void (*set_vlan)(struct dma_desc *p, u32 type);
void (*set_tbs)(struct dma_edesc *p, u32 sec, u32 nsec);
};
#define stmmac_init_rx_desc(__priv, __args...) \
......@@ -157,6 +159,8 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args)
#define stmmac_set_desc_vlan(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_vlan, __args)
#define stmmac_set_desc_tbs(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_tbs, __args)
struct stmmac_dma_cfg;
struct dma_features;
......@@ -210,6 +214,7 @@ struct stmmac_dma_ops {
void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan);
int (*enable_tbs)(void __iomem *ioaddr, bool en, u32 chan);
};
#define stmmac_reset(__priv, __args...) \
......@@ -268,6 +273,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
#define stmmac_enable_sph(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_sph, __args)
#define stmmac_enable_tbs(__priv, __args...) \
stmmac_do_callback(__priv, dma, enable_tbs, __args)
struct mac_device_info;
struct net_device;
......@@ -526,6 +533,7 @@ struct tc_cls_u32_offload;
struct tc_cbs_qopt_offload;
struct flow_cls_offload;
struct tc_taprio_qopt_offload;
struct tc_etf_qopt_offload;
struct stmmac_tc_ops {
int (*init)(struct stmmac_priv *priv);
......@@ -537,6 +545,8 @@ struct stmmac_tc_ops {
struct flow_cls_offload *cls);
int (*setup_taprio)(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt);
int (*setup_etf)(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
......@@ -549,6 +559,8 @@ struct stmmac_tc_ops {
stmmac_do_callback(__priv, tc, setup_cls, __args)
#define stmmac_tc_setup_taprio(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_taprio, __args)
#define stmmac_tc_setup_etf(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_etf, __args)
struct stmmac_counters;
......
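All of the new wrappers above dispatch through stmmac's hwif indirection.
Simplified (the real macros in hwif.h also guard the ops module pointer),
the pattern is a NULL-checked indirect call:

#define stmmac_do_callback(__priv, __module, __cname, __args...) \
({ \
	int __result = -EINVAL; \
	if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) \
		__result = (__priv)->hw->__module->__cname(__args); \
	__result; \
})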
......@@ -39,13 +39,18 @@ struct stmmac_tx_info {
bool is_jumbo;
};
#define STMMAC_TBS_AVAIL BIT(0)
#define STMMAC_TBS_EN BIT(1)
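/* Informal sketch of the intended per-queue TBS state transitions:
 *
 *	open():      tbs  =  plat tbs_en && enable_tbs() ok ? AVAIL : 0
 *	tc etf on:   tbs |=  STMMAC_TBS_EN  (rejected unless AVAIL set)
 *	tc etf off:  tbs &= ~STMMAC_TBS_EN
 */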
/* Frequently used values are kept adjacent for cache effect */
struct stmmac_tx_queue {
u32 tx_count_frames;
int tbs;
struct timer_list txtimer;
u32 queue_index;
struct stmmac_priv *priv_data;
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
struct dma_edesc *dma_entx;
struct dma_desc *dma_tx;
struct sk_buff **tx_skbuff;
struct stmmac_tx_info *tx_skbuff_dma;
......
......@@ -1090,6 +1090,8 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
if (priv->extend_desc)
head_tx = (void *)tx_q->dma_etx;
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
head_tx = (void *)tx_q->dma_entx;
else
head_tx = (void *)tx_q->dma_tx;
......@@ -1163,13 +1165,19 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
int i;
/* Clear the TX descriptors */
for (i = 0; i < DMA_TX_SIZE; i++)
for (i = 0; i < DMA_TX_SIZE; i++) {
int last = (i == (DMA_TX_SIZE - 1));
struct dma_desc *p;
if (priv->extend_desc)
stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
priv->mode, (i == DMA_TX_SIZE - 1));
p = &tx_q->dma_etx[i].basic;
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
p = &tx_q->dma_entx[i].basic;
else
stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
priv->mode, (i == DMA_TX_SIZE - 1));
p = &tx_q->dma_tx[i];
stmmac_init_tx_desc(priv, p, priv->mode, last);
}
}
/**
......@@ -1383,7 +1391,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
else
else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
}
......@@ -1392,6 +1400,8 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
struct dma_desc *p;
if (priv->extend_desc)
p = &((tx_q->dma_etx + i)->basic);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
p = &((tx_q->dma_entx + i)->basic);
else
p = tx_q->dma_tx + i;
......@@ -1511,19 +1521,26 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
/* Free TX queue resources */
for (queue = 0; queue < tx_count; queue++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
size_t size;
void *addr;
/* Release the DMA TX socket buffers */
dma_free_tx_skbufs(priv, queue);
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc)
dma_free_coherent(priv->device,
DMA_TX_SIZE * sizeof(struct dma_desc),
tx_q->dma_tx, tx_q->dma_tx_phy);
else
dma_free_coherent(priv->device, DMA_TX_SIZE *
sizeof(struct dma_extended_desc),
tx_q->dma_etx, tx_q->dma_tx_phy);
if (priv->extend_desc) {
size = sizeof(struct dma_extended_desc);
addr = tx_q->dma_etx;
} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
size = sizeof(struct dma_edesc);
addr = tx_q->dma_entx;
} else {
size = sizeof(struct dma_desc);
addr = tx_q->dma_tx;
}
size *= DMA_TX_SIZE;
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
kfree(tx_q->tx_skbuff_dma);
kfree(tx_q->tx_skbuff);
......@@ -1616,6 +1633,8 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
/* TX queues buffers and DMA */
for (queue = 0; queue < tx_count; queue++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
size_t size;
void *addr;
tx_q->queue_index = queue;
tx_q->priv_data = priv;
......@@ -1632,28 +1651,32 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
if (!tx_q->tx_skbuff)
goto err_dma;
if (priv->extend_desc) {
tx_q->dma_etx = dma_alloc_coherent(priv->device,
DMA_TX_SIZE * sizeof(struct dma_extended_desc),
&tx_q->dma_tx_phy,
GFP_KERNEL);
if (!tx_q->dma_etx)
goto err_dma;
} else {
tx_q->dma_tx = dma_alloc_coherent(priv->device,
DMA_TX_SIZE * sizeof(struct dma_desc),
&tx_q->dma_tx_phy,
GFP_KERNEL);
if (!tx_q->dma_tx)
goto err_dma;
}
if (priv->extend_desc)
size = sizeof(struct dma_extended_desc);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
size = sizeof(struct dma_edesc);
else
size = sizeof(struct dma_desc);
size *= DMA_TX_SIZE;
addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL);
if (!addr)
goto err_dma;
if (priv->extend_desc)
tx_q->dma_etx = addr;
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
tx_q->dma_entx = addr;
else
tx_q->dma_tx = addr;
}
return 0;
err_dma:
free_dma_tx_desc_resources(priv);
return ret;
}
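/* The same three-way size selection recurs in the free path, in the
 * tail pointer math and in stmmac_xmit(). A hypothetical helper that
 * captures it (not part of this patch, shown only to make the pattern
 * explicit):
 *
 *	static size_t stmmac_tx_desc_size(struct stmmac_priv *priv,
 *					  struct stmmac_tx_queue *tx_q)
 *	{
 *		if (priv->extend_desc)
 *			return sizeof(struct dma_extended_desc);
 *		if (tx_q->tbs & STMMAC_TBS_AVAIL)
 *			return sizeof(struct dma_edesc);
 *		return sizeof(struct dma_desc);
 *	}
 */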
......@@ -1885,6 +1908,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (priv->extend_desc)
p = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
p = &tx_q->dma_entx[entry].basic;
else
p = tx_q->dma_tx + entry;
......@@ -1983,19 +2008,12 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
int i;
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
stmmac_stop_tx_dma(priv, chan);
dma_free_tx_skbufs(priv, chan);
for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
priv->mode, (i == DMA_TX_SIZE - 1));
else
stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
priv->mode, (i == DMA_TX_SIZE - 1));
stmmac_clear_tx_descriptors(priv, chan);
tx_q->dirty_tx = 0;
tx_q->cur_tx = 0;
tx_q->mss = 0;
......@@ -2632,6 +2650,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (priv->dma_cap.vlins)
stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
/* TBS */
for (chan = 0; chan < tx_cnt; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
}
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
......@@ -2689,6 +2715,16 @@ static int stmmac_open(struct net_device *dev)
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
/* Earlier check for TBS */
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
tx_q->tbs &= ~STMMAC_TBS_AVAIL;
}
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
......@@ -2837,7 +2873,11 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
tag = skb_vlan_tag_get(skb);
p = tx_q->dma_tx + tx_q->cur_tx;
if (tx_q->tbs & STMMAC_TBS_AVAIL)
p = &tx_q->dma_entx[tx_q->cur_tx].basic;
else
p = &tx_q->dma_tx[tx_q->cur_tx];
if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
return false;
......@@ -2872,7 +2912,11 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
desc = tx_q->dma_tx + tx_q->cur_tx;
if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
else
desc = &tx_q->dma_tx[tx_q->cur_tx];
curr_addr = des + (total_len - tmp_len);
if (priv->dma_cap.addr64 <= 32)
......@@ -2923,13 +2967,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
int desc_size, tmp_pay_len = 0, first_tx;
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
unsigned int first_entry, tx_packets;
int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
u8 proto_hdr_len, hdr;
bool has_vlan, set_ic;
u8 proto_hdr_len, hdr;
u32 pay_len, mss;
dma_addr_t des;
int i;
......@@ -2966,7 +3010,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* set new MSS value if needed */
if (mss != tx_q->mss) {
mss_desc = tx_q->dma_tx + tx_q->cur_tx;
if (tx_q->tbs & STMMAC_TBS_AVAIL)
mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
else
mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
......@@ -2986,7 +3034,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
first_entry = tx_q->cur_tx;
WARN_ON(tx_q->tx_skbuff[first_entry]);
desc = tx_q->dma_tx + first_entry;
if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc = &tx_q->dma_entx[first_entry].basic;
else
desc = &tx_q->dma_tx[first_entry];
first = desc;
if (has_vlan)
......@@ -3058,7 +3109,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
set_ic = false;
if (set_ic) {
desc = &tx_q->dma_tx[tx_q->cur_tx];
if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
else
desc = &tx_q->dma_tx[tx_q->cur_tx];
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
......@@ -3121,16 +3176,18 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
tx_q->cur_tx, first, nfrags);
stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
pr_info(">>> frame to be transmitted: ");
print_pkt(skb->data, skb_headlen(skb));
}
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc_size = sizeof(struct dma_edesc);
else
desc_size = sizeof(struct dma_desc);
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
stmmac_tx_timer_arm(priv, queue);
......@@ -3160,10 +3217,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
int gso = skb_shinfo(skb)->gso_type;
struct dma_edesc *tbs_desc = NULL;
int entry, desc_size, first_tx;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
int entry, first_tx;
dma_addr_t des;
tx_q = &priv->tx_queue[queue];
......@@ -3203,6 +3261,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc = &tx_q->dma_entx[entry].basic;
else
desc = tx_q->dma_tx + entry;
......@@ -3232,6 +3292,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc = &tx_q->dma_entx[entry].basic;
else
desc = tx_q->dma_tx + entry;
......@@ -3278,6 +3340,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (set_ic) {
if (likely(priv->extend_desc))
desc = &tx_q->dma_etx[entry].basic;
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc = &tx_q->dma_entx[entry].basic;
else
desc = &tx_q->dma_tx[entry];
......@@ -3295,20 +3359,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
void *tx_head;
netdev_dbg(priv->dev,
"%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
entry, first, nfrags);
if (priv->extend_desc)
tx_head = (void *)tx_q->dma_etx;
else
tx_head = (void *)tx_q->dma_tx;
stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
}
......@@ -3354,12 +3409,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Prepare the first descriptor setting the OWN bit too */
stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
csum_insertion, priv->mode, 1, last_segment,
csum_insertion, priv->mode, 0, last_segment,
skb->len);
} else {
stmmac_set_tx_owner(priv, first);
}
if (tx_q->tbs & STMMAC_TBS_EN) {
struct timespec64 ts = ns_to_timespec64(skb->tstamp);
tbs_desc = &tx_q->dma_entx[first_entry];
stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
}
stmmac_set_tx_owner(priv, first);
/* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
......@@ -3370,7 +3432,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_enable_dma_transmission(priv, priv->ioaddr);
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
if (likely(priv->extend_desc))
desc_size = sizeof(struct dma_extended_desc);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc_size = sizeof(struct dma_edesc);
else
desc_size = sizeof(struct dma_desc);
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
stmmac_tx_timer_arm(priv, queue);
......@@ -4090,6 +4159,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
return stmmac_tc_setup_cbs(priv, priv, type_data);
case TC_SETUP_QDISC_TAPRIO:
return stmmac_tc_setup_taprio(priv, priv, type_data);
case TC_SETUP_QDISC_ETF:
return stmmac_tc_setup_etf(priv, priv, type_data);
default:
return -EOPNOTSUPP;
}
......@@ -4193,7 +4264,7 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx,
DMA_TX_SIZE, 1, seq);
} else {
} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx,
DMA_TX_SIZE, 0, seq);
......@@ -4294,6 +4365,12 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
priv->dma_cap.l3l4fnum);
seq_printf(seq, "\tARP Offloading: %s\n",
priv->dma_cap.arpoffsel ? "Y" : "N");
seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
priv->dma_cap.estsel ? "Y" : "N");
seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
priv->dma_cap.fpesel ? "Y" : "N");
seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
priv->dma_cap.tbssel ? "Y" : "N");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
......
......@@ -401,6 +401,8 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->tx_queues_cfg[i].use_prio = false;
plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
plat->tx_queues_cfg[i].weight = 25;
if (i > 0)
plat->tx_queues_cfg[i].tbs_en = 1;
}
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
......
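Queue 0 is deliberately left without TBS so one queue always carries ordinary,
untimed traffic. On a queue that does advertise TBS, ETF offload can then be
attached from userspace; an illustrative configuration (interface name,
handles and delta value are assumptions):

tc qdisc replace dev eth0 root handle 100: mqprio num_tc 2 \
	map 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 queues 1@0 1@1 hw 0
tc qdisc replace dev eth0 parent 100:2 etf \
	clockid CLOCK_TAI delta 300000 offload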
......@@ -14,6 +14,7 @@
#include <linux/phy.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
......@@ -50,6 +51,7 @@ struct stmmac_packet_attrs {
u8 id;
int sarc;
u16 queue_mapping;
u64 timestamp;
};
static u8 stmmac_test_next_id;
......@@ -208,6 +210,9 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
skb->pkt_type = PACKET_HOST;
skb->dev = priv->dev;
if (attr->timestamp)
skb->tstamp = ns_to_ktime(attr->timestamp);
return skb;
}
......@@ -339,8 +344,7 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
goto cleanup;
}
skb_set_queue_mapping(skb, attr->queue_mapping);
ret = dev_queue_xmit(skb);
ret = dev_direct_xmit(skb, attr->queue_mapping);
if (ret)
goto cleanup;
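/* dev_direct_xmit() injects the frame straight into the selected TX
 * queue, bypassing any installed qdisc, so an ETF qdisc under test
 * cannot re-schedule or drop the self-test frames. For reference, its
 * signature at the time of this series:
 *
 *	int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
 */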
......@@ -926,8 +930,7 @@ static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
goto vlan_del;
}
skb_set_queue_mapping(skb, 0);
ret = dev_queue_xmit(skb);
ret = dev_direct_xmit(skb, 0);
if (ret)
goto vlan_del;
......@@ -1018,8 +1021,7 @@ static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
goto vlan_del;
}
skb_set_queue_mapping(skb, 0);
ret = dev_queue_xmit(skb);
ret = dev_direct_xmit(skb, 0);
if (ret)
goto vlan_del;
......@@ -1286,8 +1288,7 @@ static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
__vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
skb->protocol = htons(proto);
skb_set_queue_mapping(skb, 0);
ret = dev_queue_xmit(skb);
ret = dev_direct_xmit(skb, 0);
if (ret)
goto vlan_del;
......@@ -1639,8 +1640,7 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv)
if (ret)
goto cleanup;
skb_set_queue_mapping(skb, 0);
ret = dev_queue_xmit(skb);
ret = dev_direct_xmit(skb, 0);
if (ret)
goto cleanup_promisc;
......@@ -1728,6 +1728,68 @@ static int stmmac_test_sph(struct stmmac_priv *priv)
return 0;
}
static int stmmac_test_tbs(struct stmmac_priv *priv)
{
#define STMMAC_TBS_LT_OFFSET (500 * 1000 * 1000) /* 500 ms */
struct stmmac_packet_attrs attr = { };
struct tc_etf_qopt_offload qopt;
u64 start_time, curr_time = 0;
unsigned long flags;
int ret, i;
if (!priv->hwts_tx_en)
return -EOPNOTSUPP;
/* Find first TBS enabled Queue, if any */
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL)
break;
if (i >= priv->plat->tx_queues_to_use)
return -EOPNOTSUPP;
qopt.enable = true;
qopt.queue = i;
ret = stmmac_tc_setup_etf(priv, priv, &qopt);
if (ret)
return ret;
spin_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
if (!curr_time) {
ret = -EOPNOTSUPP;
goto fail_disable;
}
start_time = curr_time;
curr_time += STMMAC_TBS_LT_OFFSET;
attr.dst = priv->dev->dev_addr;
attr.timestamp = curr_time;
attr.timeout = nsecs_to_jiffies(2 * STMMAC_TBS_LT_OFFSET);
attr.queue_mapping = i;
ret = __stmmac_test_loopback(priv, &attr);
if (ret)
goto fail_disable;
/* Check if expected time has elapsed */
spin_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
if ((curr_time - start_time) < STMMAC_TBS_LT_OFFSET)
ret = -EINVAL;
fail_disable:
qopt.enable = false;
stmmac_tc_setup_etf(priv, priv, &qopt);
return ret;
}
#define STMMAC_LOOPBACK_NONE 0
#define STMMAC_LOOPBACK_MAC 1
#define STMMAC_LOOPBACK_PHY 2
......@@ -1861,6 +1923,10 @@ static const struct stmmac_test {
.name = "Split Header ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_sph,
}, {
.name = "TBS (ETF Scheduler) ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_tbs,
},
};
......@@ -1869,7 +1935,6 @@ void stmmac_selftest_run(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
int count = stmmac_selftest_get_count(priv);
int carrier = netif_carrier_ok(dev);
int i, ret;
memset(buf, 0, sizeof(*buf) * count);
......@@ -1879,15 +1944,12 @@ void stmmac_selftest_run(struct net_device *dev,
netdev_err(priv->dev, "Only offline tests are supported\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
} else if (!carrier) {
} else if (!netif_carrier_ok(dev)) {
netdev_err(priv->dev, "You need valid Link to execute tests\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
/* We don't want extra traffic */
netif_carrier_off(dev);
/* Wait for queues drain */
msleep(200);
......@@ -1942,10 +2004,6 @@ void stmmac_selftest_run(struct net_device *dev,
break;
}
}
/* Restart everything */
if (carrier)
netif_carrier_on(dev);
}
void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
......
......@@ -727,10 +727,31 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return ret;
}
static int tc_setup_etf(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt)
{
if (!priv->dma_cap.tbssel)
return -EOPNOTSUPP;
if (qopt->queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
return -EINVAL;
if (qopt->enable)
priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
else
priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
netdev_info(priv->dev, "%s ETF for Queue %d\n",
qopt->enable ? "enabled" : "disabled", qopt->queue);
return 0;
}
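/* For reference, the offload request handled above (declared in
 * include/net/pkt_sched.h) is just an enable flag plus a queue index:
 *
 *	struct tc_etf_qopt_offload {
 *		u8 enable;
 *		s32 queue;
 *	};
 */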
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
.setup_cbs = tc_setup_cbs,
.setup_cls = tc_setup_cls,
.setup_taprio = tc_setup_taprio,
.setup_etf = tc_setup_etf,
};
......@@ -139,6 +139,7 @@ struct stmmac_txq_cfg {
u32 low_credit;
bool use_prio;
u32 prio;
int tbs_en;
};
struct plat_stmmacenet_data {
......