Commit d54e1348 authored by David S. Miller

Merge branch 'stmmac-multivector-msi'

Voon Weifeng says:

====================
net: stmmac: enable multi-vector MSI

This patchset adds support for multi-vector MSI interrupts in addition
to the current single common interrupt implementation. Each MSI
interrupt is tied to a newly introduced interrupt service routine (ISR),
so each interrupt only goes through its corresponding ISR.

To increase efficiency, enabling multi-vector MSI automatically selects
the interrupt mode configuration INTM=1. With INTM=1, the TX/RX transfer
complete signal is asserted only on the corresponding
sbd_perch_tx_intr_o[] or sbd_perch_rx_intr_o[] line, without asserting
the common sbd_intr_o. Hence, for each TX/RX interrupt, only the
corresponding ISR is triggered.

Every vendor may have a different MSI vector assignment, so this
patchset only includes the multi-vector MSI assignment for the Intel
platform.

Changes:
v1 -> v2
 patch 2/5
 -Remove defensive check for invalid dev pointer
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6c996e19 6ccf12ae
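Before the diff itself, a minimal sketch of the pattern the series applies may help: allocate a block of MSI vectors on the PCI device, resolve each vector to a Linux IRQ number, and register a dedicated handler per vector. Only pci_alloc_irq_vectors(), pci_irq_vector() and request_irq() are real kernel APIs here; every demo_* name is an invented stand-in, not the driver's code.

/* Minimal sketch of per-vector MSI wiring; demo_* names are invented. */
#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t demo_rx_isr(int irq, void *data)
{
	/* Services exactly one RX queue; no shared-status demux needed. */
	return IRQ_HANDLED;
}

static int demo_setup_msi(struct pci_dev *pdev, void **queues, int nqueues)
{
	int nvec, i, ret;

	nvec = pci_alloc_irq_vectors(pdev, nqueues, nqueues,
				     PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;	/* caller can fall back to one shared IRQ */

	for (i = 0; i < nqueues; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), demo_rx_isr,
				  0, "demo-rx", queues[i]);
		if (ret)
			return ret;	/* real code must free vectors 0..i-1 */
	}
	return 0;
}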
@@ -259,6 +259,9 @@ struct stmmac_safety_stats {
#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
#define DEFAULT_DMA_PBL 8
/* MSI defines */
#define STMMAC_MSI_VEC_MAX 32
/* PCS status and mask defines */
#define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */
#define PCS_LINK_IRQ BIT(1) /* PCS Link */
@@ -309,6 +312,24 @@ enum dma_irq_status {
handle_tx = 0x8,
};
enum dma_irq_dir {
DMA_DIR_RX = 0x1,
DMA_DIR_TX = 0x2,
DMA_DIR_RXTX = 0x3,
};
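Note: DMA_DIR_RXTX is the bitwise OR of DMA_DIR_RX and DMA_DIR_TX, so the legacy shared-interrupt path can keep servicing both directions with one call; see the DMA_DIR_RXTX use in stmmac_dma_interrupt() further down.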
enum request_irq_err {
REQ_IRQ_ERR_ALL,
REQ_IRQ_ERR_TX,
REQ_IRQ_ERR_RX,
REQ_IRQ_ERR_SFTY_UE,
REQ_IRQ_ERR_SFTY_CE,
REQ_IRQ_ERR_LPI,
REQ_IRQ_ERR_WOL,
REQ_IRQ_ERR_MAC,
REQ_IRQ_ERR_NO,
};
/* EEE and LPI defines */
#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
@@ -492,6 +492,14 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->has_crossts = true;
plat->crosststamp = intel_crosststamp;
/* Setup MSI vector offset specific to Intel mGbE controller */
plat->msi_mac_vec = 29;
plat->msi_lpi_vec = 28;
plat->msi_sfty_ce_vec = 27;
plat->msi_sfty_ue_vec = 26;
plat->msi_rx_base_vec = 0;
plat->msi_tx_base_vec = 1;
return 0;
}
@@ -776,6 +784,79 @@ static const struct stmmac_pci_info quark_info = {
.setup = quark_default_data,
};
static int stmmac_config_single_msi(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res)
{
int ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0) {
dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
__func__);
return ret;
}
res->irq = pci_irq_vector(pdev, 0);
res->wol_irq = res->irq;
plat->multi_msi_en = 0;
dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
__func__);
return 0;
}
static int stmmac_config_multi_msi(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res)
{
int ret;
int i;
if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
__func__);
return -1;
}
ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret < 0) {
dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
__func__);
return ret;
}
/* For RX MSI */
for (i = 0; i < plat->rx_queues_to_use; i++) {
res->rx_irq[i] = pci_irq_vector(pdev,
plat->msi_rx_base_vec + i * 2);
}
/* For TX MSI */
for (i = 0; i < plat->tx_queues_to_use; i++) {
res->tx_irq[i] = pci_irq_vector(pdev,
plat->msi_tx_base_vec + i * 2);
}
if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
plat->multi_msi_en = 1;
dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
return 0;
}
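With the Intel vector offsets set in intel_mgbe_common_data() above (msi_rx_base_vec = 0, msi_tx_base_vec = 1), the base + i * 2 stride interleaves the queue vectors: RX queues take the even vectors and TX queues the odd ones. A small standalone illustration (plain userspace C; the values are copied from the platform data above):

/* Prints the assumed Intel mGbE vector layout; illustration only. */
#include <stdio.h>

int main(void)
{
	int msi_rx_base_vec = 0, msi_tx_base_vec = 1;
	int i;

	for (i = 0; i < 4; i++)
		printf("rx-%d -> vector %d, tx-%d -> vector %d\n",
		       i, msi_rx_base_vec + i * 2,
		       i, msi_tx_base_vec + i * 2);
	/* sfty_ue=26, sfty_ce=27, lpi=28, mac=29 per the platform data */
	return 0;
}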
/**
* intel_eth_pci_probe
*
@@ -833,18 +914,24 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
plat->bsp_priv = intel_priv;
intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
/* Initialize all MSI vectors to invalid so that they can be set
 * according to the platform data settings below.
 * Note: MSI vectors take values from 0 up to 31 (STMMAC_MSI_VEC_MAX - 1);
 * STMMAC_MSI_VEC_MAX itself marks a vector as unused.
 */
plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;
ret = info->setup(pdev, plat);
if (ret)
return ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
memset(&res, 0, sizeof(res));
res.addr = pcim_iomap_table(pdev)[0];
res.wol_irq = pci_irq_vector(pdev, 0);
res.irq = pci_irq_vector(pdev, 0);
if (plat->eee_usecs_rate > 0) {
u32 tx_lpi_usec;
@@ -853,13 +940,28 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
}
ret = stmmac_config_multi_msi(pdev, plat, &res);
if (ret) {
ret = stmmac_config_single_msi(pdev, plat, &res);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
__func__);
goto err_alloc_irq;
}
}
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (ret) {
pci_free_irq_vectors(pdev);
clk_disable_unprepare(plat->stmmac_clk);
clk_unregister_fixed_rate(plat->stmmac_clk);
goto err_dvr_probe;
}
return 0;
err_dvr_probe:
pci_free_irq_vectors(pdev);
err_alloc_irq:
clk_disable_unprepare(plat->stmmac_clk);
clk_unregister_fixed_rate(plat->stmmac_clk);
return ret;
}
@@ -239,6 +239,22 @@ static const struct emac_variant emac_variant_h6 = {
#define EMAC_RX_EARLY_INT BIT(13)
#define EMAC_RGMII_STA_INT BIT(16)
#define EMAC_INT_MSK_COMMON EMAC_RGMII_STA_INT
#define EMAC_INT_MSK_TX (EMAC_TX_INT | \
EMAC_TX_DMA_STOP_INT | \
EMAC_TX_BUF_UA_INT | \
EMAC_TX_TIMEOUT_INT | \
EMAC_TX_UNDERFLOW_INT | \
EMAC_TX_EARLY_INT |\
EMAC_INT_MSK_COMMON)
#define EMAC_INT_MSK_RX (EMAC_RX_INT | \
EMAC_RX_BUF_UA_INT | \
EMAC_RX_DMA_STOP_INT | \
EMAC_RX_TIMEOUT_INT | \
EMAC_RX_OVERFLOW_INT | \
EMAC_RX_EARLY_INT | \
EMAC_INT_MSK_COMMON)
#define MAC_ADDR_TYPE_DST BIT(31)
/* H3 specific bits for EPHY */
@@ -412,13 +428,19 @@ static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
}
static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan)
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
u32 v;
int ret = 0;
v = readl(ioaddr + EMAC_INT_STA);
if (dir == DMA_DIR_RX)
v &= EMAC_INT_MSK_RX;
else if (dir == DMA_DIR_TX)
v &= EMAC_INT_MSK_TX;
if (v & EMAC_TX_INT) {
ret |= handle_tx;
x->tx_normal_irq_n++;
@@ -161,6 +161,13 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
value |= DMA_SYS_BUS_EAME;
writel(value, ioaddr + DMA_SYS_BUS_MODE);
if (dma_cfg->multi_msi_en) {
value = readl(ioaddr + DMA_BUS_MODE);
value &= ~DMA_BUS_MODE_INTM_MASK;
value |= (DMA_BUS_MODE_INTM_MODE1 << DMA_BUS_MODE_INTM_SHIFT);
writel(value, ioaddr + DMA_BUS_MODE);
}
}
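This read-modify-write programs the INTM field (bits 17:16 of DMA_BUS_MODE, matching the mask and shift added to dwmac4_dma.h below) to mode 1, which is what makes the hardware raise only the per-channel TX/RX interrupt outputs described in the cover letter instead of the common line.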
static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
@@ -25,6 +25,9 @@
#define DMA_TBS_CTRL 0x00001050
/* DMA Bus Mode bitmap */
#define DMA_BUS_MODE_INTM_MASK GENMASK(17, 16)
#define DMA_BUS_MODE_INTM_SHIFT 16
#define DMA_BUS_MODE_INTM_MODE1 0x1
#define DMA_BUS_MODE_SFT_RESET BIT(0)
/* DMA SYS Bus Mode bitmap */
@@ -149,6 +152,25 @@
#define DMA_CHAN_STATUS_TPS BIT(1)
#define DMA_CHAN_STATUS_TI BIT(0)
#define DMA_CHAN_STATUS_MSK_COMMON (DMA_CHAN_STATUS_NIS | \
DMA_CHAN_STATUS_AIS | \
DMA_CHAN_STATUS_CDE | \
DMA_CHAN_STATUS_FBE)
#define DMA_CHAN_STATUS_MSK_RX (DMA_CHAN_STATUS_REB | \
DMA_CHAN_STATUS_ERI | \
DMA_CHAN_STATUS_RWT | \
DMA_CHAN_STATUS_RPS | \
DMA_CHAN_STATUS_RBU | \
DMA_CHAN_STATUS_RI | \
DMA_CHAN_STATUS_MSK_COMMON)
#define DMA_CHAN_STATUS_MSK_TX (DMA_CHAN_STATUS_ETI | \
DMA_CHAN_STATUS_TBU | \
DMA_CHAN_STATUS_TPS | \
DMA_CHAN_STATUS_TI | \
DMA_CHAN_STATUS_MSK_COMMON)
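These masks let one channel status register serve two direction-specific ISRs: each handler intersects the raw status with its own mask, so the RX ISR never consumes TX bits and vice versa, while the summary and error bits in DMA_CHAN_STATUS_MSK_COMMON stay visible to both. A runnable toy version of the idea (bit positions are invented, not the real dwmac4 layout):

/* Toy demo of directional status masking; bit positions are invented. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define STAT_TI		BIT(0)	/* transmit complete */
#define STAT_RI		BIT(6)	/* receive complete */
#define STAT_NIS	BIT(15)	/* normal interrupt summary */

#define MSK_COMMON	STAT_NIS
#define MSK_RX		(STAT_RI | MSK_COMMON)
#define MSK_TX		(STAT_TI | MSK_COMMON)

int main(void)
{
	uint32_t status = STAT_TI | STAT_RI | STAT_NIS;	/* both raised */

	printf("rx view: 0x%04x\n", status & MSK_RX);	/* RI + NIS only */
	printf("tx view: 0x%04x\n", status & MSK_TX);	/* TI + NIS only */
	return 0;
}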
/* Interrupt enable bits per channel */
#define DMA_CHAN_INTR_ENA_NIE BIT(16)
#define DMA_CHAN_INTR_ENA_AIE BIT(15)
@@ -206,7 +228,7 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
int dwmac4_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan);
struct stmmac_extra_stats *x, u32 chan, u32 dir);
void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
@@ -135,12 +135,17 @@ void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
}
int dwmac4_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan)
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
int ret = 0;
if (dir == DMA_DIR_RX)
intr_status &= DMA_CHAN_STATUS_MSK_RX;
else if (dir == DMA_DIR_TX)
intr_status &= DMA_CHAN_STATUS_MSK_TX;
/* ABNORMAL interrupts */
if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
if (unlikely(intr_status & DMA_CHAN_STATUS_RBU))
@@ -161,20 +166,19 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
}
}
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_CHAN_STATUS_NIS)) {
if (likely(intr_status & DMA_CHAN_STATUS_NIS))
x->normal_irq_n++;
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
x->rx_normal_irq_n++;
ret |= handle_rx;
}
if (likely(intr_status & (DMA_CHAN_STATUS_TI |
DMA_CHAN_STATUS_TBU))) {
x->tx_normal_irq_n++;
ret |= handle_tx;
}
if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
x->rx_early_irq++;
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
x->rx_normal_irq_n++;
ret |= handle_rx;
}
if (likely(intr_status & (DMA_CHAN_STATUS_TI |
DMA_CHAN_STATUS_TBU))) {
x->tx_normal_irq_n++;
ret |= handle_tx;
}
if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
x->rx_early_irq++;
writel(intr_status & intr_en, ioaddr + DMA_CHAN_STATUS(chan));
return ret;
@@ -128,6 +128,26 @@
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
#define DMA_STATUS_MSK_COMMON (DMA_STATUS_NIS | \
DMA_STATUS_AIS | \
DMA_STATUS_FBI)
#define DMA_STATUS_MSK_RX (DMA_STATUS_ERI | \
DMA_STATUS_RWT | \
DMA_STATUS_RPS | \
DMA_STATUS_RU | \
DMA_STATUS_RI | \
DMA_STATUS_OVF | \
DMA_STATUS_MSK_COMMON)
#define DMA_STATUS_MSK_TX (DMA_STATUS_ETI | \
DMA_STATUS_UNF | \
DMA_STATUS_TJT | \
DMA_STATUS_TU | \
DMA_STATUS_TPS | \
DMA_STATUS_TI | \
DMA_STATUS_MSK_COMMON)
#define NUM_DWMAC100_DMA_REGS 9
#define NUM_DWMAC1000_DMA_REGS 23
@@ -139,7 +159,7 @@ void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
u32 chan);
u32 chan, u32 dir);
int dwmac_dma_reset(void __iomem *ioaddr);
#endif /* __DWMAC_DMA_H__ */
@@ -155,7 +155,7 @@ static void show_rx_process_state(unsigned int status)
#endif
int dwmac_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan)
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
int ret = 0;
/* read the status register (CSR5) */
@@ -167,6 +167,12 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
show_tx_process_state(intr_status);
show_rx_process_state(intr_status);
#endif
if (dir == DMA_DIR_RX)
intr_status &= DMA_STATUS_MSK_RX;
else if (dir == DMA_DIR_TX)
intr_status &= DMA_STATUS_MSK_TX;
/* ABNORMAL interrupts */
if (unlikely(intr_status & DMA_STATUS_AIS)) {
if (unlikely(intr_status & DMA_STATUS_UNF)) {
@@ -412,6 +412,12 @@
#define XGMAC_TI BIT(0)
#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4)
#define XGMAC_DMA_STATUS_MSK_COMMON (XGMAC_NIS | XGMAC_AIS | XGMAC_FBE)
#define XGMAC_DMA_STATUS_MSK_RX (XGMAC_RBU | XGMAC_RI | \
XGMAC_DMA_STATUS_MSK_COMMON)
#define XGMAC_DMA_STATUS_MSK_TX (XGMAC_TBU | XGMAC_TPS | XGMAC_TI | \
XGMAC_DMA_STATUS_MSK_COMMON)
/* Descriptors */
#define XGMAC_TDES0_LTV BIT(31)
#define XGMAC_TDES0_LT GENMASK(7, 0)
@@ -323,12 +323,18 @@ static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
}
static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan)
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
if (dir == DMA_DIR_RX)
intr_status &= XGMAC_DMA_STATUS_MSK_RX;
else if (dir == DMA_DIR_TX)
intr_status &= XGMAC_DMA_STATUS_MSK_TX;
/* ABNORMAL interrupts */
if (unlikely(intr_status & XGMAC_AIS)) {
if (unlikely(intr_status & XGMAC_RBU)) {
@@ -201,7 +201,7 @@ struct stmmac_dma_ops {
void (*start_rx)(void __iomem *ioaddr, u32 chan);
void (*stop_rx)(void __iomem *ioaddr, u32 chan);
int (*dma_interrupt) (void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan);
struct stmmac_extra_stats *x, u32 chan, u32 dir);
/* If supported then get the optional core features */
void (*get_hw_feature)(void __iomem *ioaddr,
struct dma_features *dma_cap);
@@ -30,6 +30,10 @@ struct stmmac_resources {
int wol_irq;
int lpi_irq;
int irq;
int sfty_ce_irq;
int sfty_ue_irq;
int rx_irq[MTL_MAX_RX_QUEUES];
int tx_irq[MTL_MAX_TX_QUEUES];
};
struct stmmac_tx_info {
@@ -225,6 +229,18 @@ struct stmmac_priv {
void __iomem *mmcaddr;
void __iomem *ptpaddr;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
int sfty_ce_irq;
int sfty_ue_irq;
int rx_irq[MTL_MAX_RX_QUEUES];
int tx_irq[MTL_MAX_TX_QUEUES];
/* IRQ names */
char int_name_mac[IFNAMSIZ + 9];
char int_name_wol[IFNAMSIZ + 9];
char int_name_lpi[IFNAMSIZ + 9];
char int_name_sfty_ce[IFNAMSIZ + 10];
char int_name_sfty_ue[IFNAMSIZ + 10];
char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18];
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
@@ -105,6 +105,11 @@ module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
@@ -2337,10 +2342,10 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
return false;
}
static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan);
&priv->xstats, chan, dir);
struct stmmac_channel *ch = &priv->channel[chan];
unsigned long flags;
@@ -2386,7 +2391,8 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
channels_to_check = ARRAY_SIZE(status);
for (chan = 0; chan < channels_to_check; chan++)
status[chan] = stmmac_napi_check(priv, chan);
status[chan] = stmmac_napi_check(priv, chan,
DMA_DIR_RXTX);
for (chan = 0; chan < tx_channel_count; chan++) {
if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
@@ -2985,6 +2991,260 @@ static void stmmac_hw_teardown(struct net_device *dev)
clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
static void stmmac_free_irq(struct net_device *dev,
enum request_irq_err irq_err, int irq_idx)
{
struct stmmac_priv *priv = netdev_priv(dev);
int j;
switch (irq_err) {
case REQ_IRQ_ERR_ALL:
irq_idx = priv->plat->tx_queues_to_use;
fallthrough;
case REQ_IRQ_ERR_TX:
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->tx_irq[j] > 0)
free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
}
irq_idx = priv->plat->rx_queues_to_use;
fallthrough;
case REQ_IRQ_ERR_RX:
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->rx_irq[j] > 0)
free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
}
if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
free_irq(priv->sfty_ue_irq, dev);
fallthrough;
case REQ_IRQ_ERR_SFTY_UE:
if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
free_irq(priv->sfty_ce_irq, dev);
fallthrough;
case REQ_IRQ_ERR_SFTY_CE:
if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
free_irq(priv->lpi_irq, dev);
fallthrough;
case REQ_IRQ_ERR_LPI:
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
fallthrough;
case REQ_IRQ_ERR_WOL:
free_irq(dev->irq, dev);
fallthrough;
case REQ_IRQ_ERR_MAC:
case REQ_IRQ_ERR_NO:
/* If MAC IRQ request error, no more IRQ to free */
break;
}
}
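The switch above depends on the declaration order of enum request_irq_err: jumping in at the stage that failed and falling through frees exactly the IRQs that were successfully requested before it, with irq_idx bounding the partially requested per-queue arrays. A compact runnable illustration of the same unwind pattern (the stages are generic stand-ins, not the driver's full list):

/* Fallthrough unwind demo; stage names are stand-ins for the real enum. */
#include <stdio.h>

enum stage { ERR_ALL, ERR_TX, ERR_RX, ERR_MAC, ERR_NO };

static void unwind(enum stage failed_at)
{
	switch (failed_at) {
	case ERR_ALL:
	case ERR_TX:
		puts("free tx vectors requested so far");
		/* fall through */
	case ERR_RX:
		puts("free rx vectors requested so far");
		puts("free mac irq");	/* requested before any queue irq */
		break;
	case ERR_MAC:
	case ERR_NO:
		break;	/* the first request failed: nothing to free */
	}
}

int main(void)
{
	unwind(ERR_RX);	/* an rx request failed: frees partial rx + mac */
	return 0;
}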
static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
struct stmmac_priv *priv = netdev_priv(dev);
int irq_idx = 0;
char *int_name;
int ret;
int i;
/* For common interrupt */
int_name = priv->int_name_mac;
sprintf(int_name, "%s:%s", dev->name, "mac");
ret = request_irq(dev->irq, stmmac_mac_interrupt,
0, int_name, dev);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc mac MSI %d (error: %d)\n",
__func__, dev->irq, ret);
irq_err = REQ_IRQ_ERR_MAC;
goto irq_error;
}
/* Request the Wake IRQ in case another line
 * is used for WoL
 */
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
int_name = priv->int_name_wol;
sprintf(int_name, "%s:%s", dev->name, "wol");
ret = request_irq(priv->wol_irq,
stmmac_mac_interrupt,
0, int_name, dev);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc wol MSI %d (error: %d)\n",
__func__, priv->wol_irq, ret);
irq_err = REQ_IRQ_ERR_WOL;
goto irq_error;
}
}
/* Request the LPI IRQ in case another line
 * is used for LPI
 */
if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
int_name = priv->int_name_lpi;
sprintf(int_name, "%s:%s", dev->name, "lpi");
ret = request_irq(priv->lpi_irq,
stmmac_mac_interrupt,
0, int_name, dev);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc lpi MSI %d (error: %d)\n",
__func__, priv->lpi_irq, ret);
irq_err = REQ_IRQ_ERR_LPI;
goto irq_error;
}
}
/* Request the Safety Feature Correctable Error line in
 * case another line is used
 */
if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
int_name = priv->int_name_sfty_ce;
sprintf(int_name, "%s:%s", dev->name, "safety-ce");
ret = request_irq(priv->sfty_ce_irq,
stmmac_safety_interrupt,
0, int_name, dev);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc sfty ce MSI %d (error: %d)\n",
__func__, priv->sfty_ce_irq, ret);
irq_err = REQ_IRQ_ERR_SFTY_CE;
goto irq_error;
}
}
/* Request the Safety Feature Uncorrectable Error line in
 * case another line is used
 */
if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
int_name = priv->int_name_sfty_ue;
sprintf(int_name, "%s:%s", dev->name, "safety-ue");
ret = request_irq(priv->sfty_ue_irq,
stmmac_safety_interrupt,
0, int_name, dev);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc sfty ue MSI %d (error: %d)\n",
__func__, priv->sfty_ue_irq, ret);
irq_err = REQ_IRQ_ERR_SFTY_UE;
goto irq_error;
}
}
/* Request Rx MSI irq */
for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
if (priv->rx_irq[i] == 0)
continue;
int_name = priv->int_name_rx_irq[i];
sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
ret = request_irq(priv->rx_irq[i],
stmmac_msi_intr_rx,
0, int_name, &priv->rx_queue[i]);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc rx-%d MSI %d (error: %d)\n",
__func__, i, priv->rx_irq[i], ret);
irq_err = REQ_IRQ_ERR_RX;
irq_idx = i;
goto irq_error;
}
}
/* Request Tx MSI irq */
for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
if (priv->tx_irq[i] == 0)
continue;
int_name = priv->int_name_tx_irq[i];
sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
ret = request_irq(priv->tx_irq[i],
stmmac_msi_intr_tx,
0, int_name, &priv->tx_queue[i]);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc tx-%d MSI %d (error: %d)\n",
__func__, i, priv->tx_irq[i], ret);
irq_err = REQ_IRQ_ERR_TX;
irq_idx = i;
goto irq_error;
}
}
return 0;
irq_error:
stmmac_free_irq(dev, irq_err, irq_idx);
return ret;
}
static int stmmac_request_irq_single(struct net_device *dev)
{
enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
ret = request_irq(dev->irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
irq_err = REQ_IRQ_ERR_MAC;
return ret;
}
/* Request the Wake IRQ in case another line
 * is used for WoL
 */
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
ret = request_irq(priv->wol_irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: ERROR: allocating the WoL IRQ %d (%d)\n",
__func__, priv->wol_irq, ret);
irq_err = REQ_IRQ_ERR_WOL;
return ret;
}
}
/* Request the IRQ lines */
if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
ret = request_irq(priv->lpi_irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: ERROR: allocating the LPI IRQ %d (%d)\n",
__func__, priv->lpi_irq, ret);
irq_err = REQ_IRQ_ERR_LPI;
goto irq_error;
}
}
return 0;
irq_error:
stmmac_free_irq(dev, irq_err, 0);
return ret;
}
static int stmmac_request_irq(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
/* Request the IRQ lines */
if (priv->plat->multi_msi_en)
ret = stmmac_request_irq_multi_msi(dev);
else
ret = stmmac_request_irq_single(dev);
return ret;
}
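stmmac_request_irq() is now the single dispatch point: plat->multi_msi_en is set by stmmac_config_multi_msi() and cleared by stmmac_config_single_msi() in the PCI glue above, so stmmac_open() below no longer needs to know which interrupt scheme is in use.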
/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
@@ -3076,50 +3336,15 @@ static int stmmac_open(struct net_device *dev)
/* We may have called phylink_speed_down before */
phylink_speed_up(priv->phylink);
/* Request the IRQ lines */
ret = request_irq(dev->irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
ret = stmmac_request_irq(dev);
if (ret)
goto irq_error;
}
/* Request the Wake IRQ in case of another line is used for WoL */
if (priv->wol_irq != dev->irq) {
ret = request_irq(priv->wol_irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: ERROR: allocating the WoL IRQ %d (%d)\n",
__func__, priv->wol_irq, ret);
goto wolirq_error;
}
}
/* Request the IRQ lines */
if (priv->lpi_irq > 0) {
ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
dev->name, dev);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: ERROR: allocating the LPI IRQ %d (%d)\n",
__func__, priv->lpi_irq, ret);
goto lpiirq_error;
}
}
stmmac_enable_all_queues(priv);
netif_tx_start_all_queues(priv->dev);
return 0;
lpiirq_error:
if (priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
wolirq_error:
free_irq(dev->irq, dev);
irq_error:
phylink_stop(priv->phylink);
@@ -3169,11 +3394,7 @@ static int stmmac_release(struct net_device *dev)
hrtimer_cancel(&priv->tx_queue[chan].txtimer);
/* Free the IRQ lines */
free_irq(dev->irq, dev);
if (priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
if (priv->lpi_irq > 0)
free_irq(priv->lpi_irq, dev);
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
if (priv->eee_enabled) {
priv->tx_path_in_lpi_mode = false;
@@ -4381,21 +4602,8 @@ static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
}
}
/**
* stmmac_interrupt - main ISR
* @irq: interrupt number.
* @dev_id: to pass the net device pointer (must be valid).
* Description: this is the main driver interrupt service routine.
* It can call:
* o DMA service routine (to manage incoming frame reception and transmission
* status)
* o Core interrupts to manage: remote wake-up, management counter, LPI
* interrupts.
*/
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queues_count;
@@ -4408,13 +4616,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (priv->irq_wake)
pm_wakeup_event(priv->device, 0);
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
/* Check if a fatal error happened */
if (stmmac_safety_feat_interrupt(priv))
return IRQ_HANDLED;
if (priv->dma_cap.estsel)
stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
&priv->xstats, tx_cnt);
@@ -4456,11 +4657,39 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
/* PCS link status */
if (priv->hw->pcs) {
if (priv->xstats.pcs_link)
netif_carrier_on(dev);
netif_carrier_on(priv->dev);
else
netif_carrier_off(dev);
netif_carrier_off(priv->dev);
}
}
}
/**
* stmmac_interrupt - main ISR
* @irq: interrupt number.
* @dev_id: to pass the net device pointer.
* Description: this is the main driver interrupt service routine.
* It can call:
* o DMA service routine (to manage incoming frame reception and transmission
* status)
* o Core interrupts to manage: remote wake-up, management counter, LPI
* interrupts.
*/
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
/* Check if a fatal error happened */
if (stmmac_safety_feat_interrupt(priv))
return IRQ_HANDLED;
/* To handle Common interrupts */
stmmac_common_interrupt(priv);
/* To handle DMA interrupts */
stmmac_dma_interrupt(priv);
@@ -4468,15 +4697,136 @@
return IRQ_HANDLED;
}
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
if (unlikely(!dev)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
return IRQ_NONE;
}
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
/* To handle Common interrupts */
stmmac_common_interrupt(priv);
return IRQ_HANDLED;
}
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
if (unlikely(!dev)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
return IRQ_NONE;
}
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
/* Check if a fatal error happened */
stmmac_safety_feat_interrupt(priv);
return IRQ_HANDLED;
}
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{
struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
int chan = tx_q->queue_index;
struct stmmac_priv *priv;
int status;
priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
return IRQ_NONE;
}
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
if (unlikely(status & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
tc <= 256) {
tc += 64;
if (priv->plat->force_thresh_dma_mode)
stmmac_set_dma_operation_mode(priv,
tc,
tc,
chan);
else
stmmac_set_dma_operation_mode(priv,
tc,
SF_DMA_MODE,
chan);
priv->xstats.threshold = tc;
}
} else if (unlikely(status == tx_hard_error)) {
stmmac_tx_err(priv, chan);
}
return IRQ_HANDLED;
}
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
{
struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
int chan = rx_q->queue_index;
struct stmmac_priv *priv;
priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
return IRQ_NONE;
}
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
stmmac_napi_check(priv, chan, DMA_DIR_RX);
return IRQ_HANDLED;
}
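Both queue ISRs receive the queue structure itself as their cookie and recover the owning stmmac_priv with container_of(), using the queue index to name the right array slot. A self-contained userspace illustration of that recovery (the struct names are simplified stand-ins):

/* container_of() recovery demo; types are simplified stand-ins. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rx_queue { int queue_index; };

struct priv {
	int other_state;
	struct rx_queue rx_queue[4];
};

int main(void)
{
	struct priv p = { .rx_queue = { {0}, {1}, {2}, {3} } };
	struct rx_queue *q = &p.rx_queue[2];	/* what the ISR gets */
	struct priv *owner = container_of(q, struct priv, rx_queue[2]);

	printf("recovered priv %p, expected %p\n",
	       (void *)owner, (void *)&p);
	return 0;
}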
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
* to allow network I/O with interrupts disabled.
*/
static void stmmac_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
stmmac_interrupt(dev->irq, dev);
enable_irq(dev->irq);
struct stmmac_priv *priv = netdev_priv(dev);
int i;
/* If adapter is down, do nothing */
if (test_bit(STMMAC_DOWN, &priv->state))
return;
if (priv->plat->multi_msi_en) {
for (i = 0; i < priv->plat->rx_queues_to_use; i++)
stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
} else {
disable_irq(dev->irq);
stmmac_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
}
#endif
@@ -5270,10 +5620,17 @@ int stmmac_dvr_probe(struct device *device,
priv->plat = plat_dat;
priv->ioaddr = res->addr;
priv->dev->base_addr = (unsigned long)res->addr;
priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
priv->dev->irq = res->irq;
priv->wol_irq = res->wol_irq;
priv->lpi_irq = res->lpi_irq;
priv->sfty_ce_irq = res->sfty_ce_irq;
priv->sfty_ue_irq = res->sfty_ue_irq;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
priv->rx_irq[i] = res->rx_irq[i];
for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
priv->tx_irq[i] = res->tx_irq[i];
if (!IS_ERR_OR_NULL(res->mac))
memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
@@ -96,6 +96,7 @@ struct stmmac_dma_cfg {
int mixed_burst;
bool aal;
bool eame;
bool multi_msi_en;
};
#define AXI_BLEN 7
@@ -237,5 +238,13 @@ struct plat_stmmacenet_data {
struct pci_dev *pdev;
bool has_crossts;
int int_snapshot_num;
bool multi_msi_en;
int msi_mac_vec;
int msi_wol_vec;
int msi_lpi_vec;
int msi_sfty_ce_vec;
int msi_sfty_ue_vec;
int msi_rx_base_vec;
int msi_tx_base_vec;
};
#endif