Commit acae4b48 authored by David S. Miller

Merge branch 'bnxt_en-next'

Michael Chan says:

====================
bnxt_en: Updates.

Various changes including updated firmware interface, improved TX ring
allocation scheme, improved out-of-memory logic in NAPI loop, reduced
default rings on multi-port devices, and new PCI IDs. Of particular note:

CPU affinity hints from Vasundhara Volam.

TC Flower eswitch support from Sathya Perla.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 33b86ba0 d7bc7305
...@@ -212,6 +212,15 @@ config BNXT_SRIOV
Virtualization support in the NetXtreme-C/E products. This
allows for virtual function acceleration in virtual environments.
config BNXT_FLOWER_OFFLOAD
bool "TC Flower offload support for NetXtreme-C/E"
depends on BNXT
default y
---help---
This configuration parameter enables TC Flower packet classifier
offload for eswitch. This option enables SR-IOV switchdev eswitch
offload.
config BNXT_DCB
bool "Data Center Bridging (DCB) Support"
default n
......
obj-$(CONFIG_BNXT) += bnxt_en.o
bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_tc.o
...@@ -49,6 +49,8 @@
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
...@@ -58,6 +60,7 @@
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
...@@ -103,6 +106,8 @@ enum board_idx {
BCM57416_NPAR,
BCM57452,
BCM57454,
BCM58802,
BCM58808,
NETXTREME_E_VF,
NETXTREME_C_VF,
};
...@@ -111,39 +116,42 @@ enum board_idx {
static const struct {
char *name;
} board_info[] = {
[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
...@@ -174,8 +182,9 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
...@@ -1843,6 +1852,13 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
&event);
if (likely(rc >= 0))
rx_pkts += rc;
/* Increment rx_pkts when rc is -ENOMEM to count towards
* the NAPI budget. Otherwise, we may potentially loop
* here forever if we consistently cannot allocate
* buffers.
*/
else if (rc == -ENOMEM)
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
} else if (unlikely((TX_CMP_TYPE(txcmp) ==
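The rationale in the new comment above is easiest to see in isolation. Below is a minimal standalone sketch of the polling loop (an editorial illustration, not driver code; rx_one_packet() is a hypothetical stand-in for the driver's RX completion handling):

/* hypothetical helper standing in for bnxt_rx_pkt(): returns the number
 * of packets processed, -ENOMEM on buffer allocation failure, or -EBUSY
 * on a partial completion
 */
extern int rx_one_packet(void);

static int poll_sketch(int budget)
{
	int rx_pkts = 0;

	while (rx_pkts < budget) {
		int rc = rx_one_packet();

		if (rc >= 0)
			rx_pkts += rc;	/* packets actually processed */
		else if (rc == -ENOMEM)
			rx_pkts++;	/* charge failed allocations to the budget */
		else if (rc == -EBUSY)
			break;		/* partial completion */
	}
	/* Without the -ENOMEM case, persistent allocation failure would keep
	 * rx_pkts at 0 and this loop would never terminate.
	 */
	return rx_pkts;
}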
...@@ -4461,9 +4477,33 @@ static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
mutex_lock(&bp->hwrm_cmd_lock);
rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
mutex_unlock(&bp->hwrm_cmd_lock);
if (!rc)
bp->tx_reserved_rings = *tx_rings;
return rc;
}
static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
{
struct hwrm_func_cfg_input req = {0};
int rc;
if (bp->hwrm_spec_code < 0x10801)
return 0;
if (BNXT_VF(bp))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(0xffff);
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
req.num_tx_rings = cpu_to_le16(tx_rings);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
return 0;
}
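An editorial note on the split above: bnxt_hwrm_reserve_tx_rings() still commits a TX ring reservation (and now caches it in bp->tx_reserved_rings), while the new bnxt_hwrm_check_tx_rings() only asks the firmware whether the requested count would fit, using a FUNC_CFG request with the TX_ASSETS_TEST flag so nothing is actually configured. That is what lets bnxt_check_rings() validate an ethtool or mqprio request before any existing rings are torn down.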
static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
u32 buf_tmrs, u16 flags,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
...@@ -5115,6 +5155,15 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc);
goto err_out;
}
if (bp->tx_reserved_rings != bp->tx_nr_rings) {
int tx = bp->tx_nr_rings;
if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
tx < bp->tx_nr_rings) {
rc = -ENOMEM;
goto err_out;
}
}
}

rc = bnxt_hwrm_ring_alloc(bp);
...@@ -5521,8 +5570,15 @@ static void bnxt_free_irq(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
irq = &bp->irq_tbl[i];
if (irq->requested) {
if (irq->have_cpumask) {
irq_set_affinity_hint(irq->vector, NULL);
free_cpumask_var(irq->cpu_mask);
irq->have_cpumask = 0;
}
free_irq(irq->vector, bp->bnapi[i]);
}
irq->requested = 0;
}
}
...@@ -5555,6 +5611,21 @@ static int bnxt_request_irq(struct bnxt *bp)
break;
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
int numa_node = dev_to_node(&bp->pdev->dev);
irq->have_cpumask = 1;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
irq->cpu_mask);
rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
if (rc) {
netdev_warn(bp->dev,
"Set affinity failed, IRQ = %d\n",
irq->vector);
break;
}
}
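The affinity-hint pattern added here, reduced to its essentials. A sketch under stated assumptions (vectors[] and nvec are placeholders; the mask is deliberately not freed in the sketch because a hint must outlive the call, whereas the real driver keeps it in irq->cpu_mask and releases it in bnxt_free_irq()):

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/interrupt.h>

static void hint_irq_affinity_sketch(struct device *dev, int *vectors, int nvec)
{
	int i, node = dev_to_node(dev);	/* NUMA node the NIC is attached to */

	for (i = 0; i < nvec; i++) {
		cpumask_var_t mask;

		if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
			continue;	/* hints are best-effort */
		/* spread vectors across CPUs, preferring the device's node */
		cpumask_set_cpu(cpumask_local_spread(i, node), mask);
		if (irq_set_affinity_hint(vectors[i], mask))
			dev_warn(dev, "affinity hint failed for IRQ %d\n",
				 vectors[i]);
	}
}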
}
return rc;
}
...@@ -5726,6 +5797,8 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
bp->port_count = resp->port_cnt;
hwrm_phy_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
...@@ -6998,7 +7071,7 @@ static void bnxt_sp_task(struct work_struct *work)
}

/* Under rtnl_lock */
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
int max_rx, max_tx, tx_sets = 1;
...@@ -7019,10 +7092,7 @@ int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (max_tx < tx_rings_needed)
return -ENOMEM;

return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
}

static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
...@@ -7211,7 +7281,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;

rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
sh, tc, bp->tx_nr_rings_xdp);
if (rc)
return rc;
...@@ -7237,17 +7307,33 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
return 0;
}
static int bnxt_setup_flower(struct net_device *dev,
struct tc_cls_flower_offload *cls_flower)
{
struct bnxt *bp = netdev_priv(dev);
if (BNXT_VF(bp))
return -EOPNOTSUPP;
return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, cls_flower);
}
static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_CLSFLOWER:
return bnxt_setup_flower(dev, type_data);
case TC_SETUP_MQPRIO: {
struct tc_mqprio_qopt *mqprio = type_data;

mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
return bnxt_setup_mq_tc(dev, mqprio->num_tc);
}
default:
return -EOPNOTSUPP;
}
}

#ifdef CONFIG_RFS_ACCEL
...@@ -7643,6 +7729,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
bnxt_shutdown_tc(bp);
cancel_work_sync(&bp->sp_task);
bp->sp_event = 0;
...@@ -7811,6 +7898,9 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
if (sh)
bp->flags |= BNXT_FLAG_SHARED_RINGS;
dflt_rings = netif_get_num_default_rss_queues();
/* Reduce default rings to reduce memory usage on multi-port cards */
if (bp->port_count > 1)
dflt_rings = min_t(int, dflt_rings, 4);
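For example, on a host where netif_get_num_default_rss_queues() returns eight, each port of a dual-port adapter now starts with min(8, 4) = 4 rings instead of eight, which is the memory reduction the commit message refers to.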
rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
if (rc)
return rc;
...@@ -7983,6 +8073,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_ethtool_init(bp);
bnxt_dcb_init(bp);
rc = bnxt_probe_phy(bp);
if (rc)
goto init_err_pci_clean;
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
...@@ -8017,10 +8111,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
rc = bnxt_init_int_mode(bp);
if (rc)
goto init_err_pci_clean;
...@@ -8031,9 +8121,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
device_set_wakeup_capable(&pdev->dev, false);
if (BNXT_PF(bp))
bnxt_init_tc(bp);
rc = register_netdev(dev);
if (rc)
goto init_err_cleanup_tc;

if (BNXT_PF(bp))
bnxt_dl_register(bp);
...@@ -8046,7 +8139,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;

init_err_cleanup_tc:
bnxt_shutdown_tc(bp);
bnxt_clear_int_mode(bp);

init_err_pci_clean:
......
...@@ -19,6 +19,7 @@
#define DRV_VER_UPD 0

#include <linux/interrupt.h>
#include <linux/rhashtable.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include <net/switchdev.h>
...@@ -701,8 +702,10 @@ struct bnxt_napi {
struct bnxt_irq {
irq_handler_t handler;
unsigned int vector;
u8 requested:1;
u8 have_cpumask:1;
char name[IFNAMSIZ + 2];
cpumask_var_t cpu_mask;
};

#define HWRM_RING_ALLOC_TX 0x1
...@@ -941,6 +944,27 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
struct bnxt_tc_info {
bool enabled;
/* hash table to store TC offloaded flows */
struct rhashtable flow_table;
struct rhashtable_params flow_ht_params;
/* hash table to store L2 keys of TC flows */
struct rhashtable l2_table;
struct rhashtable_params l2_ht_params;
/* lock to atomically add/del an l2 node when a flow is
* added or deleted.
*/
struct mutex lock;
/* Stat counter mask (width) */
u64 bytes_mask;
u64 packets_mask;
};
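The two rhashtables give lookups keyed by the TC flow cookie (flow_table) and by the 16-byte L2 key (l2_table); the matching rhashtable_params live near the bottom of bnxt_tc.c. bytes_mask and packets_mask record the width of the hardware flow counters (36 and 28 bits, programmed in bnxt_init_tc()) so the stats code can extend the returned values to full 64-bit counters across wrap-arounds.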
struct bnxt_vf_rep_stats {
u64 packets;
u64 bytes;
...@@ -988,6 +1012,9 @@ struct bnxt {
#define CHIP_NUM_5745X 0xd730
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58808 0xd808
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
...@@ -1019,6 +1046,10 @@ struct bnxt {
#define BNXT_CHIP_NUM_57X1X(chip_num) \
(BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num))
#define BNXT_CHIP_NUM_588XX(chip_num) \
((chip_num) == CHIP_NUM_58802 || \
(chip_num) == CHIP_NUM_58808)
struct net_device *dev;
struct pci_dev *pdev;
...@@ -1077,6 +1108,7 @@ struct bnxt {
#define BNXT_CHIP_P4_PLUS(bp) \
(BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \
BNXT_CHIP_NUM_5745X((bp)->chip_num) || \
BNXT_CHIP_NUM_588XX((bp)->chip_num) || \
(BNXT_CHIP_NUM_58700((bp)->chip_num) && \
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
...@@ -1118,6 +1150,7 @@ struct bnxt {
int tx_nr_rings;
int tx_nr_rings_per_tc;
int tx_nr_rings_xdp;
int tx_reserved_rings;
int tx_wake_thresh;
int tx_push_thresh;
...@@ -1196,6 +1229,7 @@ struct bnxt {
u8 nge_port_cnt;
__le16 nge_fw_dst_port_id;
u8 port_partition_type;
u8 port_count;
u16 br_mode;
u16 rx_coal_ticks;
...@@ -1277,6 +1311,7 @@ struct bnxt {
enum devlink_eswitch_mode eswitch_mode;
struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
struct bnxt_tc_info tc_info;
};

#define BNXT_RX_STATS_OFFSET(counter) \
...@@ -1346,7 +1381,7 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
......
...@@ -435,8 +435,7 @@ static int bnxt_set_channels(struct net_device *dev,
}
tx_xdp = req_rx_rings;
}

rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
if (rc) {
netdev_warn(dev, "Unable to allocate the requested rings\n");
return rc;
......
...@@ -11,14 +11,14 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H

/* HSI and HWRM Specification 1.8.1 */
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 8
#define HWRM_VERSION_UPDATE 1
#define HWRM_VERSION_RSVD 4 /* non-zero means beta version */
#define HWRM_VERSION_STR "1.8.1.4"

/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
...@@ -946,6 +946,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
#define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
#define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
#define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
...@@ -1972,7 +1973,12 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
__le16 supported_speeds_force_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
...@@ -4407,6 +4413,164 @@ struct hwrm_cfa_ntuple_filter_cfg_output {
u8 valid;
};
/* hwrm_cfa_flow_alloc */
/* Input (128 bytes) */
struct hwrm_cfa_flow_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le16 flags;
#define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
__le16 src_fid;
__le32 tunnel_handle;
__le16 action_flags;
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
__le16 dst_fid;
__be16 l2_rewrite_vlan_tpid;
__be16 l2_rewrite_vlan_tci;
__le16 act_meter_id;
__le16 ref_flow_handle;
__be16 ethertype;
__be16 outer_vlan_tci;
__be16 dmac[3];
__be16 inner_vlan_tci;
__be16 smac[3];
u8 ip_dst_mask_len;
u8 ip_src_mask_len;
__be32 ip_dst[4];
__be32 ip_src[4];
__be16 l4_src_port;
__be16 l4_src_port_mask;
__be16 l4_dst_port;
__be16 l4_dst_port_mask;
__be32 nat_ip_address[4];
__be16 l2_rewrite_dmac[3];
__be16 nat_port;
__be16 l2_rewrite_smac[3];
u8 ip_proto;
u8 unused_0;
};
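Note that the MAC fields above (dmac, smac, l2_rewrite_dmac, l2_rewrite_smac) are declared as three __be16 words rather than a u8[6] array, which is presumably why the driver code later in this commit fills them with memcpy(..., ETH_ALEN) instead of ether_addr_copy().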
/* Output (16 bytes) */
struct hwrm_cfa_flow_alloc_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le16 flow_handle;
u8 unused_0;
u8 unused_1;
u8 unused_2;
u8 unused_3;
u8 unused_4;
u8 valid;
};
/* hwrm_cfa_flow_free */
/* Input (24 bytes) */
struct hwrm_cfa_flow_free_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le16 flow_handle;
__le16 unused_0[3];
};
/* Output (32 bytes) */
struct hwrm_cfa_flow_free_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le64 packet;
__le64 byte;
__le32 unused_0;
u8 unused_1;
u8 unused_2;
u8 unused_3;
u8 valid;
};
/* hwrm_cfa_flow_stats */
/* Input (40 bytes) */
struct hwrm_cfa_flow_stats_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le16 num_flows;
__le16 flow_handle_0;
__le16 flow_handle_1;
__le16 flow_handle_2;
__le16 flow_handle_3;
__le16 flow_handle_4;
__le16 flow_handle_5;
__le16 flow_handle_6;
__le16 flow_handle_7;
__le16 flow_handle_8;
__le16 flow_handle_9;
__le16 unused_0;
};
/* Output (176 bytes) */
struct hwrm_cfa_flow_stats_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le64 packet_0;
__le64 packet_1;
__le64 packet_2;
__le64 packet_3;
__le64 packet_4;
__le64 packet_5;
__le64 packet_6;
__le64 packet_7;
__le64 packet_8;
__le64 packet_9;
__le64 byte_0;
__le64 byte_1;
__le64 byte_2;
__le64 byte_3;
__le64 byte_4;
__le64 byte_5;
__le64 byte_6;
__le64 byte_7;
__le64 byte_8;
__le64 byte_9;
__le32 unused_0;
u8 unused_1;
u8 unused_2;
u8 unused_3;
u8 valid;
};
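The stats message is batched: one request can carry up to ten flow handles (flow_handle_0 through flow_handle_9), and the response returns a matching packet_N/byte_N pair for each. The driver below currently queries a single flow per request (num_flows = 1, filling only flow_handle_0).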
/* hwrm_cfa_vfr_alloc */
/* Input (32 bytes) */
struct hwrm_cfa_vfr_alloc_input {
...@@ -5534,11 +5698,15 @@ struct hwrm_selftest_qlist_output {
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_EYE_TEST 0x10UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_EYE_TEST 0x20UL
u8 offline_tests;
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_EYE_TEST 0x10UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_EYE_TEST 0x20UL
u8 unused_0;
__le16 test_timeout;
u8 unused_1;
...@@ -5566,6 +5734,8 @@ struct hwrm_selftest_exec_input {
#define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
#define SELFTEST_EXEC_REQ_FLAGS_PCIE_EYE_TEST 0x10UL
#define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_EYE_TEST 0x20UL
u8 unused_0[7];
};
...@@ -5580,11 +5750,15 @@ struct hwrm_selftest_exec_output {
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_EYE_TEST 0x10UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_EYE_TEST 0x20UL
u8 test_success;
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_EYE_TEST 0x10UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_EYE_TEST 0x20UL
__le16 unused_0[3];
};
...@@ -5767,6 +5941,7 @@ struct cmd_nums {
#define HWRM_SELFTEST_QLIST (0x200UL)
#define HWRM_SELFTEST_EXEC (0x201UL)
#define HWRM_SELFTEST_IRQ (0x202UL)
#define HWRM_SELFTEST_RETREIVE_EYE_DATA (0x203UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
...@@ -5984,6 +6159,7 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
__le16 len;
u8 version;
u8 count;
......
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"
#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
#define BNXT_FID_INVALID 0xffff
#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT))
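As a worked example of the macro (with VLAN_PRIO_SHIFT = 13 from <linux/if_vlan.h>): VLAN_TCI(100, 3) = 100 | (3 << 13) = 0x6064, i.e. the priority lands in the top three bits of the TCI and the VLAN ID in the low twelve.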
/* Return the dst fid of the func for flow forwarding
* For PFs: src_fid is the fid of the PF
* For VF-reps: src_fid is the fid of the VF
*/
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
struct bnxt *bp;
/* check if dev belongs to the same switch */
if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
dev->ifindex);
return BNXT_FID_INVALID;
}
/* Is dev a VF-rep? */
if (dev != pf_bp->dev)
return bnxt_vf_rep_get_fid(dev);
bp = netdev_priv(dev);
return bp->pf.fw_fid;
}
static int bnxt_tc_parse_redir(struct bnxt *bp,
struct bnxt_tc_actions *actions,
const struct tc_action *tc_act)
{
int ifindex = tcf_mirred_ifindex(tc_act);
struct net_device *dev;
u16 dst_fid;
dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
if (!dev) {
netdev_info(bp->dev, "no dev for ifindex=%d", ifindex);
return -EINVAL;
}
/* find the FID from dev */
dst_fid = bnxt_flow_get_dst_fid(bp, dev);
if (dst_fid == BNXT_FID_INVALID) {
netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex);
return -EINVAL;
}
actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
actions->dst_fid = dst_fid;
actions->dst_dev = dev;
return 0;
}
static void bnxt_tc_parse_vlan(struct bnxt *bp,
struct bnxt_tc_actions *actions,
const struct tc_action *tc_act)
{
if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
}
}
static int bnxt_tc_parse_actions(struct bnxt *bp,
struct bnxt_tc_actions *actions,
struct tcf_exts *tc_exts)
{
const struct tc_action *tc_act;
LIST_HEAD(tc_actions);
int rc;
if (!tcf_exts_has_actions(tc_exts)) {
netdev_info(bp->dev, "no actions");
return -EINVAL;
}
tcf_exts_to_list(tc_exts, &tc_actions);
list_for_each_entry(tc_act, &tc_actions, list) {
/* Drop action */
if (is_tcf_gact_shot(tc_act)) {
actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
return 0; /* don't bother with other actions */
}
/* Redirect action */
if (is_tcf_mirred_egress_redirect(tc_act)) {
rc = bnxt_tc_parse_redir(bp, actions, tc_act);
if (rc)
return rc;
continue;
}
/* Push/pop VLAN */
if (is_tcf_vlan(tc_act)) {
bnxt_tc_parse_vlan(bp, actions, tc_act);
continue;
}
}
return 0;
}
#define GET_KEY(flow_cmd, key_type) \
skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
(flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type) \
skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
(flow_cmd)->mask)
static int bnxt_tc_parse_flow(struct bnxt *bp,
struct tc_cls_flower_offload *tc_flow_cmd,
struct bnxt_tc_flow *flow)
{
struct flow_dissector *dissector = tc_flow_cmd->dissector;
u16 addr_type = 0;
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
(dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
dissector->used_keys);
return -EOPNOTSUPP;
}
if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_dissector_key_control *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);
addr_type = key->addr_type;
}
if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
struct flow_dissector_key_basic *mask =
GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
flow->l2_key.ether_type = key->n_proto;
flow->l2_mask.ether_type = mask->n_proto;
if (key->n_proto == htons(ETH_P_IP) ||
key->n_proto == htons(ETH_P_IPV6)) {
flow->l4_key.ip_proto = key->ip_proto;
flow->l4_mask.ip_proto = mask->ip_proto;
}
}
if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_dissector_key_eth_addrs *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
struct flow_dissector_key_eth_addrs *mask =
GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
ether_addr_copy(flow->l2_key.dmac, key->dst);
ether_addr_copy(flow->l2_mask.dmac, mask->dst);
ether_addr_copy(flow->l2_key.smac, key->src);
ether_addr_copy(flow->l2_mask.smac, mask->src);
}
if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_dissector_key_vlan *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
struct flow_dissector_key_vlan *mask =
GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
flow->l2_key.inner_vlan_tci =
cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
flow->l2_mask.inner_vlan_tci =
cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
flow->l2_mask.inner_vlan_tpid = htons(0xffff);
flow->l2_key.num_vlans = 1;
}
if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
struct flow_dissector_key_ipv4_addrs *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
struct flow_dissector_key_ipv4_addrs *mask =
GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
flow->l3_key.ipv4.daddr.s_addr = key->dst;
flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
flow->l3_key.ipv4.saddr.s_addr = key->src;
flow->l3_mask.ipv4.saddr.s_addr = mask->src;
} else if (dissector_uses_key(dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
struct flow_dissector_key_ipv6_addrs *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
struct flow_dissector_key_ipv6_addrs *mask =
GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
flow->l3_key.ipv6.daddr = key->dst;
flow->l3_mask.ipv6.daddr = mask->dst;
flow->l3_key.ipv6.saddr = key->src;
flow->l3_mask.ipv6.saddr = mask->src;
}
if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_dissector_key_ports *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
struct flow_dissector_key_ports *mask =
GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
flow->l4_key.ports.dport = key->dst;
flow->l4_mask.ports.dport = mask->dst;
flow->l4_key.ports.sport = key->src;
flow->l4_mask.ports.sport = mask->src;
}
if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
struct flow_dissector_key_icmp *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
struct flow_dissector_key_icmp *mask =
GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
flow->l4_key.icmp.type = key->type;
flow->l4_key.icmp.code = key->code;
flow->l4_mask.icmp.type = mask->type;
flow->l4_mask.icmp.code = mask->code;
}
return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
{
struct hwrm_cfa_flow_free_input req = { 0 };
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
req.flow_handle = flow_handle;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
__func__, flow_handle, rc);
return rc;
}
static int ipv6_mask_len(struct in6_addr *mask)
{
int mask_len = 0, i;
for (i = 0; i < 4; i++)
mask_len += inet_mask_len(mask->s6_addr32[i]);
return mask_len;
}
static bool is_wildcard(void *mask, int len)
{
const u8 *p = mask;
int i;
for (i = 0; i < len; i++) {
if (p[i] != 0)
return false;
}
return true;
}
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
__le16 ref_flow_handle, __le16 *flow_handle)
{
struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_tc_actions *actions = &flow->actions;
struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
struct hwrm_cfa_flow_alloc_input req = { 0 };
u16 flow_flags = 0, action_flags = 0;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);
req.src_fid = cpu_to_le16(flow->src_fid);
req.ref_flow_handle = ref_flow_handle;
req.ethertype = flow->l2_key.ether_type;
req.ip_proto = flow->l4_key.ip_proto;
if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
}
if (flow->l2_key.num_vlans > 0) {
flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
/* FW expects the inner_vlan_tci value to be set
* in outer_vlan_tci when num_vlans is 1 (which is
* always the case in TC.)
*/
req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
}
/* If all IP and L4 fields are wildcarded then this is an L2 flow */
if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
} else {
flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;
if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
req.ip_dst_mask_len =
inet_mask_len(l3_mask->ipv4.daddr.s_addr);
req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
req.ip_src_mask_len =
inet_mask_len(l3_mask->ipv4.saddr.s_addr);
} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
sizeof(req.ip_dst));
req.ip_dst_mask_len =
ipv6_mask_len(&l3_mask->ipv6.daddr);
memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
sizeof(req.ip_src));
req.ip_src_mask_len =
ipv6_mask_len(&l3_mask->ipv6.saddr);
}
}
if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
req.l4_src_port = flow->l4_key.ports.sport;
req.l4_src_port_mask = flow->l4_mask.ports.sport;
req.l4_dst_port = flow->l4_key.ports.dport;
req.l4_dst_port_mask = flow->l4_mask.ports.dport;
} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
/* l4 ports serve as type/code when ip_proto is ICMP */
req.l4_src_port = htons(flow->l4_key.icmp.type);
req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
req.l4_dst_port = htons(flow->l4_key.icmp.code);
req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
}
req.flags = cpu_to_le16(flow_flags);
if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
} else {
if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
req.dst_fid = cpu_to_le16(actions->dst_fid);
}
if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
}
if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
/* Rewrite config with tpid = 0 implies vlan pop */
req.l2_rewrite_vlan_tpid = 0;
memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
}
}
req.action_flags = cpu_to_le16(action_flags);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
*flow_handle = resp->flow_handle;
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
/* Add val to accum while handling a possible wraparound
* of val. Even though val is of type u64, its actual width
* is denoted by mask and it will wrap around beyond that width.
*/
static void accumulate_val(u64 *accum, u64 val, u64 mask)
{
#define low_bits(x, mask) ((x) & (mask))
#define high_bits(x, mask) ((x) & ~(mask))
bool wrapped = val < low_bits(*accum, mask);
*accum = high_bits(*accum, mask) + val;
if (wrapped)
*accum += (mask + 1);
}
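To make the masked arithmetic concrete, here is a userspace rendering of the same logic (an editorial illustration, shrunk to an 8-bit counter so the wrap is easy to follow by hand):

#include <stdint.h>
#include <stdio.h>

static void accumulate(uint64_t *accum, uint64_t val, uint64_t mask)
{
	int wrapped = val < (*accum & mask);	/* HW value went backwards? */

	*accum = (*accum & ~mask) + val;	/* splice new low bits onto high bits */
	if (wrapped)
		*accum += mask + 1;		/* account for one full wrap */
}

int main(void)
{
	uint64_t total = 0x1fe;			/* running 64-bit total so far */

	/* An 8-bit HW counter that was at 0xfe now reads 0x02: it wrapped. */
	accumulate(&total, 0x02, 0xff);
	printf("%#llx\n", (unsigned long long)total);	/* prints 0x202 */
	return 0;
}

In the driver the masks are 36 bits for bytes and 28 bits for packets, as programmed in bnxt_init_tc() below.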
/* The HW counters' width is much less than 64 bits.
* Handle possible wrap-around while updating the stat counters
*/
static void bnxt_flow_stats_fix_wraparound(struct bnxt_tc_info *tc_info,
struct bnxt_tc_flow_stats *stats,
struct bnxt_tc_flow_stats *hw_stats)
{
accumulate_val(&stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
accumulate_val(&stats->packets, hw_stats->packets,
tc_info->packets_mask);
}
/* Fix possible wraparound of the stats queried from HW, calculate
* the delta from prev_stats, and also update the prev_stats.
* The HW flow stats are fetched under the hwrm_cmd_lock mutex.
* This routine is best called while under the mutex so that the
* stats processing happens atomically.
*/
static void bnxt_flow_stats_calc(struct bnxt_tc_info *tc_info,
struct bnxt_tc_flow *flow,
struct bnxt_tc_flow_stats *stats)
{
struct bnxt_tc_flow_stats *acc_stats, *prev_stats;
acc_stats = &flow->stats;
bnxt_flow_stats_fix_wraparound(tc_info, acc_stats, stats);
prev_stats = &flow->prev_stats;
stats->bytes = acc_stats->bytes - prev_stats->bytes;
stats->packets = acc_stats->packets - prev_stats->packets;
*prev_stats = *acc_stats;
}
static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp,
__le16 flow_handle,
struct bnxt_tc_flow *flow,
struct bnxt_tc_flow_stats *stats)
{
struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_flow_stats_input req = { 0 };
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
req.num_flows = cpu_to_le16(1);
req.flow_handle_0 = flow_handle;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
stats->packets = le64_to_cpu(resp->packet_0);
stats->bytes = le64_to_cpu(resp->byte_0);
bnxt_flow_stats_calc(&bp->tc_info, flow, stats);
} else {
netdev_info(bp->dev, "error rc=%d", rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
static int bnxt_tc_put_l2_node(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node)
{
struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
struct bnxt_tc_info *tc_info = &bp->tc_info;
int rc;
/* remove flow_node from the L2 shared flow list */
list_del(&flow_node->l2_list_node);
if (--l2_node->refcount == 0) {
rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
tc_info->l2_ht_params);
if (rc)
netdev_err(bp->dev,
"Error: %s: rhashtable_remove_fast: %d",
__func__, rc);
kfree_rcu(l2_node, rcu);
}
return 0;
}
static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
struct rhashtable_params ht_params,
struct bnxt_tc_l2_key *l2_key)
{
struct bnxt_tc_l2_node *l2_node;
int rc;
l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
if (!l2_node) {
l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
if (!l2_node)
return NULL;
l2_node->key = *l2_key;
rc = rhashtable_insert_fast(l2_table, &l2_node->node,
ht_params);
if (rc) {
kfree(l2_node);
netdev_err(bp->dev,
"Error: %s: rhashtable_insert_fast: %d",
__func__, rc);
return NULL;
}
INIT_LIST_HEAD(&l2_node->common_l2_flows);
}
return l2_node;
}
/* Get the ref_flow_handle for a flow by checking if there are any other
* flows that share the same L2 key as this flow.
*/
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
struct bnxt_tc_flow_node *flow_node,
__le16 *ref_flow_handle)
{
struct bnxt_tc_info *tc_info = &bp->tc_info;
struct bnxt_tc_flow_node *ref_flow_node;
struct bnxt_tc_l2_node *l2_node;
l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
tc_info->l2_ht_params,
&flow->l2_key);
if (!l2_node)
return -1;
/* If any other flow is using this l2_node, use its flow_handle
* as the ref_flow_handle
*/
if (l2_node->refcount > 0) {
ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
struct bnxt_tc_flow_node,
l2_list_node);
*ref_flow_handle = ref_flow_node->flow_handle;
} else {
*ref_flow_handle = cpu_to_le16(0xffff);
}
/* Insert the l2_node into the flow_node so that subsequent flows
* with a matching l2 key can use the flow_handle of this flow
* as their ref_flow_handle
*/
flow_node->l2_node = l2_node;
list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
l2_node->refcount++;
return 0;
}
/* After the flow parsing is done, this routine is used for checking
* if there are any aspects of the flow that prevent it from being
* offloaded.
*/
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
/* If L4 ports are specified then ip_proto must be TCP or UDP */
if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
(flow->l4_key.ip_proto != IPPROTO_TCP &&
flow->l4_key.ip_proto != IPPROTO_UDP)) {
netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
flow->l4_key.ip_proto);
return false;
}
return true;
}
static int __bnxt_tc_del_flow(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node)
{
struct bnxt_tc_info *tc_info = &bp->tc_info;
int rc;
/* send HWRM cmd to free the flow-id */
bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);
mutex_lock(&tc_info->lock);
/* release reference to l2 node */
bnxt_tc_put_l2_node(bp, flow_node);
mutex_unlock(&tc_info->lock);
rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
tc_info->flow_ht_params);
if (rc)
netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
__func__, rc);
kfree_rcu(flow_node, rcu);
return 0;
}
/* Add a new flow or replace an existing flow.
* Notes on locking:
* There are essentially two critical sections here.
* 1. while adding a new flow
* a) lookup l2-key
* b) issue HWRM cmd and get flow_handle
* c) link l2-key with flow
* 2. while deleting a flow
* a) unlinking l2-key from flow
* A lock is needed to protect these two critical sections.
*
* The hash-tables are already protected by the rhashtable API.
*/
static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *tc_flow_cmd)
{
struct bnxt_tc_flow_node *new_node, *old_node;
struct bnxt_tc_info *tc_info = &bp->tc_info;
struct bnxt_tc_flow *flow;
__le16 ref_flow_handle;
int rc;
/* allocate memory for the new flow and its node */
new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
if (!new_node) {
rc = -ENOMEM;
goto done;
}
new_node->cookie = tc_flow_cmd->cookie;
flow = &new_node->flow;
rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
if (rc)
goto free_node;
flow->src_fid = src_fid;
if (!bnxt_tc_can_offload(bp, flow)) {
rc = -ENOSPC;
goto free_node;
}
/* If a flow exists with the same cookie, delete it */
old_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
tc_info->flow_ht_params);
if (old_node)
__bnxt_tc_del_flow(bp, old_node);
/* Check if the L2 part of the flow has been offloaded already.
* If so, bump up its refcnt and get its reference handle.
*/
mutex_lock(&tc_info->lock);
rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
if (rc)
goto unlock;
/* send HWRM cmd to alloc the flow */
rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
&new_node->flow_handle);
if (rc)
goto put_l2;
/* add new flow to flow-table */
rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
tc_info->flow_ht_params);
if (rc)
goto hwrm_flow_free;
mutex_unlock(&tc_info->lock);
return 0;
hwrm_flow_free:
bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
put_l2:
bnxt_tc_put_l2_node(bp, new_node);
unlock:
mutex_unlock(&tc_info->lock);
free_node:
kfree(new_node);
done:
netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
__func__, tc_flow_cmd->cookie, rc);
return rc;
}
static int bnxt_tc_del_flow(struct bnxt *bp,
struct tc_cls_flower_offload *tc_flow_cmd)
{
struct bnxt_tc_info *tc_info = &bp->tc_info;
struct bnxt_tc_flow_node *flow_node;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
tc_info->flow_ht_params);
if (!flow_node) {
netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx",
tc_flow_cmd->cookie);
return -EINVAL;
}
return __bnxt_tc_del_flow(bp, flow_node);
}
static int bnxt_tc_get_flow_stats(struct bnxt *bp,
struct tc_cls_flower_offload *tc_flow_cmd)
{
struct bnxt_tc_info *tc_info = &bp->tc_info;
struct bnxt_tc_flow_node *flow_node;
struct bnxt_tc_flow_stats stats;
int rc;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
tc_info->flow_ht_params);
if (!flow_node) {
netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
tc_flow_cmd->cookie);
return -1;
}
rc = bnxt_hwrm_cfa_flow_stats_get(bp, flow_node->flow_handle,
&flow_node->flow, &stats);
if (rc)
return rc;
tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, 0);
return 0;
}
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower)
{
int rc = 0;
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
break;
case TC_CLSFLOWER_DESTROY:
rc = bnxt_tc_del_flow(bp, cls_flower);
break;
case TC_CLSFLOWER_STATS:
rc = bnxt_tc_get_flow_stats(bp, cls_flower);
break;
}
return rc;
}
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
.head_offset = offsetof(struct bnxt_tc_flow_node, node),
.key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
.key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
.automatic_shrinking = true
};
static const struct rhashtable_params bnxt_tc_l2_ht_params = {
.head_offset = offsetof(struct bnxt_tc_l2_node, node),
.key_offset = offsetof(struct bnxt_tc_l2_node, key),
.key_len = BNXT_TC_L2_KEY_LEN,
.automatic_shrinking = true
};
/* convert counter width in bits to a mask */
#define mask(width) ((u64)~0 >> (64 - (width)))
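/* e.g. mask(36) == (u64)~0 >> 28 == 0xfffffffff, i.e. thirty-six 1-bits */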
int bnxt_init_tc(struct bnxt *bp)
{
struct bnxt_tc_info *tc_info = &bp->tc_info;
int rc;
if (bp->hwrm_spec_code < 0x10800) {
netdev_warn(bp->dev,
"Firmware does not support TC flower offload.\n");
return -ENOTSUPP;
}
mutex_init(&tc_info->lock);
/* Counter widths are programmed by FW */
tc_info->bytes_mask = mask(36);
tc_info->packets_mask = mask(28);
tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
if (rc)
return rc;
tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
if (rc)
goto destroy_flow_table;
tc_info->enabled = true;
bp->dev->hw_features |= NETIF_F_HW_TC;
bp->dev->features |= NETIF_F_HW_TC;
return 0;
destroy_flow_table:
rhashtable_destroy(&tc_info->flow_table);
return rc;
}

void bnxt_shutdown_tc(struct bnxt *bp)
{
struct bnxt_tc_info *tc_info = &bp->tc_info;
if (!tc_info->enabled)
return;
rhashtable_destroy(&tc_info->flow_table);
rhashtable_destroy(&tc_info->l2_table);
}
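
bnxt_init_tc() and bnxt_shutdown_tc() are expected to bracket the device's lifetime between probe and remove. A hedged sketch of the probe-side call (the wrapper function here is illustrative; failure to set up offload is logged and need not be fatal):

static void example_enable_tc_offload(struct bnxt *bp)
{
	int rc = bnxt_init_tc(bp);

	if (rc)
		netdev_err(bp->dev,
			   "Failed to initialize TC flower offload (%d)\n", rc);
	/* probe continues; the device simply runs without flower offload */
}
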
#else
#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */

/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_TC_H
#define BNXT_TC_H
#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
/* Structs used for storing the filter/actions of the TC cmd.
*/
struct bnxt_tc_l2_key {
u8 dmac[ETH_ALEN];
u8 smac[ETH_ALEN];
__be16 inner_vlan_tpid;
__be16 inner_vlan_tci;
__be16 ether_type;
u8 num_vlans;
};

struct bnxt_tc_l3_key {
union {
struct {
struct in_addr daddr;
struct in_addr saddr;
} ipv4;
struct {
struct in6_addr daddr;
struct in6_addr saddr;
} ipv6;
};
};

struct bnxt_tc_l4_key {
u8 ip_proto;
union {
struct {
__be16 sport;
__be16 dport;
} ports;
struct {
u8 type;
u8 code;
} icmp;
};
};

struct bnxt_tc_actions {
u32 flags;
#define BNXT_TC_ACTION_FLAG_FWD BIT(0)
#define BNXT_TC_ACTION_FLAG_FWD_VXLAN BIT(1)
#define BNXT_TC_ACTION_FLAG_PUSH_VLAN BIT(3)
#define BNXT_TC_ACTION_FLAG_POP_VLAN BIT(4)
#define BNXT_TC_ACTION_FLAG_DROP BIT(5)
u16 dst_fid;
struct net_device *dst_dev;
__be16 push_vlan_tpid;
__be16 push_vlan_tci;
};

struct bnxt_tc_flow_stats {
u64 packets;
u64 bytes;
};

struct bnxt_tc_flow {
u32 flags;
#define BNXT_TC_FLOW_FLAGS_ETH_ADDRS BIT(1)
#define BNXT_TC_FLOW_FLAGS_IPV4_ADDRS BIT(2)
#define BNXT_TC_FLOW_FLAGS_IPV6_ADDRS BIT(3)
#define BNXT_TC_FLOW_FLAGS_PORTS BIT(4)
#define BNXT_TC_FLOW_FLAGS_ICMP BIT(5)
/* flow applicable to pkts ingressing on this fid */
u16 src_fid;
struct bnxt_tc_l2_key l2_key;
struct bnxt_tc_l2_key l2_mask;
struct bnxt_tc_l3_key l3_key;
struct bnxt_tc_l3_key l3_mask;
struct bnxt_tc_l4_key l4_key;
struct bnxt_tc_l4_key l4_mask;
struct bnxt_tc_actions actions;
/* updated stats accounting for hw-counter wrap-around */
struct bnxt_tc_flow_stats stats;
/* previous snap-shot of stats */
struct bnxt_tc_flow_stats prev_stats;
unsigned long lastused; /* jiffies */
};

/* L2 hash table
 * This hash table is used to store the L2 part of each flow.
 * A flow that shares the same L2 key/mask as an already
 * existing flow must refer to that flow's handle.
 */
struct bnxt_tc_l2_node {
/* hash key: first 16 bytes of key */
#define BNXT_TC_L2_KEY_LEN 16
struct bnxt_tc_l2_key key;
struct rhash_head node;
/* a linked list of flows that share the same l2 key */
struct list_head common_l2_flows;
/* number of flows sharing the l2 key */
u16 refcount;
struct rcu_head rcu;
};
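
The refcount on bnxt_tc_l2_node follows the usual get/put discipline. A hedged sketch of the release side (the real bnxt_tc_put_l2_node() lives in bnxt_tc.c; this illustrative version only shows the intended semantics):

static void example_put_l2_node(struct bnxt *bp,
				struct bnxt_tc_l2_node *l2_node)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;

	/* last flow sharing this L2 key: unhash and free via RCU */
	if (--l2_node->refcount == 0) {
		rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
				       tc_info->l2_ht_params);
		kfree_rcu(l2_node, rcu);
	}
}
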
struct bnxt_tc_flow_node {
/* hash key: provided by TC */
unsigned long cookie;
struct rhash_head node;
struct bnxt_tc_flow flow;
__le16 flow_handle;
/* L2 node in l2 hashtable that shares flow's l2 key */
struct bnxt_tc_l2_node *l2_node;
/* for the common_l2_flows list maintained in l2_node */
struct list_head l2_list_node;
struct rcu_head rcu;
};

int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower);
int bnxt_init_tc(struct bnxt *bp);
void bnxt_shutdown_tc(struct bnxt *bp);

#else /* CONFIG_BNXT_FLOWER_OFFLOAD */
static inline int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower)
{
return -EOPNOTSUPP;
}

static inline int bnxt_init_tc(struct bnxt *bp)
{
return 0;
}

static inline void bnxt_shutdown_tc(struct bnxt *bp)
{
}

#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */
#endif /* BNXT_TC_H */
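
The stubs above keep call sites free of #ifdef clutter; for example, a caller can unconditionally write:

	rc = bnxt_init_tc(bp);	/* compiles to rc = 0 when the option is off */
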
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -11,10 +11,12 @@
 #include <linux/etherdevice.h>
 #include <linux/rtnetlink.h>
 #include <linux/jhash.h>
+#include <net/pkt_cls.h>

 #include "bnxt_hsi.h"
 #include "bnxt.h"
 #include "bnxt_vfr.h"
+#include "bnxt_tc.h"

 #ifdef CONFIG_BNXT_SRIOV
@@ -113,6 +115,21 @@ bnxt_vf_rep_get_stats64(struct net_device *dev,
 	stats->tx_bytes = vf_rep->tx_stats.bytes;
 }

+static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
+				void *type_data)
+{
+	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
+	struct bnxt *bp = vf_rep->bp;
+	int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid;
+
+	switch (type) {
+	case TC_SETUP_CLSFLOWER:
+		return bnxt_tc_setup_flower(bp, vf_fid, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code)
 {
 	u16 vf_idx;
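
The vf_fid computed above is the same lookup exposed as bnxt_vf_rep_get_fid() in bnxt_vfr.h below, so the CLSFLOWER case could equivalently be written as:

	case TC_SETUP_CLSFLOWER:
		return bnxt_tc_setup_flower(bp, bnxt_vf_rep_get_fid(dev),
					    type_data);
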
@@ -182,6 +199,7 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
 	.ndo_stop		= bnxt_vf_rep_close,
 	.ndo_start_xmit		= bnxt_vf_rep_xmit,
 	.ndo_get_stats64	= bnxt_vf_rep_get_stats64,
+	.ndo_setup_tc		= bnxt_vf_rep_setup_tc,
 	.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
 };
@@ -468,11 +486,11 @@ int bnxt_dl_register(struct bnxt *bp)
 		return -ENOMEM;
 	}

-	bnxt_link_bp_to_dl(dl, bp);
+	bnxt_link_bp_to_dl(bp, dl);
 	bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
 	rc = devlink_register(dl, &bp->pdev->dev);
 	if (rc) {
-		bnxt_link_bp_to_dl(dl, NULL);
+		bnxt_link_bp_to_dl(bp, NULL);
 		devlink_free(dl);
 		netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
 		return rc;
...
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -24,13 +24,17 @@ static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
 	return ((struct bnxt_dl *)devlink_priv(dl))->bp;
 }

-static inline void bnxt_link_bp_to_dl(struct devlink *dl, struct bnxt *bp)
+/* To clear devlink pointer from bp, pass NULL dl */
+static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
 {
-	struct bnxt_dl *bp_dl = devlink_priv(dl);
+	bp->dl = dl;

-	bp_dl->bp = bp;
-	if (bp)
-		bp->dl = dl;
+	/* add a back pointer in dl to bp */
+	if (dl) {
+		struct bnxt_dl *bp_dl = devlink_priv(dl);
+
+		bp_dl->bp = bp;
+	}
 }
@@ -41,6 +45,14 @@ void bnxt_vf_reps_open(struct bnxt *bp);
 void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb);
 struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code);

+static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
+{
+	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
+	struct bnxt *bp = vf_rep->bp;
+
+	return bp->pf.vf[vf_rep->vf_idx].fw_fid;
+}
+
 #else

 static inline int bnxt_dl_register(struct bnxt *bp)
...
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -169,7 +169,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
 	tc = netdev_get_num_tc(dev);
 	if (!tc)
 		tc = 1;
-	rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
-				true, tc, tx_xdp);
+	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+			      true, tc, tx_xdp);
 	if (rc) {
 		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
...