Commit cf0e5c44 authored by David S. Miller

Merge branch 'bnxt_en-next'

Michael Chan says:

====================
bnxt_en: Updates for net-next.

This series includes a firmware interface update, some optimizations,
some new PCI IDs, new MTU checks, an ethtool reset method, an interrupt
coalescing code cleanup, and TC Flower offload for VXLAN encap/decap from
Sathya Perla.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e324615b cd66358e
@@ -108,9 +108,11 @@ enum board_idx {
BCM57452,
BCM57454,
BCM58802,
+BCM58804,
BCM58808,
NETXTREME_E_VF,
NETXTREME_C_VF,
+NETXTREME_S_VF,
};

/* indexed by enum above */
@@ -146,9 +148,11 @@ static const struct {
[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
+[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
+[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -186,6 +190,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
+{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
@@ -195,6 +200,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
+{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
};

@@ -219,7 +225,8 @@ static struct workqueue_struct *bnxt_pf_wq;
static bool bnxt_vf_pciid(enum board_idx idx)
{
-return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
+return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
+idx == NETXTREME_S_VF);
}

#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -2828,7 +2835,8 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
if (page_mode) {
if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
return -EOPNOTSUPP;
-bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
+bp->dev->max_mtu =
+min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
bp->dev->hw_features &= ~NETIF_F_LRO;
@@ -2836,7 +2844,7 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
bp->rx_dir = DMA_BIDIRECTIONAL;
bp->rx_skb_func = bnxt_rx_page_skb;
} else {
-bp->dev->max_mtu = BNXT_MAX_MTU;
+bp->dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;
@@ -4529,19 +4537,42 @@ static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
return 0;
}

-static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
-u32 buf_tmrs, u16 flags,
+static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
+u16 val, tmr, max, flags;
+
+max = hw_coal->bufs_per_record * 128;
+if (hw_coal->budget)
+max = hw_coal->bufs_per_record * hw_coal->budget;
+
+val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
+req->num_cmpl_aggr_int = cpu_to_le16(val);
+req->num_cmpl_dma_aggr = cpu_to_le16(val);
+
+val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, max);
+req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
+
+tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks);
+tmr = max_t(u16, tmr, 1);
+req->int_lat_tmr_max = cpu_to_le16(tmr);
+
+/* min timer set to 1/2 of interrupt timer */
+val = tmr / 2;
+req->int_lat_tmr_min = cpu_to_le16(val);
+
+/* buf timer set to 1/4 of interrupt timer */
+val = max_t(u16, tmr / 4, 1);
+req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
+
+tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq);
+tmr = max_t(u16, tmr, 1);
+req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
+
+flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
+flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
req->flags = cpu_to_le16(flags);
-req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
-req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
-req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
-req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
-/* Minimum time between 2 interrupts set to buf_tmr x 2 */
-req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
-req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
-req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
}
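For reference, the timer fields above are derived from microsecond values with BNXT_USEC_TO_COAL_TIMER, defined later in bnxt.h as ((x) * 25 / 2), i.e. 12.5 hardware ticks (80 ns each) per microsecond. A standalone sketch of the derivation the new function performs (example values only, not driver code):

#include <stdint.h>
#include <stdio.h>

#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2) /* 80 ns ticks */

int main(void)
{
    uint16_t usecs = 14; /* rx_coal.coal_ticks default set later in this series */
    uint16_t tmr_max = BNXT_USEC_TO_COAL_TIMER(usecs); /* 175 ticks */
    uint16_t tmr_min = tmr_max / 2; /* min timer: 1/2 of interrupt timer */
    uint16_t buf_tmr = tmr_max / 4 > 1 ? tmr_max / 4 : 1; /* buf timer: 1/4, floor 1 */

    printf("int_lat_tmr_max=%u int_lat_tmr_min=%u cmpl_aggr_dma_tmr=%u\n",
           tmr_max, tmr_min, buf_tmr);
    return 0;
}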
int bnxt_hwrm_set_coal(struct bnxt *bp)
@@ -4549,51 +4580,14 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
int i, rc = 0;
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
req_tx = {0}, *req;
-u16 max_buf, max_buf_irq;
-u16 buf_tmr, buf_tmr_irq;
-u32 flags;

bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

-/* Each rx completion (2 records) should be DMAed immediately.
- * DMA 1/4 of the completion buffers at a time.
- */
-max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
-/* max_buf must not be zero */
-max_buf = clamp_t(u16, max_buf, 1, 63);
-max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
-buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
-/* buf timer set to 1/4 of interrupt timer */
-buf_tmr = max_t(u16, buf_tmr / 4, 1);
-buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
-buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
-flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
-/* RING_IDLE generates more IRQs for lower latency. Enable it only
- * if coal_ticks is less than 25 us.
- */
-if (bp->rx_coal_ticks < 25)
-flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
-bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
-buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
-
-/* max_buf must not be zero */
-max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
-max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
-buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
-/* buf timer set to 1/4 of interrupt timer */
-buf_tmr = max_t(u16, buf_tmr / 4, 1);
-buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
-buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
-flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
-bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
-buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
+bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx);
+bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx);

mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -4725,6 +4719,10 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
else
bp->br_mode = BRIDGE_MODE_UNDEF;

+bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
+if (!bp->max_mtu)
+bp->max_mtu = BNXT_MAX_MTU;
+
func_qcfg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4885,9 +4883,9 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_intf_upd);
netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
}
-snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
+snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
-resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
+resp->hwrm_fw_rsvd);

bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
@@ -6981,6 +6979,11 @@ static void bnxt_timer(unsigned long data)
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
+
+if (bnxt_tc_flower_enabled(bp)) {
+set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
+bnxt_queue_sp_work(bp);
+}
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -7071,6 +7074,10 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_get_port_module_status(bp);
mutex_unlock(&bp->link_lock);
}
+
+if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
+bnxt_tc_flow_stats_work(bp);
+
/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
 * must be the last functions to be called before exiting.
 */
@@ -7134,6 +7141,32 @@ static void bnxt_cleanup_pci(struct bnxt *bp)
pci_disable_device(bp->pdev);
}

+static void bnxt_init_dflt_coal(struct bnxt *bp)
+{
+struct bnxt_coal *coal;
+
+/* Tick values in micro seconds.
+ * 1 coal_buf x bufs_per_record = 1 completion record.
+ */
+coal = &bp->rx_coal;
+coal->coal_ticks = 14;
+coal->coal_bufs = 30;
+coal->coal_ticks_irq = 1;
+coal->coal_bufs_irq = 2;
+coal->idle_thresh = 25;
+coal->bufs_per_record = 2;
+coal->budget = 64; /* NAPI budget */
+
+coal = &bp->tx_coal;
+coal->coal_ticks = 28;
+coal->coal_bufs = 30;
+coal->coal_ticks_irq = 2;
+coal->coal_bufs_irq = 2;
+coal->bufs_per_record = 1;
+
+bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
+}
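A quick sanity check of how these defaults surface through ethtool -c: with bufs_per_record = 2 on the RX side, coal_bufs = 30 is reported as rx-frames 15 (example values, mirroring the conversion bnxt_get_coalesce performs later in this series):

#include <assert.h>

int main(void)
{
    unsigned int coal_bufs = 30, bufs_per_record = 2; /* RX defaults above */

    /* ethtool reports frames; the hardware counts completion records */
    assert(coal_bufs / bufs_per_record == 15); /* rx-frames */
    return 0;
}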
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
int rc;
@@ -7202,18 +7235,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;

-/* tick values in micro seconds */
-bp->rx_coal_ticks = 12;
-bp->rx_coal_bufs = 30;
-bp->rx_coal_ticks_irq = 1;
-bp->rx_coal_bufs_irq = 2;
-
-bp->tx_coal_ticks = 25;
-bp->tx_coal_bufs = 30;
-bp->tx_coal_ticks_irq = 2;
-bp->tx_coal_bufs_irq = 2;
-
-bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
+bnxt_init_dflt_coal(bp);

setup_timer(&bp->timer, bnxt_timer, (unsigned long)bp);
bp->current_interval = BNXT_TIMER_INTERVAL;
@@ -7242,13 +7264,13 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;

+if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+return 0;
+
rc = bnxt_approve_mac(bp, addr->sa_data);
if (rc)
return rc;

-if (ether_addr_equal(addr->sa_data, dev->dev_addr))
-return 0;
-
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
@@ -7325,7 +7347,7 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct bnxt *bp = cb_priv;

-if (BNXT_VF(bp))
+if (!bnxt_tc_flower_enabled(bp))
return -EOPNOTSUPP;

switch (type) {
@@ -8088,10 +8110,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
dev->priv_flags |= IFF_UNICAST_FLT;

-/* MTU range: 60 - 9500 */
-dev->min_mtu = ETH_ZLEN;
-dev->max_mtu = BNXT_MAX_MTU;
-
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
mutex_init(&bp->sriov_lock);
@@ -8139,6 +8157,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_ethtool_init(bp);
bnxt_dcb_init(bp);

+/* MTU range: 60 - FW defined max */
+dev->min_mtu = ETH_ZLEN;
+dev->max_mtu = bp->max_mtu;
+
rc = bnxt_probe_phy(bp);
if (rc)
goto init_err_pci_clean;
...
@@ -944,6 +944,22 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000

+struct bnxt_coal {
+u16 coal_ticks;
+u16 coal_ticks_irq;
+u16 coal_bufs;
+u16 coal_bufs_irq;
+/* RING_IDLE enabled when coal ticks < idle_thresh */
+u16 idle_thresh;
+u8 bufs_per_record;
+u8 budget;
+};
+
+struct bnxt_tc_flow_stats {
+u64 packets;
+u64 bytes;
+};
+
struct bnxt_tc_info {
bool enabled;
@@ -954,12 +970,29 @@ struct bnxt_tc_info {
/* hash table to store L2 keys of TC flows */
struct rhashtable l2_table;
struct rhashtable_params l2_ht_params;
+/* hash table to store L2 keys for TC tunnel decap */
+struct rhashtable decap_l2_table;
+struct rhashtable_params decap_l2_ht_params;
+/* hash table to store tunnel decap entries */
+struct rhashtable decap_table;
+struct rhashtable_params decap_ht_params;
+/* hash table to store tunnel encap entries */
+struct rhashtable encap_table;
+struct rhashtable_params encap_ht_params;

/* lock to atomically add/del an l2 node when a flow is
 * added or deleted.
 */
struct mutex lock;

+/* Fields used for batching stats query */
+struct rhashtable_iter iter;
+#define BNXT_FLOW_STATS_BATCH_MAX 10
+struct bnxt_tc_stats_batch {
+void *flow_node;
+struct bnxt_tc_flow_stats hw_stats;
+} stats_batch[BNXT_FLOW_STATS_BATCH_MAX];
+
/* Stat counter mask (width) */
u64 bytes_mask;
u64 packets_mask;
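The iter field above is what lets the driver walk the flow table in batches of BNXT_FLOW_STATS_BATCH_MAX from the slow path. A simplified sketch of such a batched walk with the kernel's rhashtable iterator API (illustrative only; the helper name is an assumption, not the driver's exact bnxt_tc_flow_stats_work()):

/* Illustrative sketch, not the actual driver function. */
static int bnxt_tc_fill_stats_batch(struct bnxt_tc_info *tc_info,
                                    int *num_flows)
{
    struct rhashtable_iter *iter = &tc_info->iter;
    void *flow_node;
    int rc, i = 0;

    rc = rhashtable_walk_start(iter);
    if (rc && rc != -EAGAIN)
        return rc;

    while (i < BNXT_FLOW_STATS_BATCH_MAX) {
        flow_node = rhashtable_walk_next(iter);
        if (IS_ERR(flow_node)) {
            if (PTR_ERR(flow_node) == -EAGAIN)
                continue; /* table resize in progress, retry */
            break;
        }
        if (!flow_node)
            break; /* reached end of table */
        tc_info->stats_batch[i++].flow_node = flow_node;
    }
    rhashtable_walk_stop(iter);
    *num_flows = i;
    return 0;
}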
@@ -1013,6 +1046,7 @@ struct bnxt {
#define CHIP_NUM_5745X 0xd730

#define CHIP_NUM_58802 0xd802
+#define CHIP_NUM_58804 0xd804
#define CHIP_NUM_58808 0xd808

#define BNXT_CHIP_NUM_5730X(chip_num) \
@@ -1048,6 +1082,7 @@ struct bnxt {
#define BNXT_CHIP_NUM_588XX(chip_num) \
((chip_num) == CHIP_NUM_58802 || \
+ (chip_num) == CHIP_NUM_58804 || \
 (chip_num) == CHIP_NUM_58808)

struct net_device *dev;
@@ -1170,6 +1205,7 @@ struct bnxt {
int nr_vnics;
u32 rss_hash_cfg;

+u16 max_mtu;
u8 max_tc;
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
@@ -1232,14 +1268,8 @@ struct bnxt {
u8 port_count;
u16 br_mode;

-u16 rx_coal_ticks;
-u16 rx_coal_ticks_irq;
-u16 rx_coal_bufs;
-u16 rx_coal_bufs_irq;
-
-u16 tx_coal_ticks;
-u16 tx_coal_ticks_irq;
-u16 tx_coal_bufs;
-u16 tx_coal_bufs_irq;
+struct bnxt_coal rx_coal;
+struct bnxt_coal tx_coal;

#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
@@ -1265,6 +1295,7 @@ struct bnxt {
#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12
#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
+#define BNXT_FLOW_STATS_SP_EVENT 15

struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
@@ -1315,7 +1346,7 @@ struct bnxt {
enum devlink_eswitch_mode eswitch_mode;
struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
-struct bnxt_tc_info tc_info;
+struct bnxt_tc_info *tc_info;
};

#define BNXT_RX_STATS_OFFSET(counter) \
...
@@ -29,7 +29,7 @@ int bnxt_dl_register(struct bnxt *bp)
if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
return 0;

-if (bp->hwrm_spec_code < 0x10800) {
+if (bp->hwrm_spec_code < 0x10803) {
netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n");
return -ENOTSUPP;
}
...
@@ -26,8 +26,6 @@
#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)

-static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
-
static u32 bnxt_get_msglevel(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -46,19 +44,24 @@ static int bnxt_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct bnxt *bp = netdev_priv(dev);
+struct bnxt_coal *hw_coal;
+u16 mult;

memset(coal, 0, sizeof(*coal));

-coal->rx_coalesce_usecs = bp->rx_coal_ticks;
-/* 2 completion records per rx packet */
-coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2;
-coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq;
-coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2;
+hw_coal = &bp->rx_coal;
+mult = hw_coal->bufs_per_record;
+coal->rx_coalesce_usecs = hw_coal->coal_ticks;
+coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
+coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
+coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

-coal->tx_coalesce_usecs = bp->tx_coal_ticks;
-coal->tx_max_coalesced_frames = bp->tx_coal_bufs;
-coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
-coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
+hw_coal = &bp->tx_coal;
+mult = hw_coal->bufs_per_record;
+coal->tx_coalesce_usecs = hw_coal->coal_ticks;
+coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
+coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
+coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
@@ -70,18 +73,23 @@ static int bnxt_set_coalesce(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
bool update_stats = false;
+struct bnxt_coal *hw_coal;
int rc = 0;
+u16 mult;

-bp->rx_coal_ticks = coal->rx_coalesce_usecs;
-/* 2 completion records per rx packet */
-bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2;
-bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq;
-bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
+hw_coal = &bp->rx_coal;
+mult = hw_coal->bufs_per_record;
+hw_coal->coal_ticks = coal->rx_coalesce_usecs;
+hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
+hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
+hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;

-bp->tx_coal_ticks = coal->tx_coalesce_usecs;
-bp->tx_coal_bufs = coal->tx_max_coalesced_frames;
-bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
-bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
+hw_coal = &bp->tx_coal;
+mult = hw_coal->bufs_per_record;
+hw_coal->coal_ticks = coal->tx_coalesce_usecs;
+hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
+hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
+hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;

if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
u32 stats_ticks = coal->stats_block_coalesce_usecs;
@@ -822,20 +830,10 @@ static void bnxt_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnxt *bp = netdev_priv(dev);
-char *pkglog;
-char *pkgver = NULL;

-pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
-if (pkglog)
-pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
-if (pkgver && *pkgver != 0 && isdigit(*pkgver))
-snprintf(info->fw_version, sizeof(info->fw_version) - 1,
-"%s pkg %s", bp->fw_ver_str, pkgver);
-else
-strlcpy(info->fw_version, bp->fw_ver_str,
-sizeof(info->fw_version));
+strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = bnxt_get_num_stats(bp);
info->testinfo_len = bp->num_tests;
@@ -843,7 +841,6 @@ static void bnxt_get_drvinfo(struct net_device *dev,
info->eedump_len = 0;
/* TODO CHIMP FW: reg dump details */
info->regdump_len = 0;
-kfree(pkglog);
}

static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -1350,7 +1347,6 @@ static int bnxt_firmware_reset(struct net_device *dev,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);

-/* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */
/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
/* (e.g. when firmware isn't already running) */
switch (dir_type) {
@@ -1376,6 +1372,10 @@ static int bnxt_firmware_reset(struct net_device *dev,
case BNX_DIR_TYPE_BONO_PATCH:
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
break;
+case BNXT_FW_RESET_CHIP:
+req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
+req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
+break;
default:
return -EINVAL;
}
@@ -1773,6 +1773,9 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
dma_addr_t dma_handle;
struct hwrm_nvm_read_input req = {0};

+if (!length)
+return -EINVAL;
+
buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
GFP_KERNEL);
if (!buf) {
@@ -2495,13 +2498,59 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
}
}

+static int bnxt_reset(struct net_device *dev, u32 *flags)
+{
+struct bnxt *bp = netdev_priv(dev);
+int rc = 0;
+
+if (!BNXT_PF(bp)) {
+netdev_err(dev, "Reset is not supported from a VF\n");
+return -EOPNOTSUPP;
+}
+
+if (pci_vfs_assigned(bp->pdev)) {
+netdev_err(dev,
+"Reset not allowed when VFs are assigned to VMs\n");
+return -EBUSY;
+}
+
+if (*flags == ETH_RESET_ALL) {
+/* This feature is not supported in older firmware versions */
+if (bp->hwrm_spec_code < 0x10803)
+return -EOPNOTSUPP;
+
+rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
+if (!rc)
+netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
+} else {
+rc = -EINVAL;
+}
+
+return rc;
+}
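This handler is reached through the standard ETHTOOL_RESET ioctl; recent ethtool binaries expose the same operation as "ethtool --reset <dev> all". A minimal user-space sketch (the interface name "eth0" is a placeholder; error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
    struct ethtool_value eval = { .cmd = ETHTOOL_RESET, .data = ETH_RESET_ALL };
    struct ifreq ifr = {0};
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* placeholder name */
    ifr.ifr_data = (char *)&eval;
    if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
        perror("ETHTOOL_RESET");
    else /* kernel clears the flags it completed; 0 means all done */
        printf("remaining reset flags: 0x%x\n", eval.data);
    close(fd);
    return 0;
}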
void bnxt_ethtool_init(struct bnxt *bp)
{
struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_selftest_qlist_input req = {0};
struct bnxt_test_info *test_info;
+struct net_device *dev = bp->dev;
+char *pkglog;
int i, rc;

+pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
+if (pkglog) {
+char *pkgver;
+int len;
+
+pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
+if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
+len = strlen(bp->fw_ver_str);
+snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
+"/pkg %s", pkgver);
+}
+kfree(pkglog);
+}
+
if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
return;
@@ -2592,4 +2641,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
+.reset = bnxt_reset,
};
@@ -34,6 +34,8 @@ struct bnxt_led_cfg {
#define BNXT_LED_DFLT_ENABLES(x) \
cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))

+#define BNXT_FW_RESET_CHIP 0xffff
+
extern const struct ethtool_ops bnxt_ethtool_ops;

u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
...
@@ -11,21 +11,21 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H

-/* HSI and HWRM Specification 1.8.1 */
+/* HSI and HWRM Specification 1.8.3 */
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 8
-#define HWRM_VERSION_UPDATE 1
+#define HWRM_VERSION_UPDATE 3

-#define HWRM_VERSION_RSVD 4 /* non-zero means beta version */
+#define HWRM_VERSION_RSVD 1 /* non-zero means beta version */

-#define HWRM_VERSION_STR "1.8.1.4"
+#define HWRM_VERSION_STR "1.8.3.1"
/*
 * Following is the signature for HWRM message field that indicates not
 * applicable (All F's). Need to cast it the size of the field if needed.
 */
#define HWRM_NA_SIGNATURE ((__le32)(-1))
#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (248) /* hwrm_selftest_qlist */
+#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */
#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
@@ -111,6 +111,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
u8 opaque_v;
@@ -835,8 +836,7 @@ struct hwrm_func_qcfg_output {
u8 port_pf_cnt;
#define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
__le16 dflt_vnic_id;
-u8 unused_0;
-u8 unused_1;
+__le16 max_mtu_configured;
__le32 min_bw;
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
@@ -873,12 +873,12 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
#define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
-u8 unused_2;
+u8 unused_0;
__le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
__le16 alloc_sp_tx_rings;
-u8 unused_3;
+u8 unused_1;
u8 valid;
};
@@ -3407,6 +3407,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
#define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
+#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -3463,6 +3464,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
#define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRROING_CAPABLE_VNIC_CAP 0x40UL
__le32 unused_2;
u8 unused_3;
u8 unused_4;
@@ -3994,6 +3996,7 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 unused_7;
__le16 dst_id;
@@ -4122,6 +4125,14 @@ struct hwrm_cfa_l2_set_rx_mask_output {
u8 valid;
};

+/* Command specific Error Codes (8 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_cmd_err {
+u8 code;
+#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
+#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
+u8 unused_0[7];
+};
+
/* hwrm_cfa_tunnel_filter_alloc */
/* Input (88 bytes) */
struct hwrm_cfa_tunnel_filter_alloc_input {
@@ -4161,6 +4172,7 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 unused_0;
__le32 vni;
@@ -4323,6 +4335,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 pri_hint;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
@@ -4355,6 +4368,14 @@ struct hwrm_cfa_ntuple_filter_alloc_output {
u8 valid;
};

+/* Command specific Error Codes (8 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
+u8 code;
+#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
+u8 unused_0[7];
+};
+
/* hwrm_cfa_ntuple_filter_free */
/* Input (24 bytes) */
struct hwrm_cfa_ntuple_filter_free_input {
@@ -4413,6 +4434,116 @@ struct hwrm_cfa_ntuple_filter_cfg_output {
u8 valid;
};

+/* hwrm_cfa_decap_filter_alloc */
+/* Input (104 bytes) */
+struct hwrm_cfa_decap_filter_alloc_input {
+__le16 req_type;
+__le16 cmpl_ring;
+__le16 seq_id;
+__le16 target_id;
+__le64 resp_addr;
+__le32 flags;
+#define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
+__le32 enables;
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+__be32 tunnel_id;
+u8 tunnel_type;
+#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+u8 unused_0;
+__le16 unused_1;
+u8 src_macaddr[6];
+u8 unused_2;
+u8 unused_3;
+u8 dst_macaddr[6];
+__be16 ovlan_vid;
+__be16 ivlan_vid;
+__be16 t_ovlan_vid;
+__be16 t_ivlan_vid;
+__be16 ethertype;
+u8 ip_addr_type;
+#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+u8 ip_protocol;
+#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+u8 unused_4;
+u8 unused_5;
+u8 unused_6[3];
+u8 unused_7;
+__be32 src_ipaddr[4];
+__be32 dst_ipaddr[4];
+__be16 src_port;
+__be16 dst_port;
+__le16 dst_id;
+__le16 l2_ctxt_ref_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_decap_filter_alloc_output {
+__le16 error_code;
+__le16 req_type;
+__le16 seq_id;
+__le16 resp_len;
+__le32 decap_filter_id;
+u8 unused_0;
+u8 unused_1;
+u8 unused_2;
+u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_decap_filter_free_input {
+__le16 req_type;
+__le16 cmpl_ring;
+__le16 seq_id;
+__le16 target_id;
+__le64 resp_addr;
+__le32 decap_filter_id;
+__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_decap_filter_free_output {
+__le16 error_code;
+__le16 req_type;
+__le16 seq_id;
+__le16 resp_len;
+__le32 unused_0;
+u8 unused_1;
+u8 unused_2;
+u8 unused_3;
+u8 valid;
+};
+
/* hwrm_cfa_flow_alloc */
/* Input (128 bytes) */
struct hwrm_cfa_flow_alloc_input {
@@ -4634,6 +4765,7 @@ struct hwrm_tunnel_dst_port_query_input {
u8 tunnel_type;
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
u8 unused_0[7];
};
@@ -4662,9 +4794,10 @@ struct hwrm_tunnel_dst_port_alloc_input {
u8 tunnel_type;
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
u8 unused_0;
__be16 tunnel_dst_port_val;
-__le32 unused_1;
+__be32 unused_1;
};
/* Output (16 bytes) */
@@ -4693,6 +4826,7 @@ struct hwrm_tunnel_dst_port_free_input {
u8 tunnel_type;
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
u8 unused_0;
__le16 tunnel_dst_port_id;
__le32 unused_1;
@@ -4848,6 +4982,8 @@ struct hwrm_fw_reset_input {
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
u8 selfrst_status;
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
@@ -4888,6 +5024,8 @@ struct hwrm_fw_qstatus_input {
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
u8 unused_0[7];
};
@@ -5324,6 +5462,32 @@ struct hwrm_wol_reason_qcfg_output {
u8 valid;
};

+/* hwrm_dbg_read_direct */
+/* Input (32 bytes) */
+struct hwrm_dbg_read_direct_input {
+__le16 req_type;
+__le16 cmpl_ring;
+__le16 seq_id;
+__le16 target_id;
+__le64 resp_addr;
+__le64 host_dest_addr;
+__le32 read_addr;
+__le32 read_len32;
+};
+
+/* Output (16 bytes) */
+struct hwrm_dbg_read_direct_output {
+__le16 error_code;
+__le16 req_type;
+__le16 seq_id;
+__le16 resp_len;
+__le32 unused_0;
+u8 unused_1;
+u8 unused_2;
+u8 unused_3;
+u8 valid;
+};
+
/* hwrm_nvm_read */
/* Input (40 bytes) */
struct hwrm_nvm_read_input {
@@ -5676,6 +5840,105 @@ struct hwrm_nvm_install_update_cmd_err {
u8 unused_0[7];
};

+/* hwrm_nvm_get_variable */
+/* Input (40 bytes) */
+struct hwrm_nvm_get_variable_input {
+__le16 req_type;
+__le16 cmpl_ring;
+__le16 seq_id;
+__le16 target_id;
+__le64 resp_addr;
+__le64 dest_data_addr;
+__le16 data_len;
+__le16 option_num;
+#define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+#define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+__le16 dimensions;
+__le16 index_0;
+__le16 index_1;
+__le16 index_2;
+__le16 index_3;
+u8 flags;
+#define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+u8 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_get_variable_output {
+__le16 error_code;
+__le16 req_type;
+__le16 seq_id;
+__le16 resp_len;
+__le16 data_len;
+__le16 option_num;
+#define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
+#define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+u8 unused_0;
+u8 unused_1;
+u8 unused_2;
+u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_nvm_get_variable_cmd_err {
+u8 code;
+#define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+#define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+#define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+#define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+u8 unused_0[7];
+};
+
+/* hwrm_nvm_set_variable */
+/* Input (40 bytes) */
+struct hwrm_nvm_set_variable_input {
+__le16 req_type;
+__le16 cmpl_ring;
+__le16 seq_id;
+__le16 target_id;
+__le64 resp_addr;
+__le64 src_data_addr;
+__le16 data_len;
+__le16 option_num;
+#define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+#define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+__le16 dimensions;
+__le16 index_0;
+__le16 index_1;
+__le16 index_2;
+__le16 index_3;
+u8 flags;
+#define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
+u8 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_set_variable_output {
+__le16 error_code;
+__le16 req_type;
+__le16 seq_id;
+__le16 resp_len;
+__le32 unused_0;
+u8 unused_1;
+u8 unused_2;
+u8 unused_3;
+u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_nvm_set_variable_cmd_err {
+u8 code;
+#define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+#define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+#define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+u8 unused_0[7];
+};
+
/* hwrm_selftest_qlist */
/* Input (16 bytes) */
struct hwrm_selftest_qlist_input {
@@ -5686,7 +5949,7 @@ struct hwrm_selftest_qlist_input {
__le64 resp_addr;
};

-/* Output (248 bytes) */
+/* Output (280 bytes) */
struct hwrm_selftest_qlist_output {
__le16 error_code;
__le16 req_type;
@@ -5698,15 +5961,15 @@ struct hwrm_selftest_qlist_output {
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
-#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_EYE_TEST 0x10UL
-#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_EYE_TEST 0x20UL
+#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
+#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
u8 offline_tests;
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
-#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_EYE_TEST 0x10UL
-#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_EYE_TEST 0x20UL
+#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
+#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
u8 unused_0;
__le16 test_timeout;
u8 unused_1;
@@ -5719,6 +5982,11 @@ struct hwrm_selftest_qlist_output {
char test5_name[32];
char test6_name[32];
char test7_name[32];
+__le32 unused_3;
+u8 unused_4;
+u8 unused_5;
+u8 unused_6;
+u8 valid;
};

/* hwrm_selftest_exec */
@@ -5734,8 +6002,8 @@ struct hwrm_selftest_exec_input {
#define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
-#define SELFTEST_EXEC_REQ_FLAGS_PCIE_EYE_TEST 0x10UL
-#define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_EYE_TEST 0x20UL
+#define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+#define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
u8 unused_0[7];
};
@@ -5750,16 +6018,21 @@ struct hwrm_selftest_exec_output {
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
-#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_EYE_TEST 0x10UL
-#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_EYE_TEST 0x20UL
+#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
+#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
u8 test_success;
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
-#define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_EYE_TEST 0x10UL
-#define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_EYE_TEST 0x20UL
-__le16 unused_0[3];
+#define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
+#define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
+u8 unused_0;
+u8 unused_1;
+u8 unused_2;
+u8 unused_3;
+u8 unused_4;
+u8 valid;
};

/* hwrm_selftest_irq */
@@ -5772,12 +6045,50 @@ struct hwrm_selftest_irq_input {
__le64 resp_addr;
};

-/* Output (8 bytes) */
+/* Output (16 bytes) */
struct hwrm_selftest_irq_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
+__le32 unused_0;
+u8 unused_1;
+u8 unused_2;
+u8 unused_3;
+u8 valid;
};

+/* hwrm_selftest_retrieve_serdes_data */
+/* Input (32 bytes) */
+struct hwrm_selftest_retrieve_serdes_data_input {
+__le16 req_type;
+__le16 cmpl_ring;
+__le16 seq_id;
+__le16 target_id;
+__le64 resp_addr;
+__le64 resp_data_addr;
+__le32 resp_data_offset;
+__le16 data_len;
+u8 flags;
+#define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0xfUL
+#define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0
+#define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+#define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
+u8 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_selftest_retrieve_serdes_data_output {
+__le16 error_code;
+__le16 req_type;
+__le16 seq_id;
+__le16 resp_len;
+__le16 total_data_len;
+__le16 copied_data_len;
+u8 unused_0;
+u8 unused_1;
+u8 unused_2;
+u8 valid;
+};

/* Hardware Resource Manager Specification */
@@ -5938,10 +6249,16 @@ struct cmd_nums {
#define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL)
#define HWRM_CFA_DECAP_FILTER_FREE (0x109UL)
#define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL)
+#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC (0x10bUL)
+#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE (0x10cUL)
+#define HWRM_CFA_PAIR_ALLOC (0x10dUL)
+#define HWRM_CFA_PAIR_FREE (0x10eUL)
+#define HWRM_CFA_PAIR_INFO (0x10fUL)
+#define HWRM_FW_IPC_MSG (0x110UL)
#define HWRM_SELFTEST_QLIST (0x200UL)
#define HWRM_SELFTEST_EXEC (0x201UL)
#define HWRM_SELFTEST_IRQ (0x202UL)
-#define HWRM_SELFTEST_RETREIVE_EYE_DATA (0x203UL)
+#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA (0x203UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
...@@ -5949,6 +6266,9 @@ struct cmd_nums { ...@@ -5949,6 +6266,9 @@ struct cmd_nums {
#define HWRM_DBG_DUMP (0xff14UL) #define HWRM_DBG_DUMP (0xff14UL)
#define HWRM_DBG_ERASE_NVM (0xff15UL) #define HWRM_DBG_ERASE_NVM (0xff15UL)
#define HWRM_DBG_CFG (0xff16UL) #define HWRM_DBG_CFG (0xff16UL)
#define HWRM_DBG_COREDUMP_LIST (0xff17UL)
#define HWRM_DBG_COREDUMP_INITIATE (0xff18UL)
#define HWRM_DBG_COREDUMP_RETRIEVE (0xff19UL)
#define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL) #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL)
#define HWRM_NVM_VALIDATE_OPTION (0xffefUL) #define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
#define HWRM_NVM_FLUSH (0xfff0UL) #define HWRM_NVM_FLUSH (0xfff0UL)
...@@ -6123,6 +6443,58 @@ struct rx_port_stats { ...@@ -6123,6 +6443,58 @@ struct rx_port_stats {
__le64 rx_stat_err; __le64 rx_stat_err;
}; };
/* VXLAN IPv4 encapsulation structure (16 bytes) */
struct hwrm_vxlan_ipv4_hdr {
u8 ver_hlen;
#define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
#define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
#define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
#define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
u8 tos;
__be16 ip_id;
__be16 flags_frag_offset;
u8 ttl;
u8 protocol;
__be32 src_ip_addr;
__be32 dest_ip_addr;
};
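/* Illustrative sketch (not part of the patch): version and IHL pack into
 * ver_hlen exactly like the first byte of a standard IPv4 header, so a
 * plain 20-byte header (version 4, IHL 5) gives:
 *
 *	(4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT) |
 *	(5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT) == 0x45
 */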
/* VXLAN IPv6 encapsulation structure (32 bytes) */
struct hwrm_vxlan_ipv6_hdr {
__be32 ver_tc_flow_label;
#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
__be16 payload_len;
u8 next_hdr;
u8 ttl;
__be32 src_ip_addr[4];
__be32 dest_ip_addr[4];
};
/* VXLAN encapsulation structure (72 bytes) */
struct hwrm_cfa_encap_data_vxlan {
u8 src_mac_addr[6];
__le16 unused_0;
u8 dst_mac_addr[6];
u8 num_vlan_tags;
u8 unused_1;
__be16 ovlan_tpid;
__be16 ovlan_tci;
__be16 ivlan_tpid;
__be16 ivlan_tci;
__le32 l3[10];
#define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
#define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
#define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
__be16 src_port;
__be16 dst_port;
__be32 vni;
};
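/* The l3 words above carry either an hwrm_vxlan_ipv4_hdr or an
 * hwrm_vxlan_ipv6_hdr, selected via the L3_VER bits; users overlay the
 * area with a cast, e.g. (sketch):
 *
 *	struct hwrm_vxlan_ipv4_hdr *ip4 =
 *		(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
 */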
/* Periodic Statistics Context DMA to host (160 bytes) */ /* Periodic Statistics Context DMA to host (160 bytes) */
struct ctx_hw_stats { struct ctx_hw_stats {
__le64 rx_ucast_pkts; __le64 rx_ucast_pkts;
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <net/tc_act/tc_skbedit.h> #include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h> #include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include "bnxt_hsi.h" #include "bnxt_hsi.h"
#include "bnxt.h" #include "bnxt.h"
...@@ -89,6 +90,23 @@ static void bnxt_tc_parse_vlan(struct bnxt *bp, ...@@ -89,6 +90,23 @@ static void bnxt_tc_parse_vlan(struct bnxt *bp,
} }
} }
static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
struct bnxt_tc_actions *actions,
const struct tc_action *tc_act)
{
struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
struct ip_tunnel_key *tun_key = &tun_info->key;
if (ip_tunnel_info_af(tun_info) != AF_INET) {
netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
return -EOPNOTSUPP;
}
actions->tun_encap_key = *tun_key;
actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
return 0;
}
static int bnxt_tc_parse_actions(struct bnxt *bp, static int bnxt_tc_parse_actions(struct bnxt *bp,
struct bnxt_tc_actions *actions, struct bnxt_tc_actions *actions,
struct tcf_exts *tc_exts) struct tcf_exts *tc_exts)
...@@ -123,9 +141,35 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, ...@@ -123,9 +141,35 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
bnxt_tc_parse_vlan(bp, actions, tc_act); bnxt_tc_parse_vlan(bp, actions, tc_act);
continue; continue;
} }
/* Tunnel encap */
if (is_tcf_tunnel_set(tc_act)) {
rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
if (rc)
return rc;
continue;
}
/* Tunnel decap */
if (is_tcf_tunnel_release(tc_act)) {
actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
continue;
}
} }
return 0; if (rc)
return rc;
/* Tunnel encap/decap action must be accompanied by a redirect action */
if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP ||
actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) &&
!(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) {
netdev_info(bp->dev,
"error: no redir action along with encap/decap");
return -EINVAL;
}
return rc;
} }
#define GET_KEY(flow_cmd, key_type) \ #define GET_KEY(flow_cmd, key_type) \
...@@ -252,6 +296,54 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, ...@@ -252,6 +296,54 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
flow->l4_mask.icmp.code = mask->code; flow->l4_mask.icmp.code = mask->code;
} }
if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
struct flow_dissector_key_control *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);
addr_type = key->addr_type;
}
if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
struct flow_dissector_key_ipv4_addrs *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
struct flow_dissector_key_ipv4_addrs *mask =
GET_MASK(tc_flow_cmd,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
flow->tun_key.u.ipv4.dst = key->dst;
flow->tun_mask.u.ipv4.dst = mask->dst;
flow->tun_key.u.ipv4.src = key->src;
flow->tun_mask.u.ipv4.src = mask->src;
} else if (dissector_uses_key(dissector,
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
return -EOPNOTSUPP;
}
if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
struct flow_dissector_key_keyid *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
struct flow_dissector_key_keyid *mask =
GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
}
if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
struct flow_dissector_key_ports *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
struct flow_dissector_key_ports *mask =
GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
flow->tun_key.tp_dst = key->dst;
flow->tun_mask.tp_dst = mask->dst;
flow->tun_key.tp_src = key->src;
flow->tun_mask.tp_src = mask->src;
}
return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts); return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
} }
...@@ -293,7 +385,8 @@ static bool is_wildcard(void *mask, int len) ...@@ -293,7 +385,8 @@ static bool is_wildcard(void *mask, int len)
} }
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
__le16 ref_flow_handle, __le16 *flow_handle) __le16 ref_flow_handle,
__le32 tunnel_handle, __le16 *flow_handle)
{ {
struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_tc_actions *actions = &flow->actions; struct bnxt_tc_actions *actions = &flow->actions;
...@@ -307,6 +400,14 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, ...@@ -307,6 +400,14 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
req.src_fid = cpu_to_le16(flow->src_fid); req.src_fid = cpu_to_le16(flow->src_fid);
req.ref_flow_handle = ref_flow_handle; req.ref_flow_handle = ref_flow_handle;
if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
req.tunnel_handle = tunnel_handle;
flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
}
req.ethertype = flow->l2_key.ether_type; req.ethertype = flow->l2_key.ether_type;
req.ip_proto = flow->l4_key.ip_proto; req.ip_proto = flow->l4_key.ip_proto;
...@@ -403,78 +504,153 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, ...@@ -403,78 +504,153 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
return rc; return rc;
} }
/* Add val to accum while handling a possible wraparound static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
* of val. Even though val is of type u64, its actual width struct bnxt_tc_flow *flow,
* is denoted by mask and will wrap-around beyond that width. struct bnxt_tc_l2_key *l2_info,
*/ __le32 ref_decap_handle,
static void accumulate_val(u64 *accum, u64 val, u64 mask) __le32 *decap_filter_handle)
{ {
#define low_bits(x, mask) ((x) & (mask)) struct hwrm_cfa_decap_filter_alloc_output *resp =
#define high_bits(x, mask) ((x) & ~(mask)) bp->hwrm_cmd_resp_addr;
bool wrapped = val < low_bits(*accum, mask); struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
struct ip_tunnel_key *tun_key = &flow->tun_key;
u32 enables = 0;
int rc;
*accum = high_bits(*accum, mask) + val; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);
if (wrapped)
*accum += (mask + 1);
}
/* The HW counters' width is much less than 64 bits. req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
* Handle possible wrap-around while updating the stat counters enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
*/ CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
static void bnxt_flow_stats_fix_wraparound(struct bnxt_tc_info *tc_info, req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
struct bnxt_tc_flow_stats *stats, req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;
struct bnxt_tc_flow_stats *hw_stats)
{ if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
accumulate_val(&stats->bytes, hw_stats->bytes, tc_info->bytes_mask); enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
accumulate_val(&stats->packets, hw_stats->packets, /* tunnel_id is wrongly defined in hsi defn. as __le32 */
tc_info->packets_mask); req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
}
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR |
CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR;
ether_addr_copy(req.dst_macaddr, l2_info->dmac);
ether_addr_copy(req.src_macaddr, l2_info->smac);
}
if (l2_info->num_vlans) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
req.t_ivlan_vid = l2_info->inner_vlan_tci;
}
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
req.ethertype = htons(ETH_P_IP);
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
req.src_ipaddr[0] = tun_key->u.ipv4.src;
}
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
req.dst_port = tun_key->tp_dst;
}
/* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
* is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
*/
req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
req.enables = cpu_to_le32(enables);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
*decap_filter_handle = resp->decap_filter_id;
else
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
} }
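/* Note the usual HWRM convention above: each optional field filled into
 * the request is only honored by FW if its corresponding bit is set in
 * req.enables.
 */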
/* Fix possible wraparound of the stats queried from HW, calculate static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
* the delta from prev_stats, and also update the prev_stats. __le32 decap_filter_handle)
* The HW flow stats are fetched under the hwrm_cmd_lock mutex.
* This routine is best called while under the mutex so that the
* stats processing happens atomically.
*/
static void bnxt_flow_stats_calc(struct bnxt_tc_info *tc_info,
struct bnxt_tc_flow *flow,
struct bnxt_tc_flow_stats *stats)
{ {
struct bnxt_tc_flow_stats *acc_stats, *prev_stats; struct hwrm_cfa_decap_filter_free_input req = { 0 };
int rc;
acc_stats = &flow->stats; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
bnxt_flow_stats_fix_wraparound(tc_info, acc_stats, stats); req.decap_filter_id = decap_filter_handle;
prev_stats = &flow->prev_stats; rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
stats->bytes = acc_stats->bytes - prev_stats->bytes; if (rc)
stats->packets = acc_stats->packets - prev_stats->packets; netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
*prev_stats = *acc_stats; return rc;
} }
static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
__le16 flow_handle, struct ip_tunnel_key *encap_key,
struct bnxt_tc_flow *flow, struct bnxt_tc_l2_key *l2_info,
struct bnxt_tc_flow_stats *stats) __le32 *encap_record_handle)
{ {
struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_cfa_encap_record_alloc_output *resp =
struct hwrm_cfa_flow_stats_input req = { 0 }; bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_encap_record_alloc_input req = { 0 };
struct hwrm_cfa_encap_data_vxlan *encap =
(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
int rc; int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);
req.num_flows = cpu_to_le16(1);
req.flow_handle_0 = flow_handle;
mutex_lock(&bp->hwrm_cmd_lock); req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) { ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
stats->packets = le64_to_cpu(resp->packet_0); ether_addr_copy(encap->src_mac_addr, l2_info->smac);
stats->bytes = le64_to_cpu(resp->byte_0); if (l2_info->num_vlans) {
bnxt_flow_stats_calc(&bp->tc_info, flow, stats); encap->num_vlan_tags = l2_info->num_vlans;
} else { encap->ovlan_tci = l2_info->inner_vlan_tci;
netdev_info(bp->dev, "error rc=%d", rc); encap->ovlan_tpid = l2_info->inner_vlan_tpid;
} }
encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
encap_ipv4->ttl = encap_key->ttl;
encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
encap_ipv4->protocol = IPPROTO_UDP;
encap->dst_port = encap_key->tp_dst;
encap->vni = tunnel_id_to_key32(encap_key->tun_id);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
*encap_record_handle = resp->encap_record_id;
else
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
mutex_unlock(&bp->hwrm_cmd_lock); mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
static int hwrm_cfa_encap_record_free(struct bnxt *bp,
__le32 encap_record_handle)
{
struct hwrm_cfa_encap_record_free_input req = { 0 };
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
req.encap_record_id = encap_record_handle;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
return rc; return rc;
} }
...@@ -482,7 +658,7 @@ static int bnxt_tc_put_l2_node(struct bnxt *bp, ...@@ -482,7 +658,7 @@ static int bnxt_tc_put_l2_node(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node) struct bnxt_tc_flow_node *flow_node)
{ {
struct bnxt_tc_l2_node *l2_node = flow_node->l2_node; struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
struct bnxt_tc_info *tc_info = &bp->tc_info; struct bnxt_tc_info *tc_info = bp->tc_info;
int rc; int rc;
/* remove flow_node from the L2 shared flow list */ /* remove flow_node from the L2 shared flow list */
...@@ -519,7 +695,7 @@ bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table, ...@@ -519,7 +695,7 @@ bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
rc = rhashtable_insert_fast(l2_table, &l2_node->node, rc = rhashtable_insert_fast(l2_table, &l2_node->node,
ht_params); ht_params);
if (rc) { if (rc) {
kfree(l2_node); kfree_rcu(l2_node, rcu);
netdev_err(bp->dev, netdev_err(bp->dev,
"Error: %s: rhashtable_insert_fast: %d", "Error: %s: rhashtable_insert_fast: %d",
__func__, rc); __func__, rc);
...@@ -538,7 +714,7 @@ bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, ...@@ -538,7 +714,7 @@ bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
struct bnxt_tc_flow_node *flow_node, struct bnxt_tc_flow_node *flow_node,
__le16 *ref_flow_handle) __le16 *ref_flow_handle)
{ {
struct bnxt_tc_info *tc_info = &bp->tc_info; struct bnxt_tc_info *tc_info = bp->tc_info;
struct bnxt_tc_flow_node *ref_flow_node; struct bnxt_tc_flow_node *ref_flow_node;
struct bnxt_tc_l2_node *l2_node; struct bnxt_tc_l2_node *l2_node;
...@@ -588,10 +764,380 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow) ...@@ -588,10 +764,380 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
return true; return true;
} }
/* Returns the final refcount of the node on success
* or a -ve error code on failure
*/
static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
struct rhashtable *tunnel_table,
struct rhashtable_params *ht_params,
struct bnxt_tc_tunnel_node *tunnel_node)
{
int rc;
if (--tunnel_node->refcount == 0) {
rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
*ht_params);
if (rc) {
netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
rc = -1;
}
kfree_rcu(tunnel_node, rcu);
return rc;
} else {
return tunnel_node->refcount;
}
}
/* Get (or add) either encap or decap tunnel node from/to the supplied
* hash table.
*/
static struct bnxt_tc_tunnel_node *
bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
struct rhashtable_params *ht_params,
struct ip_tunnel_key *tun_key)
{
struct bnxt_tc_tunnel_node *tunnel_node;
int rc;
tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
if (!tunnel_node) {
tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
if (!tunnel_node) {
rc = -ENOMEM;
goto err;
}
tunnel_node->key = *tun_key;
tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
*ht_params);
if (rc) {
kfree_rcu(tunnel_node, rcu);
goto err;
}
}
tunnel_node->refcount++;
return tunnel_node;
err:
netdev_info(bp->dev, "error rc=%d", rc);
return NULL;
}
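/* Typical get/put pairing (sketch, mirroring the encap path below):
 *
 *	node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
 *				       &tc_info->encap_ht_params, tun_key);
 *	...
 *	bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
 *				&tc_info->encap_ht_params, node);
 *
 * Each get bumps the refcount; the node is freed only when the last flow
 * sharing the tunnel key releases it.
 */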
static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
struct bnxt_tc_flow *flow,
struct bnxt_tc_l2_key *l2_key,
struct bnxt_tc_flow_node *flow_node,
__le32 *ref_decap_handle)
{
struct bnxt_tc_info *tc_info = bp->tc_info;
struct bnxt_tc_flow_node *ref_flow_node;
struct bnxt_tc_l2_node *decap_l2_node;
decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
tc_info->decap_l2_ht_params,
l2_key);
if (!decap_l2_node)
return -1;
/* If any other flow is using this decap_l2_node, use its decap_handle
* as the ref_decap_handle
*/
if (decap_l2_node->refcount > 0) {
ref_flow_node =
list_first_entry(&decap_l2_node->common_l2_flows,
struct bnxt_tc_flow_node,
decap_l2_list_node);
*ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
} else {
*ref_decap_handle = INVALID_TUNNEL_HANDLE;
}
/* Insert the l2_node into the flow_node so that subsequent flows
* with a matching decap l2 key can use the decap_filter_handle of
* this flow as their ref_decap_handle
*/
flow_node->decap_l2_node = decap_l2_node;
list_add(&flow_node->decap_l2_list_node,
&decap_l2_node->common_l2_flows);
decap_l2_node->refcount++;
return 0;
}
static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node)
{
struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
struct bnxt_tc_info *tc_info = bp->tc_info;
int rc;
/* remove flow_node from the decap L2 sharing flow list */
list_del(&flow_node->decap_l2_list_node);
if (--decap_l2_node->refcount == 0) {
rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
&decap_l2_node->node,
tc_info->decap_l2_ht_params);
if (rc)
netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
kfree_rcu(decap_l2_node, rcu);
}
}
static void bnxt_tc_put_decap_handle(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node)
{
__le32 decap_handle = flow_node->decap_node->tunnel_handle;
struct bnxt_tc_info *tc_info = bp->tc_info;
int rc;
if (flow_node->decap_l2_node)
bnxt_tc_put_decap_l2_node(bp, flow_node);
rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
&tc_info->decap_ht_params,
flow_node->decap_node);
if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
hwrm_cfa_decap_filter_free(bp, decap_handle);
}
static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
struct ip_tunnel_key *tun_key,
struct bnxt_tc_l2_key *l2_info,
struct net_device *real_dst_dev)
{
struct flowi4 flow = { {0} };
struct net_device *dst_dev;
struct neighbour *nbr;
struct rtable *rt;
int rc;
flow.flowi4_proto = IPPROTO_UDP;
flow.fl4_dport = tun_key->tp_dst;
flow.daddr = tun_key->u.ipv4.dst;
rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
if (IS_ERR(rt)) {
netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
return -EOPNOTSUPP;
}
/* The route must either point to the real_dst_dev or a dst_dev that
* uses the real_dst_dev.
*/
dst_dev = rt->dst.dev;
if (is_vlan_dev(dst_dev)) {
struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);
if (vlan->real_dev != real_dst_dev) {
netdev_info(bp->dev,
"dst_dev(%s) doesn't use PF-if(%s)",
netdev_name(dst_dev),
netdev_name(real_dst_dev));
rc = -EOPNOTSUPP;
goto put_rt;
}
l2_info->inner_vlan_tci = htons(vlan->vlan_id);
l2_info->inner_vlan_tpid = vlan->vlan_proto;
l2_info->num_vlans = 1;
} else if (dst_dev != real_dst_dev) {
netdev_info(bp->dev,
"dst_dev(%s) for %pI4b is not PF-if(%s)",
netdev_name(dst_dev), &flow.daddr,
netdev_name(real_dst_dev));
rc = -EOPNOTSUPP;
goto put_rt;
}
nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
if (!nbr) {
netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
&flow.daddr);
rc = -EOPNOTSUPP;
goto put_rt;
}
tun_key->u.ipv4.src = flow.saddr;
tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
neigh_release(nbr);
ip_rt_put(rt);
return 0;
put_rt:
ip_rt_put(rt);
return rc;
}
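/* Both tunnel paths funnel through this resolver: the encap path passes
 * the encap key directly, while the decap path first swaps dst/src (see
 * bnxt_tc_get_decap_handle below) so the "destination" resolved here is
 * the remote VTEP.
 */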
static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
struct bnxt_tc_flow_node *flow_node,
__le32 *decap_filter_handle)
{
struct ip_tunnel_key *decap_key = &flow->tun_key;
struct bnxt_tc_info *tc_info = bp->tc_info;
struct bnxt_tc_l2_key l2_info = { {0} };
struct bnxt_tc_tunnel_node *decap_node;
struct ip_tunnel_key tun_key = { 0 };
struct bnxt_tc_l2_key *decap_l2_info;
__le32 ref_decap_handle;
int rc;
/* Check if there's another flow using the same tunnel decap.
* If not, add this tunnel to the table and resolve the other
* tunnel header fields
*/
decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
&tc_info->decap_ht_params,
decap_key);
if (!decap_node)
return -ENOMEM;
flow_node->decap_node = decap_node;
if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
goto done;
/* Resolve the L2 fields for tunnel decap
* Resolve the route for remote vtep (saddr) of the decap key
* Find its next-hop mac addrs
*/
tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
tun_key.tp_dst = flow->tun_key.tp_dst;
rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev);
if (rc)
goto put_decap;
decap_key->ttl = tun_key.ttl;
decap_l2_info = &decap_node->l2_info;
ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
ether_addr_copy(decap_l2_info->smac, l2_info.dmac);
if (l2_info.num_vlans) {
decap_l2_info->num_vlans = l2_info.num_vlans;
decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
}
flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;
/* For getting a decap_filter_handle we first need to check if
* there are any other decap flows that share the same tunnel L2
* key and if so, pass that flow's decap_filter_handle as the
* ref_decap_handle for this flow.
*/
rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
&ref_decap_handle);
if (rc)
goto put_decap;
/* Issue the hwrm cmd to allocate a decap filter handle */
rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
ref_decap_handle,
&decap_node->tunnel_handle);
if (rc)
goto put_decap_l2;
done:
*decap_filter_handle = decap_node->tunnel_handle;
return 0;
put_decap_l2:
bnxt_tc_put_decap_l2_node(bp, flow_node);
put_decap:
bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
&tc_info->decap_ht_params,
flow_node->decap_node);
return rc;
}
static void bnxt_tc_put_encap_handle(struct bnxt *bp,
struct bnxt_tc_tunnel_node *encap_node)
{
__le32 encap_handle = encap_node->tunnel_handle;
struct bnxt_tc_info *tc_info = bp->tc_info;
int rc;
rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
&tc_info->encap_ht_params, encap_node);
if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
hwrm_cfa_encap_record_free(bp, encap_handle);
}
/* Look up the tunnel encap table and check if there's an encap_handle
* alloc'd already.
* If not, query L2 info via a route lookup and issue an encap_record_alloc
* cmd to FW.
*/
static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
struct bnxt_tc_flow_node *flow_node,
__le32 *encap_handle)
{
struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
struct bnxt_tc_info *tc_info = bp->tc_info;
struct bnxt_tc_tunnel_node *encap_node;
int rc;
/* Check if there's another flow using the same tunnel encap.
* If not, add this tunnel to the table and resolve the other
* tunnel header fields
*/
encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
&tc_info->encap_ht_params,
encap_key);
if (!encap_node)
return -ENOMEM;
flow_node->encap_node = encap_node;
if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
goto done;
rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info,
flow->actions.dst_dev);
if (rc)
goto put_encap;
/* Allocate a new tunnel encap record */
rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
&encap_node->tunnel_handle);
if (rc)
goto put_encap;
done:
*encap_handle = encap_node->tunnel_handle;
return 0;
put_encap:
bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
&tc_info->encap_ht_params, encap_node);
return rc;
}
static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
struct bnxt_tc_flow *flow,
struct bnxt_tc_flow_node *flow_node)
{
if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
bnxt_tc_put_decap_handle(bp, flow_node);
else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
}
static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
struct bnxt_tc_flow *flow,
struct bnxt_tc_flow_node *flow_node,
__le32 *tunnel_handle)
{
if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
return bnxt_tc_get_decap_handle(bp, flow, flow_node,
tunnel_handle);
else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
return bnxt_tc_get_encap_handle(bp, flow, flow_node,
tunnel_handle);
else
return 0;
}
static int __bnxt_tc_del_flow(struct bnxt *bp, static int __bnxt_tc_del_flow(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node) struct bnxt_tc_flow_node *flow_node)
{ {
struct bnxt_tc_info *tc_info = &bp->tc_info; struct bnxt_tc_info *tc_info = bp->tc_info;
int rc; int rc;
/* send HWRM cmd to free the flow-id */ /* send HWRM cmd to free the flow-id */
...@@ -599,6 +1145,9 @@ static int __bnxt_tc_del_flow(struct bnxt *bp, ...@@ -599,6 +1145,9 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
mutex_lock(&tc_info->lock); mutex_lock(&tc_info->lock);
/* release references to any tunnel encap/decap nodes */
bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);
/* release reference to l2 node */ /* release reference to l2 node */
bnxt_tc_put_l2_node(bp, flow_node); bnxt_tc_put_l2_node(bp, flow_node);
...@@ -631,8 +1180,9 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, ...@@ -631,8 +1180,9 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *tc_flow_cmd) struct tc_cls_flower_offload *tc_flow_cmd)
{ {
struct bnxt_tc_flow_node *new_node, *old_node; struct bnxt_tc_flow_node *new_node, *old_node;
struct bnxt_tc_info *tc_info = &bp->tc_info; struct bnxt_tc_info *tc_info = bp->tc_info;
struct bnxt_tc_flow *flow; struct bnxt_tc_flow *flow;
__le32 tunnel_handle = 0;
__le16 ref_flow_handle; __le16 ref_flow_handle;
int rc; int rc;
...@@ -670,12 +1220,19 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, ...@@ -670,12 +1220,19 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
if (rc) if (rc)
goto unlock; goto unlock;
/* If the flow involves tunnel encap/decap, get tunnel_handle */
rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
if (rc)
goto put_l2;
/* send HWRM cmd to alloc the flow */ /* send HWRM cmd to alloc the flow */
rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle, rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
&new_node->flow_handle); tunnel_handle, &new_node->flow_handle);
if (rc) if (rc)
goto put_l2; goto put_tunnel;
flow->lastused = jiffies;
spin_lock_init(&flow->stats_lock);
/* add new flow to flow-table */ /* add new flow to flow-table */
rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node, rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
tc_info->flow_ht_params); tc_info->flow_ht_params);
...@@ -687,12 +1244,14 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, ...@@ -687,12 +1244,14 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
hwrm_flow_free: hwrm_flow_free:
bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle); bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
put_tunnel:
bnxt_tc_put_tunnel_handle(bp, flow, new_node);
put_l2: put_l2:
bnxt_tc_put_l2_node(bp, new_node); bnxt_tc_put_l2_node(bp, new_node);
unlock: unlock:
mutex_unlock(&tc_info->lock); mutex_unlock(&tc_info->lock);
free_node: free_node:
kfree(new_node); kfree_rcu(new_node, rcu);
done: done:
netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d", netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
__func__, tc_flow_cmd->cookie, rc); __func__, tc_flow_cmd->cookie, rc);
...@@ -702,7 +1261,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, ...@@ -702,7 +1261,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
static int bnxt_tc_del_flow(struct bnxt *bp, static int bnxt_tc_del_flow(struct bnxt *bp,
struct tc_cls_flower_offload *tc_flow_cmd) struct tc_cls_flower_offload *tc_flow_cmd)
{ {
struct bnxt_tc_info *tc_info = &bp->tc_info; struct bnxt_tc_info *tc_info = bp->tc_info;
struct bnxt_tc_flow_node *flow_node; struct bnxt_tc_flow_node *flow_node;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table, flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
...@@ -720,10 +1279,11 @@ static int bnxt_tc_del_flow(struct bnxt *bp, ...@@ -720,10 +1279,11 @@ static int bnxt_tc_del_flow(struct bnxt *bp,
static int bnxt_tc_get_flow_stats(struct bnxt *bp, static int bnxt_tc_get_flow_stats(struct bnxt *bp,
struct tc_cls_flower_offload *tc_flow_cmd) struct tc_cls_flower_offload *tc_flow_cmd)
{ {
struct bnxt_tc_info *tc_info = &bp->tc_info; struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
struct bnxt_tc_info *tc_info = bp->tc_info;
struct bnxt_tc_flow_node *flow_node; struct bnxt_tc_flow_node *flow_node;
struct bnxt_tc_flow_stats stats; struct bnxt_tc_flow *flow;
int rc; unsigned long lastused;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table, flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie, &tc_flow_cmd->cookie,
...@@ -734,15 +1294,183 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, ...@@ -734,15 +1294,183 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
return -1; return -1;
} }
rc = bnxt_hwrm_cfa_flow_stats_get(bp, flow_node->flow_handle, flow = &flow_node->flow;
&flow_node->flow, &stats); curr_stats = &flow->stats;
prev_stats = &flow->prev_stats;
spin_lock(&flow->stats_lock);
stats.packets = curr_stats->packets - prev_stats->packets;
stats.bytes = curr_stats->bytes - prev_stats->bytes;
*prev_stats = *curr_stats;
lastused = flow->lastused;
spin_unlock(&flow->stats_lock);
tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
lastused);
return 0;
}
static int
bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
struct bnxt_tc_stats_batch stats_batch[])
{
struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_flow_stats_input req = { 0 };
__le16 *req_flow_handles = &req.flow_handle_0;
int rc, i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
req.num_flows = cpu_to_le16(num_flows);
for (i = 0; i < num_flows; i++) {
struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
req_flow_handles[i] = flow_node->flow_handle;
}
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
__le64 *resp_packets = &resp->packet_0;
__le64 *resp_bytes = &resp->byte_0;
for (i = 0; i < num_flows; i++) {
stats_batch[i].hw_stats.packets =
le64_to_cpu(resp_packets[i]);
stats_batch[i].hw_stats.bytes =
le64_to_cpu(resp_bytes[i]);
}
} else {
netdev_info(bp->dev, "error rc=%d", rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
/* Add val to accum while handling a possible wraparound
* of val. Even though val is of type u64, its actual width
* is denoted by mask and will wrap-around beyond that width.
*/
static void accumulate_val(u64 *accum, u64 val, u64 mask)
{
#define low_bits(x, mask) ((x) & (mask))
#define high_bits(x, mask) ((x) & ~(mask))
bool wrapped = val < low_bits(*accum, mask);
*accum = high_bits(*accum, mask) + val;
if (wrapped)
*accum += (mask + 1);
}
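/* Worked example (sketch): for a 28-bit HW counter, mask == 0x0fffffff.
 * If *accum == 0x0ffffff0 and HW now reports val == 0x10, then
 * val < low_bits(*accum, mask), i.e. the counter wrapped, and:
 *
 *	*accum = high_bits(*accum, mask) + val + (mask + 1)
 *	       = 0x0 + 0x10 + 0x10000000 = 0x10000010
 *
 * so the accumulated count correctly advances by 0x20 across the wrap.
 */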
/* The HW counters' width is much less than 64 bits.
* Handle possible wrap-around while updating the stat counters
*/
static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
struct bnxt_tc_flow_stats *acc_stats,
struct bnxt_tc_flow_stats *hw_stats)
{
accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
accumulate_val(&acc_stats->packets, hw_stats->packets,
tc_info->packets_mask);
}
static int
bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
struct bnxt_tc_stats_batch stats_batch[])
{
struct bnxt_tc_info *tc_info = bp->tc_info;
int rc, i;
rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
if (rc) if (rc)
return rc; return rc;
tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, 0); for (i = 0; i < num_flows; i++) {
struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
struct bnxt_tc_flow *flow = &flow_node->flow;
spin_lock(&flow->stats_lock);
bnxt_flow_stats_accum(tc_info, &flow->stats,
&stats_batch[i].hw_stats);
if (flow->stats.packets != flow->prev_stats.packets)
flow->lastused = jiffies;
spin_unlock(&flow->stats_lock);
}
return 0; return 0;
} }
static int
bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
struct bnxt_tc_stats_batch stats_batch[],
int *num_flows)
{
struct bnxt_tc_info *tc_info = bp->tc_info;
struct rhashtable_iter *iter = &tc_info->iter;
void *flow_node;
int rc, i;
rc = rhashtable_walk_start(iter);
if (rc && rc != -EAGAIN) {
i = 0;
goto done;
}
rc = 0;
for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
flow_node = rhashtable_walk_next(iter);
if (IS_ERR(flow_node)) {
i = 0;
if (PTR_ERR(flow_node) == -EAGAIN) {
continue;
} else {
rc = PTR_ERR(flow_node);
goto done;
}
}
/* No more flows */
if (!flow_node)
goto done;
stats_batch[i].flow_node = flow_node;
}
done:
rhashtable_walk_stop(iter);
*num_flows = i;
return rc;
}
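/* A walk can race with a table resize and return -EAGAIN; in that case
 * bnxt_tc_flow_stats_work() below simply restarts the batch.
 */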
void bnxt_tc_flow_stats_work(struct bnxt *bp)
{
struct bnxt_tc_info *tc_info = bp->tc_info;
int num_flows, rc;
num_flows = atomic_read(&tc_info->flow_table.nelems);
if (!num_flows)
return;
rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);
for (;;) {
rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
&num_flows);
if (rc) {
if (rc == -EAGAIN)
continue;
break;
}
if (!num_flows)
break;
bnxt_tc_flow_stats_batch_update(bp, num_flows,
tc_info->stats_batch);
}
rhashtable_walk_exit(&tc_info->iter);
}
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower) struct tc_cls_flower_offload *cls_flower)
{ {
...@@ -781,19 +1509,37 @@ static const struct rhashtable_params bnxt_tc_l2_ht_params = { ...@@ -781,19 +1509,37 @@ static const struct rhashtable_params bnxt_tc_l2_ht_params = {
.automatic_shrinking = true .automatic_shrinking = true
}; };
static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
.head_offset = offsetof(struct bnxt_tc_l2_node, node),
.key_offset = offsetof(struct bnxt_tc_l2_node, key),
.key_len = BNXT_TC_L2_KEY_LEN,
.automatic_shrinking = true
};
static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
.head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
.key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
.key_len = sizeof(struct ip_tunnel_key),
.automatic_shrinking = true
};
/* convert counter width in bits to a mask */ /* convert counter width in bits to a mask */
#define mask(width) ((u64)~0 >> (64 - (width))) #define mask(width) ((u64)~0 >> (64 - (width)))
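/* e.g. mask(28) == 0x0fffffff — the mask for a 28-bit FW counter, as
 * consumed by accumulate_val() above.
 */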
int bnxt_init_tc(struct bnxt *bp) int bnxt_init_tc(struct bnxt *bp)
{ {
struct bnxt_tc_info *tc_info = &bp->tc_info; struct bnxt_tc_info *tc_info;
int rc; int rc;
if (bp->hwrm_spec_code < 0x10800) { if (bp->hwrm_spec_code < 0x10803) {
netdev_warn(bp->dev, netdev_warn(bp->dev,
"Firmware does not support TC flower offload.\n"); "Firmware does not support TC flower offload.\n");
return -ENOTSUPP; return -ENOTSUPP;
} }
tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
if (!tc_info)
return -ENOMEM;
mutex_init(&tc_info->lock); mutex_init(&tc_info->lock);
/* Counter widths are programmed by FW */ /* Counter widths are programmed by FW */
...@@ -803,30 +1549,62 @@ int bnxt_init_tc(struct bnxt *bp) ...@@ -803,30 +1549,62 @@ int bnxt_init_tc(struct bnxt *bp)
tc_info->flow_ht_params = bnxt_tc_flow_ht_params; tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params); rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
if (rc) if (rc)
return rc; goto free_tc_info;
tc_info->l2_ht_params = bnxt_tc_l2_ht_params; tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params); rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
if (rc) if (rc)
goto destroy_flow_table; goto destroy_flow_table;
tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
rc = rhashtable_init(&tc_info->decap_l2_table,
&tc_info->decap_l2_ht_params);
if (rc)
goto destroy_l2_table;
tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
rc = rhashtable_init(&tc_info->decap_table,
&tc_info->decap_ht_params);
if (rc)
goto destroy_decap_l2_table;
tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
rc = rhashtable_init(&tc_info->encap_table,
&tc_info->encap_ht_params);
if (rc)
goto destroy_decap_table;
tc_info->enabled = true; tc_info->enabled = true;
bp->dev->hw_features |= NETIF_F_HW_TC; bp->dev->hw_features |= NETIF_F_HW_TC;
bp->dev->features |= NETIF_F_HW_TC; bp->dev->features |= NETIF_F_HW_TC;
bp->tc_info = tc_info;
return 0; return 0;
destroy_decap_table:
rhashtable_destroy(&tc_info->decap_table);
destroy_decap_l2_table:
rhashtable_destroy(&tc_info->decap_l2_table);
destroy_l2_table:
rhashtable_destroy(&tc_info->l2_table);
destroy_flow_table: destroy_flow_table:
rhashtable_destroy(&tc_info->flow_table); rhashtable_destroy(&tc_info->flow_table);
free_tc_info:
kfree(tc_info);
return rc; return rc;
} }
void bnxt_shutdown_tc(struct bnxt *bp) void bnxt_shutdown_tc(struct bnxt *bp)
{ {
struct bnxt_tc_info *tc_info = &bp->tc_info; struct bnxt_tc_info *tc_info = bp->tc_info;
if (!tc_info->enabled) if (!bnxt_tc_flower_enabled(bp))
return; return;
rhashtable_destroy(&tc_info->flow_table); rhashtable_destroy(&tc_info->flow_table);
rhashtable_destroy(&tc_info->l2_table); rhashtable_destroy(&tc_info->l2_table);
rhashtable_destroy(&tc_info->decap_l2_table);
rhashtable_destroy(&tc_info->decap_table);
rhashtable_destroy(&tc_info->encap_table);
kfree(tc_info);
bp->tc_info = NULL;
} }
...@@ -12,6 +12,8 @@ ...@@ -12,6 +12,8 @@
#ifdef CONFIG_BNXT_FLOWER_OFFLOAD #ifdef CONFIG_BNXT_FLOWER_OFFLOAD
#include <net/ip_tunnels.h>
/* Structs used for storing the filter/actions of the TC cmd. /* Structs used for storing the filter/actions of the TC cmd.
*/ */
struct bnxt_tc_l2_key { struct bnxt_tc_l2_key {
...@@ -50,6 +52,13 @@ struct bnxt_tc_l4_key { ...@@ -50,6 +52,13 @@ struct bnxt_tc_l4_key {
}; };
}; };
struct bnxt_tc_tunnel_key {
struct bnxt_tc_l2_key l2;
struct bnxt_tc_l3_key l3;
struct bnxt_tc_l4_key l4;
__be32 id;
};
struct bnxt_tc_actions { struct bnxt_tc_actions {
u32 flags; u32 flags;
#define BNXT_TC_ACTION_FLAG_FWD BIT(0) #define BNXT_TC_ACTION_FLAG_FWD BIT(0)
...@@ -57,16 +66,16 @@ struct bnxt_tc_actions { ...@@ -57,16 +66,16 @@ struct bnxt_tc_actions {
#define BNXT_TC_ACTION_FLAG_PUSH_VLAN BIT(3) #define BNXT_TC_ACTION_FLAG_PUSH_VLAN BIT(3)
#define BNXT_TC_ACTION_FLAG_POP_VLAN BIT(4) #define BNXT_TC_ACTION_FLAG_POP_VLAN BIT(4)
#define BNXT_TC_ACTION_FLAG_DROP BIT(5) #define BNXT_TC_ACTION_FLAG_DROP BIT(5)
#define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP BIT(6)
#define BNXT_TC_ACTION_FLAG_TUNNEL_DECAP BIT(7)
u16 dst_fid; u16 dst_fid;
struct net_device *dst_dev; struct net_device *dst_dev;
__be16 push_vlan_tpid; __be16 push_vlan_tpid;
__be16 push_vlan_tci; __be16 push_vlan_tci;
};
struct bnxt_tc_flow_stats { /* tunnel encap */
u64 packets; struct ip_tunnel_key tun_encap_key;
u64 bytes;
}; };
struct bnxt_tc_flow { struct bnxt_tc_flow {
...@@ -76,6 +85,16 @@ struct bnxt_tc_flow { ...@@ -76,6 +85,16 @@ struct bnxt_tc_flow {
#define BNXT_TC_FLOW_FLAGS_IPV6_ADDRS BIT(3) #define BNXT_TC_FLOW_FLAGS_IPV6_ADDRS BIT(3)
#define BNXT_TC_FLOW_FLAGS_PORTS BIT(4) #define BNXT_TC_FLOW_FLAGS_PORTS BIT(4)
#define BNXT_TC_FLOW_FLAGS_ICMP BIT(5) #define BNXT_TC_FLOW_FLAGS_ICMP BIT(5)
#define BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS BIT(6)
#define BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS BIT(7)
#define BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS BIT(8)
#define BNXT_TC_FLOW_FLAGS_TUNL_PORTS BIT(9)
#define BNXT_TC_FLOW_FLAGS_TUNL_ID BIT(10)
#define BNXT_TC_FLOW_FLAGS_TUNNEL (BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS | \
BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS | \
BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS |\
BNXT_TC_FLOW_FLAGS_TUNL_PORTS |\
BNXT_TC_FLOW_FLAGS_TUNL_ID)
/* flow applicable to pkts ingressing on this fid */ /* flow applicable to pkts ingressing on this fid */
u16 src_fid; u16 src_fid;
...@@ -85,6 +104,8 @@ struct bnxt_tc_flow { ...@@ -85,6 +104,8 @@ struct bnxt_tc_flow {
struct bnxt_tc_l3_key l3_mask; struct bnxt_tc_l3_key l3_mask;
struct bnxt_tc_l4_key l4_key; struct bnxt_tc_l4_key l4_key;
struct bnxt_tc_l4_key l4_mask; struct bnxt_tc_l4_key l4_mask;
struct ip_tunnel_key tun_key;
struct ip_tunnel_key tun_mask;
struct bnxt_tc_actions actions; struct bnxt_tc_actions actions;
...@@ -93,13 +114,39 @@ struct bnxt_tc_flow { ...@@ -93,13 +114,39 @@ struct bnxt_tc_flow {
/* previous snapshot of stats */ /* previous snapshot of stats */
struct bnxt_tc_flow_stats prev_stats; struct bnxt_tc_flow_stats prev_stats;
unsigned long lastused; /* jiffies */ unsigned long lastused; /* jiffies */
/* for calculating delta from prev_stats and
* updating prev_stats atomically.
*/
spinlock_t stats_lock;
};
/* Tunnel encap/decap hash table
* This table is used to maintain a list of flows that use
* the same tunnel encap/decap params (ip_daddrs, vni, udp_dport)
* and the FW returned handle.
* A separate table is maintained for encap and decap
*/
struct bnxt_tc_tunnel_node {
struct ip_tunnel_key key;
struct rhash_head node;
/* tunnel l2 info */
struct bnxt_tc_l2_key l2_info;
#define INVALID_TUNNEL_HANDLE cpu_to_le32(0xffffffff)
/* tunnel handle returned by FW */
__le32 tunnel_handle;
u32 refcount;
struct rcu_head rcu;
}; };
/* L2 hash table /* L2 hash table
* This data-struct is used for L2-flow table. * The same data-struct is used for L2-flow table and L2-tunnel table.
* The L2 part of a flow is stored in a hash table. * The L2 part of a flow or tunnel is stored in a hash table.
* A flow that shares the same L2 key/mask with an * A flow that shares the same L2 key/mask with an
* already existing flow must refer to its flow handle. * already existing flow/tunnel must refer to its flow handle or
* decap_filter_id respectively.
*/ */
struct bnxt_tc_l2_node { struct bnxt_tc_l2_node {
/* hash key: first 16b of key */ /* hash key: first 16b of key */
...@@ -110,7 +157,7 @@ struct bnxt_tc_l2_node { ...@@ -110,7 +157,7 @@ struct bnxt_tc_l2_node {
/* a linked list of flows that share the same l2 key */ /* a linked list of flows that share the same l2 key */
struct list_head common_l2_flows; struct list_head common_l2_flows;
/* number of flows sharing the l2 key */ /* number of flows/tunnels sharing the l2 key */
u16 refcount; u16 refcount;
struct rcu_head rcu; struct rcu_head rcu;
...@@ -130,6 +177,16 @@ struct bnxt_tc_flow_node { ...@@ -130,6 +177,16 @@ struct bnxt_tc_flow_node {
/* for the shared_flows list maintained in l2_node */ /* for the shared_flows list maintained in l2_node */
struct list_head l2_list_node; struct list_head l2_list_node;
/* tunnel encap related */
struct bnxt_tc_tunnel_node *encap_node;
/* tunnel decap related */
struct bnxt_tc_tunnel_node *decap_node;
/* L2 node in tunnel-l2 hashtable that shares flow's tunnel l2 key */
struct bnxt_tc_l2_node *decap_l2_node;
/* for the shared_flows list maintained in tunnel decap l2_node */
struct list_head decap_l2_list_node;
struct rcu_head rcu; struct rcu_head rcu;
}; };
...@@ -137,6 +194,12 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, ...@@ -137,6 +194,12 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower); struct tc_cls_flower_offload *cls_flower);
int bnxt_init_tc(struct bnxt *bp); int bnxt_init_tc(struct bnxt *bp);
void bnxt_shutdown_tc(struct bnxt *bp); void bnxt_shutdown_tc(struct bnxt *bp);
void bnxt_tc_flow_stats_work(struct bnxt *bp);
static inline bool bnxt_tc_flower_enabled(struct bnxt *bp)
{
return bp->tc_info && bp->tc_info->enabled;
}
#else /* CONFIG_BNXT_FLOWER_OFFLOAD */ #else /* CONFIG_BNXT_FLOWER_OFFLOAD */
...@@ -154,5 +217,14 @@ static inline int bnxt_init_tc(struct bnxt *bp) ...@@ -154,5 +217,14 @@ static inline int bnxt_init_tc(struct bnxt *bp)
static inline void bnxt_shutdown_tc(struct bnxt *bp) static inline void bnxt_shutdown_tc(struct bnxt *bp)
{ {
} }
static inline void bnxt_tc_flow_stats_work(struct bnxt *bp)
{
}
static inline bool bnxt_tc_flower_enabled(struct bnxt *bp)
{
return false;
}
#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ #endif /* CONFIG_BNXT_FLOWER_OFFLOAD */
#endif /* BNXT_TC_H */ #endif /* BNXT_TC_H */
...@@ -124,6 +124,9 @@ static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type, ...@@ -124,6 +124,9 @@ static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type,
struct bnxt *bp = vf_rep->bp; struct bnxt *bp = vf_rep->bp;
int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid; int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid;
if (!bnxt_tc_flower_enabled(vf_rep->bp))
return -EOPNOTSUPP;
switch (type) { switch (type) {
case TC_SETUP_CLSFLOWER: case TC_SETUP_CLSFLOWER:
return bnxt_tc_setup_flower(bp, vf_fid, type_data); return bnxt_tc_setup_flower(bp, vf_fid, type_data);
......