Commit 1dee310c authored by Jakub Kicinski

Merge branch 'bnxt_en-update-for-net-next'

Pavan Chebbi says:

====================
bnxt_en: Update for net-next

This patchset contains the following updates to the bnxt_en driver:

- Patch 1 adds support for handling Downstream Port Containment (DPC) AER
on older chipsets

- Patch 2 enables XPS by default on driver load

- Patch 3 optimizes page pool allocation by using the NUMA node of the
CPU servicing each RX ring

- Patches 4 & 5 add support for XDP metadata

- Patch 6 updates the firmware interface

- Patch 7 adds a warning about speed change limitations with dual rate transceivers
====================

Link: https://lore.kernel.org/r/20240402093753.331120-1-pavan.chebbi@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 04172043 e193f53a
@@ -1296,9 +1296,9 @@ static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
return RX_AGG_CMP_VALID(agg, *raw_cons);
}
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
unsigned int len,
dma_addr_t mapping)
static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
unsigned int len,
dma_addr_t mapping)
{
struct bnxt *bp = bnapi->bp;
struct pci_dev *pdev = bp->pdev;
@@ -1318,6 +1318,39 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
bp->rx_dir);
skb_put(skb, len);
return skb;
}
static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
unsigned int len,
dma_addr_t mapping)
{
return bnxt_copy_data(bnapi, data, len, mapping);
}
static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
struct xdp_buff *xdp,
unsigned int len,
dma_addr_t mapping)
{
unsigned int metasize = 0;
u8 *data = xdp->data;
struct sk_buff *skb;
len = xdp->data_end - xdp->data_meta;
metasize = xdp->data - xdp->data_meta;
data = xdp->data_meta;
skb = bnxt_copy_data(bnapi, data, len, mapping);
if (!skb)
return skb;
if (metasize) {
skb_metadata_set(skb, metasize);
__skb_pull(skb, metasize);
}
return skb;
}
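For context, the metadata that bnxt_copy_xdp() propagates is whatever an XDP program reserved in front of the frame with bpf_xdp_adjust_meta(); once copied, skb_metadata_set() makes it visible further up the stack, for example to a tc BPF program reading data_meta. A minimal sketch of such a producer program (not part of this series; program and section names are illustrative) might look like:

```c
/* Illustrative XDP program: reserve 4 bytes of metadata in front of the
 * packet and write a mark into it.  The driver code above copies this
 * region into the skb and records its length via skb_metadata_set().
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_write_meta(struct xdp_md *ctx)
{
	__u32 *meta;
	void *data;

	/* Grow the metadata area by 4 bytes (data_meta moves down). */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	meta = (void *)(long)ctx->data_meta;
	data = (void *)(long)ctx->data;

	/* The verifier requires this bounds check before dereferencing. */
	if ((void *)(meta + 1) > data)
		return XDP_PASS;

	*meta = 0xcafe;	/* example mark, read later from the skb metadata */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```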
@@ -2104,14 +2137,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (xdp_active) {
if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
rc = 1;
goto next_rx;
}
}
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
if (!xdp_active)
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
else
skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
if (agg_bufs) {
@@ -2489,6 +2525,9 @@ static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
}
return false;
}
case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
break;
default:
netdev_err(bp->dev, "FW reported unknown error type %u\n",
err_type);
@@ -3559,14 +3598,15 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
}
static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr)
struct bnxt_rx_ring_info *rxr,
int numa_node)
{
struct page_pool_params pp = { 0 };
pp.pool_size = bp->rx_agg_ring_size;
if (BNXT_RX_PAGE_MODE(bp))
pp.pool_size += bp->rx_ring_size;
pp.nid = dev_to_node(&bp->pdev->dev);
pp.nid = numa_node;
pp.napi = &rxr->bnapi->napi;
pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
@@ -3586,7 +3626,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
int i, rc = 0, agg_rings = 0;
int numa_node = dev_to_node(&bp->pdev->dev);
int i, rc = 0, agg_rings = 0, cpu;
if (!bp->rx_ring)
return -ENOMEM;
@@ -3597,10 +3638,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
int cpu_node;
ring = &rxr->rx_ring_struct;
rc = bnxt_alloc_rx_page_pool(bp, rxr);
cpu = cpumask_local_spread(i, numa_node);
cpu_node = cpu_to_node(cpu);
netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
i, cpu_node);
rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
if (rc)
return rc;
@@ -11804,6 +11850,46 @@ static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
bnxt_cfg_one_usr_fltr(bp, usr_fltr);
}
static int bnxt_set_xps_mapping(struct bnxt *bp)
{
int numa_node = dev_to_node(&bp->pdev->dev);
unsigned int q_idx, map_idx, cpu, i;
const struct cpumask *cpu_mask_ptr;
int nr_cpus = num_online_cpus();
cpumask_t *q_map;
int rc = 0;
q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
if (!q_map)
return -ENOMEM;
/* Create CPU mask for all TX queues across MQPRIO traffic classes.
* Each TC has the same number of TX queues. The nth TX queue for each
* TC will have the same CPU mask.
*/
for (i = 0; i < nr_cpus; i++) {
map_idx = i % bp->tx_nr_rings_per_tc;
cpu = cpumask_local_spread(i, numa_node);
cpu_mask_ptr = get_cpu_mask(cpu);
cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
}
/* Register CPU mask for each TX queue except the ones marked for XDP */
for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
map_idx = q_idx % bp->tx_nr_rings_per_tc;
rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
if (rc) {
netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
q_idx);
break;
}
}
kfree(q_map);
return rc;
}
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
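For illustration of the queue grouping built by bnxt_set_xps_mapping() above, the short userspace sketch below prints which CPUs land in each TX queue's XPS mask. The CPU count, the rings-per-TC value, and local_spread() are invented stand-ins; the real cpumask_local_spread() prefers CPUs on the given NUMA node before wrapping around.

```c
/* Standalone sketch of the modular queue<->CPU grouping used by
 * bnxt_set_xps_mapping().  NR_CPUS_DEMO, TX_PER_TC and local_spread()
 * are made-up simplifications for demonstration only.
 */
#include <stdio.h>

#define NR_CPUS_DEMO 8	/* pretend 8 CPUs are online */
#define TX_PER_TC    4	/* pretend 4 TX rings per traffic class */

static int local_spread(int i)
{
	return i % NR_CPUS_DEMO;	/* ignores NUMA locality, unlike the kernel helper */
}

int main(void)
{
	for (int q = 0; q < TX_PER_TC; q++) {
		printf("tx queue %d (and queue %d of every other TC): CPUs", q, q);
		for (int i = 0; i < NR_CPUS_DEMO; i++)
			if (i % TX_PER_TC == q)
				printf(" %d", local_spread(i));
		printf("\n");
	}
	return 0;
}
```

With these demo values, queue 0 is served by CPUs 0 and 4, queue 1 by CPUs 1 and 5, and so on, matching the comment that the nth queue of every traffic class shares one CPU mask.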
@@ -11866,8 +11952,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
}
if (irq_re_init)
if (irq_re_init) {
udp_tunnel_nic_reset_ntf(bp->dev);
rc = bnxt_set_xps_mapping(bp);
if (rc)
netdev_warn(bp->dev, "failed to set xps mapping\n");
}
if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
if (!static_key_enabled(&bnxt_xdp_locking_key))
@@ -15550,6 +15640,10 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
netdev_info(bp->dev, "PCI Slot Reset\n");
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
msleep(900);
rtnl_lock();
if (pci_enable_device(pdev)) {
......
@@ -197,7 +197,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
}
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
@@ -222,7 +222,7 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct xdp_buff xdp, struct page *page, u8 **data_ptr,
struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
@@ -244,9 +244,9 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
txr = rxr->bnapi->tx_ring[0];
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
orig_data = xdp.data;
orig_data = xdp->data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
act = bpf_prog_run_xdp(xdp_prog, xdp);
tx_avail = bnxt_tx_avail(bp, txr);
/* If the tx ring is not full, we must not update the rx producer yet
@@ -255,10 +255,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail != bp->tx_ring_size)
*event &= ~BNXT_RX_EVENT;
*len = xdp.data_end - xdp.data;
if (orig_data != xdp.data) {
offset = xdp.data - xdp.data_hard_start;
*data_ptr = xdp.data_hard_start + offset;
*len = xdp->data_end - xdp->data;
if (orig_data != xdp->data) {
offset = xdp->data - xdp->data_hard_start;
*data_ptr = xdp->data_hard_start + offset;
}
switch (act) {
@@ -270,8 +270,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
mapping = rx_buf->mapping - bp->rx_dma_offset;
*event &= BNXT_TX_CMP_EVENT;
if (unlikely(xdp_buff_has_frags(&xdp))) {
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
if (unlikely(xdp_buff_has_frags(xdp))) {
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
tx_needed += sinfo->nr_frags;
*event = BNXT_AGG_EVENT;
@@ -279,7 +279,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
@@ -289,7 +289,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
NEXT_RX(rxr->rx_prod), &xdp);
NEXT_RX(rxr->rx_prod), xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
case XDP_REDIRECT:
@@ -306,12 +306,12 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
page_pool_recycle_direct(rxr->page_pool, page);
return true;
@@ -326,7 +326,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
trace_xdp_exception(bp->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
......
@@ -18,7 +18,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct xdp_buff xdp, struct page *page, u8 **data_ptr,
struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
......
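One design note on the changes above: bnxt_rx_xdp() now takes the xdp_buff by pointer (here in the header and in the corresponding source changes) so that adjustments made while the BPF program runs, such as a data_meta pointer moved by bpf_xdp_adjust_meta(), are still visible when bnxt_rx_pkt() later builds the skb with bnxt_copy_xdp(). A tiny userspace analogy (struct and function names invented for this sketch) shows why by-value passing would silently drop such updates:

```c
/* Illustrative only: a struct mutated by a callee must be passed by
 * pointer for the caller to see the change.  frame_t and its fields are
 * invented; they merely mirror how xdp_buff.data_meta is adjusted by the
 * BPF program and then read back by the driver.
 */
#include <assert.h>

typedef struct {
	char *data;
	char *data_meta;
} frame_t;

static void adjust_by_value(frame_t f)
{
	f.data_meta = f.data - 4;	/* lost: only the local copy changes */
}

static void adjust_by_pointer(frame_t *f)
{
	f->data_meta = f->data - 4;	/* visible to the caller */
}

int main(void)
{
	char buf[64];
	frame_t f = { .data = buf + 16, .data_meta = buf + 16 };

	adjust_by_value(f);
	assert(f.data - f.data_meta == 0);	/* no metadata seen */

	adjust_by_pointer(&f);
	assert(f.data - f.data_meta == 4);	/* 4 bytes of metadata seen */
	return 0;
}
```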