Commit a0c6359d authored by David S. Miller

Merge branch 'bnxt_en-netdev_queue_mgmt_ops'

David Wei says:

====================
bnxt_en: implement netdev_queue_mgmt_ops

Implement netdev_queue_mgmt_ops for bnxt added in [1]. This will be used
in the io_uring ZC Rx patchset to configure queues with a custom page
pool w/ a special memory provider for zero copy support.

The first two patches prep the driver, while the final patch adds the
implementation.

Any arbitrary Rx queue can be reset without affecting other queues. V2
and prior of this patchset was thought to only support resetting queues
not in the main RSS context. Upon further testing I realised moving
queues out and calling bnxt_hwrm_vnic_update() wasn't necessary.

I didn't include the netdev core API using this netdev_queue_mgmt_ops
because Mina is adding it in his devmem TCP series [2]. But I'm happy to
include it if folks want to include a user with this series.

I tested this series on BCM957504-N1100FY4 with FW 229.1.123.0. I
manually injected failures at all the places that can return an errno
and confirmed that the device/queue is never left in a broken state.

[1]: https://lore.kernel.org/netdev/20240501232549.1327174-2-shailend@google.com/
[2]: https://lore.kernel.org/netdev/20240607005127.3078656-2-almasrymina@google.com/

v3:
 - tested w/o bnxt_hwrm_vnic_update() and it works on any queue
 - removed unneeded code

v2:
 - fix broken build
 - remove unused var in bnxt_init_one_rx_ring()
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9413b1be 2d694c27
...@@ -3316,37 +3316,12 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) ...@@ -3316,37 +3316,12 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
} }
} }
static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{ {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
struct pci_dev *pdev = bp->pdev; struct pci_dev *pdev = bp->pdev;
struct bnxt_tpa_idx_map *map; int i, max_idx;
int i, max_idx, max_agg_idx;
max_idx = bp->rx_nr_pages * RX_DESC_CNT; max_idx = bp->rx_nr_pages * RX_DESC_CNT;
max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
if (!rxr->rx_tpa)
goto skip_rx_tpa_free;
for (i = 0; i < bp->max_tpa; i++) {
struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
u8 *data = tpa_info->data;
if (!data)
continue;
dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
tpa_info->data = NULL;
skb_free_frag(data);
}
skip_rx_tpa_free:
if (!rxr->rx_buf_ring)
goto skip_rx_buf_free;
for (i = 0; i < max_idx; i++) { for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
...@@ -3366,12 +3341,15 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) ...@@ -3366,12 +3341,15 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
skb_free_frag(data); skb_free_frag(data);
} }
} }
}
skip_rx_buf_free: static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
if (!rxr->rx_agg_ring) {
goto skip_rx_agg_free; int i, max_idx;
max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
for (i = 0; i < max_agg_idx; i++) { for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
struct page *page = rx_agg_buf->page; struct page *page = rx_agg_buf->page;
...@@ -3383,6 +3361,45 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) ...@@ -3383,6 +3361,45 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
page_pool_recycle_direct(rxr->page_pool, page); page_pool_recycle_direct(rxr->page_pool, page);
} }
}
static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
{
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
struct pci_dev *pdev = bp->pdev;
struct bnxt_tpa_idx_map *map;
int i;
if (!rxr->rx_tpa)
goto skip_rx_tpa_free;
for (i = 0; i < bp->max_tpa; i++) {
struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
u8 *data = tpa_info->data;
if (!data)
continue;
dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
tpa_info->data = NULL;
skb_free_frag(data);
}
skip_rx_tpa_free:
if (!rxr->rx_buf_ring)
goto skip_rx_buf_free;
bnxt_free_one_rx_ring(bp, rxr);
skip_rx_buf_free:
if (!rxr->rx_agg_ring)
goto skip_rx_agg_free;
bnxt_free_one_rx_agg_ring(bp, rxr);
skip_rx_agg_free: skip_rx_agg_free:
map = rxr->rx_tpa_idx_map; map = rxr->rx_tpa_idx_map;
...@@ -3979,6 +3996,62 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp) ...@@ -3979,6 +3996,62 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
return 0; return 0;
} }
static void bnxt_init_rx_ring_struct(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr)
{
struct bnxt_ring_mem_info *rmem;
struct bnxt_ring_struct *ring;
ring = &rxr->rx_ring_struct;
rmem = &ring->ring_mem;
rmem->nr_pages = bp->rx_nr_pages;
rmem->page_size = HW_RXBD_RING_SIZE;
rmem->pg_arr = (void **)rxr->rx_desc_ring;
rmem->dma_arr = rxr->rx_desc_mapping;
rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
rmem->vmem = (void **)&rxr->rx_buf_ring;
ring = &rxr->rx_agg_ring_struct;
rmem = &ring->ring_mem;
rmem->nr_pages = bp->rx_agg_nr_pages;
rmem->page_size = HW_RXBD_RING_SIZE;
rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
rmem->dma_arr = rxr->rx_agg_desc_mapping;
rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
rmem->vmem = (void **)&rxr->rx_agg_ring;
}
static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr)
{
struct bnxt_ring_mem_info *rmem;
struct bnxt_ring_struct *ring;
int i;
rxr->page_pool->p.napi = NULL;
rxr->page_pool = NULL;
ring = &rxr->rx_ring_struct;
rmem = &ring->ring_mem;
rmem->pg_tbl = NULL;
rmem->pg_tbl_map = 0;
for (i = 0; i < rmem->nr_pages; i++) {
rmem->pg_arr[i] = NULL;
rmem->dma_arr[i] = 0;
}
*rmem->vmem = NULL;
ring = &rxr->rx_agg_ring_struct;
rmem = &ring->ring_mem;
rmem->pg_tbl = NULL;
rmem->pg_tbl_map = 0;
for (i = 0; i < rmem->nr_pages; i++) {
rmem->pg_arr[i] = NULL;
rmem->dma_arr[i] = 0;
}
*rmem->vmem = NULL;
}
static void bnxt_init_ring_struct(struct bnxt *bp) static void bnxt_init_ring_struct(struct bnxt *bp)
{ {
int i, j; int i, j;
...@@ -4061,37 +4134,55 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) ...@@ -4061,37 +4134,55 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
} }
} }
static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
int ring_nr)
{ {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
struct net_device *dev = bp->dev;
u32 prod; u32 prod;
int i; int i;
prod = rxr->rx_prod; prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) { for (i = 0; i < bp->rx_ring_size; i++) {
if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_nr, i, bp->rx_ring_size); ring_nr, i, bp->rx_ring_size);
break; break;
} }
prod = NEXT_RX(prod); prod = NEXT_RX(prod);
} }
rxr->rx_prod = prod; rxr->rx_prod = prod;
}
if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
return 0; struct bnxt_rx_ring_info *rxr,
int ring_nr)
{
u32 prod;
int i;
prod = rxr->rx_agg_prod; prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) { for (i = 0; i < bp->rx_agg_ring_size; i++) {
if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
ring_nr, i, bp->rx_ring_size); ring_nr, i, bp->rx_ring_size);
break; break;
} }
prod = NEXT_RX_AGG(prod); prod = NEXT_RX_AGG(prod);
} }
rxr->rx_agg_prod = prod; rxr->rx_agg_prod = prod;
}
static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
{
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
int i;
bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
if (rxr->rx_tpa) { if (rxr->rx_tpa) {
dma_addr_t mapping; dma_addr_t mapping;
...@@ -4110,9 +4201,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) ...@@ -4110,9 +4201,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
return 0; return 0;
} }
static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr)
{ {
struct bnxt_rx_ring_info *rxr;
struct bnxt_ring_struct *ring; struct bnxt_ring_struct *ring;
u32 type; u32 type;
...@@ -4122,28 +4213,43 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) ...@@ -4122,28 +4213,43 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
if (NET_IP_ALIGN == 2) if (NET_IP_ALIGN == 2)
type |= RX_BD_FLAGS_SOP; type |= RX_BD_FLAGS_SOP;
rxr = &bp->rx_ring[ring_nr];
ring = &rxr->rx_ring_struct; ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type); bnxt_init_rxbd_pages(ring, type);
netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
&rxr->bnapi->napi);
if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
bpf_prog_add(bp->xdp_prog, 1);
rxr->xdp_prog = bp->xdp_prog;
}
ring->fw_ring_id = INVALID_HW_RING_ID; ring->fw_ring_id = INVALID_HW_RING_ID;
}
static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr)
{
struct bnxt_ring_struct *ring;
u32 type;
ring = &rxr->rx_agg_ring_struct; ring = &rxr->rx_agg_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID; ring->fw_ring_id = INVALID_HW_RING_ID;
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
bnxt_init_rxbd_pages(ring, type); bnxt_init_rxbd_pages(ring, type);
} }
}
static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
{
struct bnxt_rx_ring_info *rxr;
rxr = &bp->rx_ring[ring_nr];
bnxt_init_one_rx_ring_rxbd(bp, rxr);
netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
&rxr->bnapi->napi);
if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
bpf_prog_add(bp->xdp_prog, 1);
rxr->xdp_prog = bp->xdp_prog;
}
bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
return bnxt_alloc_one_rx_ring(bp, ring_nr); return bnxt_alloc_one_rx_ring(bp, ring_nr);
} }
...@@ -6868,6 +6974,48 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, ...@@ -6868,6 +6974,48 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
bnxt_set_db_mask(bp, db, ring_type); bnxt_set_db_mask(bp, db, ring_type);
} }
static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr)
{
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
struct bnxt_napi *bnapi = rxr->bnapi;
u32 type = HWRM_RING_ALLOC_RX;
u32 map_idx = bnapi->index;
int rc;
rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
return rc;
bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
return 0;
}
static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr)
{
struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
u32 type = HWRM_RING_ALLOC_AGG;
u32 grp_idx = ring->grp_idx;
u32 map_idx;
int rc;
map_idx = grp_idx + bp->rx_nr_rings;
rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
return rc;
bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
ring->fw_ring_id);
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
return 0;
}
static int bnxt_hwrm_ring_alloc(struct bnxt *bp) static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{ {
bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
...@@ -6933,24 +7081,21 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) ...@@ -6933,24 +7081,21 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
} }
type = HWRM_RING_ALLOC_RX;
for (i = 0; i < bp->rx_nr_rings; i++) { for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
struct bnxt_napi *bnapi = rxr->bnapi;
u32 map_idx = bnapi->index;
rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
if (rc) if (rc)
goto err_out; goto err_out;
bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
/* If we have agg rings, post agg buffers first. */ /* If we have agg rings, post agg buffers first. */
if (!agg_rings) if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr; struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
struct bnxt_napi *bnapi = rxr->bnapi;
u32 type2 = HWRM_RING_ALLOC_CMPL; u32 type2 = HWRM_RING_ALLOC_CMPL;
struct bnxt_ring_struct *ring;
u32 map_idx = bnapi->index;
ring = &cpr2->cp_ring_struct; ring = &cpr2->cp_ring_struct;
ring->handle = BNXT_SET_NQ_HDL(cpr2); ring->handle = BNXT_SET_NQ_HDL(cpr2);
...@@ -6964,23 +7109,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) ...@@ -6964,23 +7109,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
} }
if (agg_rings) { if (agg_rings) {
type = HWRM_RING_ALLOC_AGG;
for (i = 0; i < bp->rx_nr_rings; i++) { for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
struct bnxt_ring_struct *ring =
&rxr->rx_agg_ring_struct;
u32 grp_idx = ring->grp_idx;
u32 map_idx = grp_idx + bp->rx_nr_rings;
rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc) if (rc)
goto err_out; goto err_out;
bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
ring->fw_ring_id);
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
} }
} }
err_out: err_out:
...@@ -7020,6 +7152,50 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, ...@@ -7020,6 +7152,50 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
return 0; return 0;
} }
static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
bool close_path)
{
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
u32 grp_idx = rxr->bnapi->index;
u32 cmpl_ring_id;
if (ring->fw_ring_id == INVALID_HW_RING_ID)
return;
cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_RX,
close_path ? cmpl_ring_id :
INVALID_HW_RING_ID);
ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
}
static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
bool close_path)
{
struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
u32 grp_idx = rxr->bnapi->index;
u32 type, cmpl_ring_id;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = RING_FREE_REQ_RING_TYPE_RX_AGG;
else
type = RING_FREE_REQ_RING_TYPE_RX;
if (ring->fw_ring_id == INVALID_HW_RING_ID)
return;
cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
hwrm_ring_free_send_msg(bp, ring, type,
close_path ? cmpl_ring_id :
INVALID_HW_RING_ID);
ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
}
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{ {
u32 type; u32 type;
...@@ -7044,42 +7220,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) ...@@ -7044,42 +7220,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
} }
for (i = 0; i < bp->rx_nr_rings; i++) { for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
u32 grp_idx = rxr->bnapi->index;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_RX,
close_path ? cmpl_ring_id :
INVALID_HW_RING_ID);
ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[grp_idx].rx_fw_ring_id =
INVALID_HW_RING_ID;
}
}
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = RING_FREE_REQ_RING_TYPE_RX_AGG;
else
type = RING_FREE_REQ_RING_TYPE_RX;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
u32 grp_idx = rxr->bnapi->index;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
hwrm_ring_free_send_msg(bp, ring, type,
close_path ? cmpl_ring_id :
INVALID_HW_RING_ID);
ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[grp_idx].agg_fw_ring_id =
INVALID_HW_RING_ID;
}
} }
/* The completion rings are about to be freed. After that the /* The completion rings are about to be freed. After that the
...@@ -14828,6 +14970,224 @@ static const struct netdev_stat_ops bnxt_stat_ops = { ...@@ -14828,6 +14970,224 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
.get_base_stats = bnxt_get_base_stats, .get_base_stats = bnxt_get_base_stats,
}; };
static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
u16 mem_size;
rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
mem_size = rxr->rx_agg_bmap_size / 8;
rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
if (!rxr->rx_agg_bmap)
return -ENOMEM;
return 0;
}
static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
{
struct bnxt_rx_ring_info *rxr, *clone;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ring_struct *ring;
int rc;
rxr = &bp->rx_ring[idx];
clone = qmem;
memcpy(clone, rxr, sizeof(*rxr));
bnxt_init_rx_ring_struct(bp, clone);
bnxt_reset_rx_ring_struct(bp, clone);
clone->rx_prod = 0;
clone->rx_agg_prod = 0;
clone->rx_sw_agg_prod = 0;
clone->rx_next_cons = 0;
rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
if (rc)
return rc;
ring = &clone->rx_ring_struct;
rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
goto err_free_rx_ring;
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
ring = &clone->rx_agg_ring_struct;
rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
goto err_free_rx_agg_ring;
rc = bnxt_alloc_rx_agg_bmap(bp, clone);
if (rc)
goto err_free_rx_agg_ring;
}
bnxt_init_one_rx_ring_rxbd(bp, clone);
bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_alloc_one_rx_ring_page(bp, clone, idx);
return 0;
err_free_rx_agg_ring:
bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
err_free_rx_ring:
bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
clone->page_pool->p.napi = NULL;
page_pool_destroy(clone->page_pool);
clone->page_pool = NULL;
return rc;
}
static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
{
struct bnxt_rx_ring_info *rxr = qmem;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ring_struct *ring;
bnxt_free_one_rx_ring(bp, rxr);
bnxt_free_one_rx_agg_ring(bp, rxr);
/* At this point, this NAPI instance has another page pool associated
* with it. Disconnect here before freeing the old page pool to avoid
* warnings.
*/
rxr->page_pool->p.napi = NULL;
page_pool_destroy(rxr->page_pool);
rxr->page_pool = NULL;
ring = &rxr->rx_ring_struct;
bnxt_free_ring(bp, &ring->ring_mem);
ring = &rxr->rx_agg_ring_struct;
bnxt_free_ring(bp, &ring->ring_mem);
kfree(rxr->rx_agg_bmap);
rxr->rx_agg_bmap = NULL;
}
static void bnxt_copy_rx_ring(struct bnxt *bp,
struct bnxt_rx_ring_info *dst,
struct bnxt_rx_ring_info *src)
{
struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
struct bnxt_ring_struct *dst_ring, *src_ring;
int i;
dst_ring = &dst->rx_ring_struct;
dst_rmem = &dst_ring->ring_mem;
src_ring = &src->rx_ring_struct;
src_rmem = &src_ring->ring_mem;
WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
WARN_ON(dst_rmem->page_size != src_rmem->page_size);
WARN_ON(dst_rmem->flags != src_rmem->flags);
WARN_ON(dst_rmem->depth != src_rmem->depth);
WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
dst_rmem->pg_tbl = src_rmem->pg_tbl;
dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
*dst_rmem->vmem = *src_rmem->vmem;
for (i = 0; i < dst_rmem->nr_pages; i++) {
dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
}
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return;
dst_ring = &dst->rx_agg_ring_struct;
dst_rmem = &dst_ring->ring_mem;
src_ring = &src->rx_agg_ring_struct;
src_rmem = &src_ring->ring_mem;
WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
WARN_ON(dst_rmem->page_size != src_rmem->page_size);
WARN_ON(dst_rmem->flags != src_rmem->flags);
WARN_ON(dst_rmem->depth != src_rmem->depth);
WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
dst_rmem->pg_tbl = src_rmem->pg_tbl;
dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
*dst_rmem->vmem = *src_rmem->vmem;
for (i = 0; i < dst_rmem->nr_pages; i++) {
dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
}
dst->rx_agg_bmap = src->rx_agg_bmap;
}
static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr, *clone;
struct bnxt_cp_ring_info *cpr;
int rc;
rxr = &bp->rx_ring[idx];
clone = qmem;
rxr->rx_prod = clone->rx_prod;
rxr->rx_agg_prod = clone->rx_agg_prod;
rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
rxr->rx_next_cons = clone->rx_next_cons;
rxr->page_pool = clone->page_pool;
bnxt_copy_rx_ring(bp, rxr, clone);
rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
if (rc)
return rc;
rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
if (rc)
goto err_free_hwrm_rx_ring;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
napi_enable(&rxr->bnapi->napi);
cpr = &rxr->bnapi->cp_ring;
cpr->sw_stats->rx.rx_resets++;
return 0;
err_free_hwrm_rx_ring:
bnxt_hwrm_rx_ring_free(bp, rxr, false);
return rc;
}
static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr;
rxr = &bp->rx_ring[idx];
napi_disable(&rxr->bnapi->napi);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
rxr->rx_next_cons = 0;
memcpy(qmem, rxr, sizeof(*rxr));
bnxt_init_rx_ring_struct(bp, qmem);
return 0;
}
static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
.ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
.ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
.ndo_queue_mem_free = bnxt_queue_mem_free,
.ndo_queue_start = bnxt_queue_start,
.ndo_queue_stop = bnxt_queue_stop,
};
static void bnxt_remove_one(struct pci_dev *pdev) static void bnxt_remove_one(struct pci_dev *pdev)
{ {
struct net_device *dev = pci_get_drvdata(pdev); struct net_device *dev = pci_get_drvdata(pdev);
...@@ -15293,6 +15653,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -15293,6 +15653,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->stat_ops = &bnxt_stat_ops; dev->stat_ops = &bnxt_stat_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops; dev->ethtool_ops = &bnxt_ethtool_ops;
dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
pci_set_drvdata(pdev, dev); pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp); rc = bnxt_alloc_hwrm_resources(bp);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment