Commit 88f56254 authored by David Wei, committed by David S. Miller

bnxt_en: split rx ring helpers out from ring helpers

To prepare for the queue API implementation, split the rx ring
functions out of the generic ring helpers. The queue API
implementation will call these new helpers directly.
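
For illustration only, a per-queue restart path could compose the split
helpers roughly as sketched below. This sketch is not part of this patch:
the bnxt_queue_start()/bnxt_queue_stop() names and signatures are
hypothetical; only the helpers introduced by this patch are real.

/* Hypothetical sketch (not in this patch): a per-queue restart path
 * built from the split helpers. Callback names/signatures are assumed.
 */
static int bnxt_queue_start(struct net_device *dev, int idx)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_rx_ring_info *rxr = &bp->rx_ring[idx];
        int rc;

        /* Refill software state for just this ring. */
        bnxt_alloc_one_rx_ring_skb(bp, rxr, idx);
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                bnxt_alloc_one_rx_ring_page(bp, rxr, idx);

        /* Re-create the firmware rings for this queue only. */
        rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
        if (rc)
                return rc;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
        return rc;
}

static void bnxt_queue_stop(struct net_device *dev, int idx)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_rx_ring_info *rxr = &bp->rx_ring[idx];

        /* Tear down only this queue's firmware rings and buffers. */
        bnxt_hwrm_rx_ring_free(bp, rxr, false);
        bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
        bnxt_free_one_rx_ring_skbs(bp, idx);
}
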
Signed-off-by: David Wei <dw@davidwei.uk>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9413b1be
@@ -3316,37 +3316,12 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
 	}
 }
 
-static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 {
-	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
-	struct pci_dev *pdev = bp->pdev;
-	struct bnxt_tpa_idx_map *map;
-	int i, max_idx, max_agg_idx;
+	int i, max_idx;
 
 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
-	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
-
-	if (!rxr->rx_tpa)
-		goto skip_rx_tpa_free;
-
-	for (i = 0; i < bp->max_tpa; i++) {
-		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
-		u8 *data = tpa_info->data;
-
-		if (!data)
-			continue;
-
-		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
-				       bp->rx_buf_use_size, bp->rx_dir,
-				       DMA_ATTR_WEAK_ORDERING);
-
-		tpa_info->data = NULL;
-
-		skb_free_frag(data);
-	}
-
-skip_rx_tpa_free:
-	if (!rxr->rx_buf_ring)
-		goto skip_rx_buf_free;
 
 	for (i = 0; i < max_idx; i++) {
 		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
@@ -3366,12 +3341,15 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 			skb_free_frag(data);
 		}
 	}
+}
 
-skip_rx_buf_free:
-	if (!rxr->rx_agg_ring)
-		goto skip_rx_agg_free;
+static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+	int i, max_idx;
 
-	for (i = 0; i < max_agg_idx; i++) {
+	max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
+
+	for (i = 0; i < max_idx; i++) {
 		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
 		struct page *page = rx_agg_buf->page;
@@ -3383,6 +3361,45 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		page_pool_recycle_direct(rxr->page_pool, page);
 	}
+}
+
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+{
+	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+	struct pci_dev *pdev = bp->pdev;
+	struct bnxt_tpa_idx_map *map;
+	int i;
+
+	if (!rxr->rx_tpa)
+		goto skip_rx_tpa_free;
+
+	for (i = 0; i < bp->max_tpa; i++) {
+		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+		u8 *data = tpa_info->data;
+
+		if (!data)
+			continue;
+
+		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
+				       bp->rx_buf_use_size, bp->rx_dir,
+				       DMA_ATTR_WEAK_ORDERING);
+
+		tpa_info->data = NULL;
+
+		skb_free_frag(data);
+	}
+
+skip_rx_tpa_free:
+	if (!rxr->rx_buf_ring)
+		goto skip_rx_buf_free;
+
+	bnxt_free_one_rx_ring(bp, rxr);
+
+skip_rx_buf_free:
+	if (!rxr->rx_agg_ring)
+		goto skip_rx_agg_free;
+
+	bnxt_free_one_rx_agg_ring(bp, rxr);
 
 skip_rx_agg_free:
 	map = rxr->rx_tpa_idx_map;
@@ -4061,37 +4078,55 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
 	}
 }
 
-static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
+				       struct bnxt_rx_ring_info *rxr,
+				       int ring_nr)
 {
-	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
-	struct net_device *dev = bp->dev;
 	u32 prod;
 	int i;
 
 	prod = rxr->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
 		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
-			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
 				    ring_nr, i, bp->rx_ring_size);
 			break;
 		}
 		prod = NEXT_RX(prod);
 	}
 	rxr->rx_prod = prod;
+}
 
-	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
-		return 0;
+static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
+					struct bnxt_rx_ring_info *rxr,
+					int ring_nr)
+{
+	u32 prod;
+	int i;
 
 	prod = rxr->rx_agg_prod;
 	for (i = 0; i < bp->rx_agg_ring_size; i++) {
 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
-			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
+			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
 				    ring_nr, i, bp->rx_ring_size);
 			break;
 		}
 		prod = NEXT_RX_AGG(prod);
 	}
 	rxr->rx_agg_prod = prod;
+}
+
+static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+	int i;
+
+	bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
+
+	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+		return 0;
+
+	bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
 
 	if (rxr->rx_tpa) {
 		dma_addr_t mapping;
@@ -4110,9 +4145,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
 	return 0;
 }
 
-static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
+				       struct bnxt_rx_ring_info *rxr)
 {
-	struct bnxt_rx_ring_info *rxr;
 	struct bnxt_ring_struct *ring;
 	u32 type;
@@ -4122,28 +4157,43 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
 	if (NET_IP_ALIGN == 2)
 		type |= RX_BD_FLAGS_SOP;
 
-	rxr = &bp->rx_ring[ring_nr];
 	ring = &rxr->rx_ring_struct;
 	bnxt_init_rxbd_pages(ring, type);
-
-	netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
-			     &rxr->bnapi->napi);
-
-	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
-		bpf_prog_add(bp->xdp_prog, 1);
-		rxr->xdp_prog = bp->xdp_prog;
-	}
-
 	ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
+					   struct bnxt_rx_ring_info *rxr)
+{
+	struct bnxt_ring_struct *ring;
+	u32 type;
 
 	ring = &rxr->rx_agg_ring_struct;
 	ring->fw_ring_id = INVALID_HW_RING_ID;
-
 	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
 		type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
 			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
 
 		bnxt_init_rxbd_pages(ring, type);
 	}
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+	struct bnxt_rx_ring_info *rxr;
+
+	rxr = &bp->rx_ring[ring_nr];
+	bnxt_init_one_rx_ring_rxbd(bp, rxr);
+
+	netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+			     &rxr->bnapi->napi);
+
+	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+		bpf_prog_add(bp->xdp_prog, 1);
+		rxr->xdp_prog = bp->xdp_prog;
+	}
+
+	bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
 
 	return bnxt_alloc_one_rx_ring(bp, ring_nr);
 }
@@ -6868,6 +6918,48 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
 	bnxt_set_db_mask(bp, db, ring_type);
 }
 
+static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
+				   struct bnxt_rx_ring_info *rxr)
+{
+	struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+	struct bnxt_napi *bnapi = rxr->bnapi;
+	u32 type = HWRM_RING_ALLOC_RX;
+	u32 map_idx = bnapi->index;
+	int rc;
+
+	rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+	if (rc)
+		return rc;
+
+	bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+	bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+
+	return 0;
+}
+
+static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
+				       struct bnxt_rx_ring_info *rxr)
+{
+	struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+	u32 type = HWRM_RING_ALLOC_AGG;
+	u32 grp_idx = ring->grp_idx;
+	u32 map_idx;
+	int rc;
+
+	map_idx = grp_idx + bp->rx_nr_rings;
+	rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+	if (rc)
+		return rc;
+
+	bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
+		    ring->fw_ring_id);
+	bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+	bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+
+	return 0;
+}
+
 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 {
 	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
@@ -6933,24 +7025,21 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
 	}
 
-	type = HWRM_RING_ALLOC_RX;
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
-		struct bnxt_napi *bnapi = rxr->bnapi;
-		u32 map_idx = bnapi->index;
 
-		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+		rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
 		if (rc)
 			goto err_out;
-		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
 		/* If we have agg rings, post agg buffers first. */
 		if (!agg_rings)
 			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
-		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
 			struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
+			struct bnxt_napi *bnapi = rxr->bnapi;
 			u32 type2 = HWRM_RING_ALLOC_CMPL;
+			struct bnxt_ring_struct *ring;
+			u32 map_idx = bnapi->index;
 
 			ring = &cpr2->cp_ring_struct;
 			ring->handle = BNXT_SET_NQ_HDL(cpr2);
@@ -6964,23 +7053,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 	}
 
 	if (agg_rings) {
-		type = HWRM_RING_ALLOC_AGG;
 		for (i = 0; i < bp->rx_nr_rings; i++) {
-			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-			struct bnxt_ring_struct *ring =
-						&rxr->rx_agg_ring_struct;
-			u32 grp_idx = ring->grp_idx;
-			u32 map_idx = grp_idx + bp->rx_nr_rings;
-
-			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+			rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
 			if (rc)
 				goto err_out;
-			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
-				    ring->fw_ring_id);
-			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
-			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
-			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
 		}
 	}
 err_out:
@@ -7020,6 +7096,50 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
 	return 0;
 }
 
+static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
+				   struct bnxt_rx_ring_info *rxr,
+				   bool close_path)
+{
+	struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+	u32 grp_idx = rxr->bnapi->index;
+	u32 cmpl_ring_id;
+
+	if (ring->fw_ring_id == INVALID_HW_RING_ID)
+		return;
+
+	cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+	hwrm_ring_free_send_msg(bp, ring,
+				RING_FREE_REQ_RING_TYPE_RX,
+				close_path ? cmpl_ring_id :
+				INVALID_HW_RING_ID);
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+	bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
+				       struct bnxt_rx_ring_info *rxr,
+				       bool close_path)
+{
+	struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+	u32 grp_idx = rxr->bnapi->index;
+	u32 type, cmpl_ring_id;
+
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
+	else
+		type = RING_FREE_REQ_RING_TYPE_RX;
+
+	if (ring->fw_ring_id == INVALID_HW_RING_ID)
+		return;
+
+	cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+	hwrm_ring_free_send_msg(bp, ring, type,
+				close_path ? cmpl_ring_id :
+				INVALID_HW_RING_ID);
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+	bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
+}
+
 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 {
 	u32 type;
@@ -7044,42 +7164,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 	}
 
 	for (i = 0; i < bp->rx_nr_rings; i++) {
-		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
-		u32 grp_idx = rxr->bnapi->index;
-
-		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
-			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
-
-			hwrm_ring_free_send_msg(bp, ring,
-						RING_FREE_REQ_RING_TYPE_RX,
-						close_path ? cmpl_ring_id :
-						INVALID_HW_RING_ID);
-			ring->fw_ring_id = INVALID_HW_RING_ID;
-			bp->grp_info[grp_idx].rx_fw_ring_id =
-				INVALID_HW_RING_ID;
-		}
-	}
-
-	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
-		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
-	else
-		type = RING_FREE_REQ_RING_TYPE_RX;
-	for (i = 0; i < bp->rx_nr_rings; i++) {
-		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
-		u32 grp_idx = rxr->bnapi->index;
-
-		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
-			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
-
-			hwrm_ring_free_send_msg(bp, ring, type,
-						close_path ? cmpl_ring_id :
-						INVALID_HW_RING_ID);
-			ring->fw_ring_id = INVALID_HW_RING_ID;
-			bp->grp_info[grp_idx].agg_fw_ring_id =
-				INVALID_HW_RING_ID;
-		}
+		bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
+		bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
 	}
 
 	/* The completion rings are about to be freed. After that the