Commit dc1a8079 authored by David S. Miller

Merge branch 'bnxt_en-rx-ring'

Michael Chan says:

====================
bnxt_en: Increase maximum RX ring size when jumbo ring is unused

The RX jumbo ring is automatically enabled when HW GRO/LRO is enabled or
when the MTU exceeds the page size.  The RX jumbo ring provides a lot
more RX buffer space when it is in use.  When the RX jumbo ring is not
in use, some users report that the current maximum of 2K buffers is
too limiting.  This patchset increases the maximum to 8K buffers when
the RX jumbo ring is not used.  The default RX ring size is unchanged
at 511.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0547ffe6 c1129b51
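
For a sense of the numbers: with the common 4 KB page size, one RX descriptor page holds 256 of the driver's 16-byte rx_bd entries, so the old and new maxima come out to 2047 and 8191 respectively. A standalone sketch of that arithmetic (the DEMO_* names are illustrative, not driver identifiers):

#include <stdio.h>

/* Mirrors the 4 KB-page branch of the bnxt.h changes below;
 * sizeof(struct rx_bd) is 16 bytes in the driver. */
#define DEMO_PAGE_SIZE			4096
#define DEMO_RX_BD_SIZE			16
#define DEMO_RX_DESC_CNT		(DEMO_PAGE_SIZE / DEMO_RX_BD_SIZE)	/* 256 */
#define DEMO_MAX_RX_PAGES		32	/* new maximum, jumbo/agg ring unused */
#define DEMO_MAX_RX_PAGES_AGG_ENA	8	/* unchanged maximum, agg ring in use */

int main(void)
{
	/* The "- 1" matches the BNXT_MAX_RX_DESC_CNT-style macros,
	 * which keep one ring slot unused. */
	printf("max rx, no agg ring: %d\n",
	       DEMO_RX_DESC_CNT * DEMO_MAX_RX_PAGES - 1);		/* 8191 */
	printf("max rx, agg ring on: %d\n",
	       DEMO_RX_DESC_CNT * DEMO_MAX_RX_PAGES_AGG_ENA - 1);	/* 2047 */
	return 0;
}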
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3163,6 +3163,58 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
 	return 0;
 }
 
+static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
+{
+	kfree(cpr->cp_desc_ring);
+	cpr->cp_desc_ring = NULL;
+	kfree(cpr->cp_desc_mapping);
+	cpr->cp_desc_mapping = NULL;
+}
+
+static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
+{
+	cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
+	if (!cpr->cp_desc_ring)
+		return -ENOMEM;
+	cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
+				       GFP_KERNEL);
+	if (!cpr->cp_desc_mapping)
+		return -ENOMEM;
+	return 0;
+}
+
+static void bnxt_free_all_cp_arrays(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+
+		if (!bnapi)
+			continue;
+		bnxt_free_cp_arrays(&bnapi->cp_ring);
+	}
+}
+
+static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
+{
+	int i, n = bp->cp_nr_pages;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		int rc;
+
+		if (!bnapi)
+			continue;
+		rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
 static void bnxt_free_cp_rings(struct bnxt *bp)
 {
 	int i;
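
The free routine above tolerates a partially completed allocation: bnxt_alloc_cp_arrays() can return -ENOMEM with the first table already allocated, and its callers (see the bnxt_alloc_cp_sub_ring hunks below) unwind with bnxt_free_cp_arrays(), which frees and NULLs whatever exists. A minimal userspace sketch of the same alloc/free pairing, using hypothetical demo_* names rather than driver code:

#include <stdlib.h>

/* Stand-ins for the driver's cp_desc_ring / cp_desc_mapping tables. */
struct demo_ring {
	void **desc_ring;
	unsigned long long *mapping;
};

static void demo_free_arrays(struct demo_ring *r)
{
	/* Safe on partial state: free(NULL) is a no-op, like kfree(NULL). */
	free(r->desc_ring);
	r->desc_ring = NULL;
	free(r->mapping);
	r->mapping = NULL;
}

static int demo_alloc_arrays(struct demo_ring *r, int n)
{
	r->desc_ring = calloc(n, sizeof(*r->desc_ring));
	if (!r->desc_ring)
		return -1;
	r->mapping = calloc(n, sizeof(*r->mapping));
	if (!r->mapping)
		return -1;	/* caller must invoke demo_free_arrays() */
	return 0;
}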
@@ -3190,6 +3242,7 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
 			if (cpr2) {
 				ring = &cpr2->cp_ring_struct;
 				bnxt_free_ring(bp, &ring->ring_mem);
+				bnxt_free_cp_arrays(cpr2);
 				kfree(cpr2);
 				cpr->cp_ring_arr[j] = NULL;
 			}
@@ -3208,6 +3261,12 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
 	if (!cpr)
 		return NULL;
 
+	rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
+	if (rc) {
+		bnxt_free_cp_arrays(cpr);
+		kfree(cpr);
+		return NULL;
+	}
 	ring = &cpr->cp_ring_struct;
 	rmem = &ring->ring_mem;
 	rmem->nr_pages = bp->cp_nr_pages;
@@ -3218,6 +3277,7 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
 	rc = bnxt_alloc_ring(bp, rmem);
 	if (rc) {
 		bnxt_free_ring(bp, rmem);
+		bnxt_free_cp_arrays(cpr);
 		kfree(cpr);
 		cpr = NULL;
 	}
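
Both error paths in bnxt_alloc_cp_sub_ring() now unwind in reverse order of allocation before freeing the ring structure itself. A condensed standalone sketch of that shape (hypothetical demo_* names; the driver open-codes the unwind rather than using goto):

#include <stdlib.h>

struct demo_sub_ring {
	void **pages;	/* plays the role of the cp_desc_ring table */
	void *hw_mem;	/* plays the role of the ring_mem allocation */
};

static struct demo_sub_ring *demo_alloc_sub_ring(void)
{
	struct demo_sub_ring *r = calloc(1, sizeof(*r));

	if (!r)
		return NULL;
	r->pages = calloc(16, sizeof(*r->pages));
	if (!r->pages)
		goto err;
	r->hw_mem = malloc(4096);
	if (!r->hw_mem)
		goto err;
	return r;
err:
	free(r->pages);	/* reverse order: tables first ... */
	free(r);	/* ... then the containing structure */
	return NULL;
}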
@@ -3650,9 +3710,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
 		if (jumbo_factor > agg_factor)
 			agg_factor = jumbo_factor;
 	}
-	agg_ring_size = ring_size * agg_factor;
+	if (agg_factor) {
+		if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
+			ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
+			netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
+				    bp->rx_ring_size, ring_size);
+			bp->rx_ring_size = ring_size;
+		}
+		agg_ring_size = ring_size * agg_factor;
+	}
 	if (agg_ring_size) {
 		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
 							      RX_DESC_CNT);
 		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
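
The clamp above only takes effect when the aggregation ring becomes active (HW GRO/LRO enabled, or MTU raised past the page size) while a larger RX ring is configured; the size is cut back and a warning is logged. A standalone sketch of the rule, with 2047 standing in for BNXT_MAX_RX_DESC_CNT_JUM_ENA on a 4 KB-page build:

/* Hypothetical helper mirroring the clamp; not a driver function. */
static unsigned int demo_clamp_rx_ring(unsigned int ring_size, int agg_active)
{
	const unsigned int jumbo_max = 2047;	/* BNXT_MAX_RX_DESC_CNT_JUM_ENA */

	if (agg_active && ring_size > jumbo_max)
		return jumbo_max;	/* the driver also emits a netdev_warn */
	return ring_size;
}

For example, demo_clamp_rx_ring(8191, 1) returns 2047, while demo_clamp_rx_ring(8191, 0) leaves the configured 8191 intact.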
@@ -4253,6 +4319,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
 	bnxt_free_tx_rings(bp);
 	bnxt_free_rx_rings(bp);
 	bnxt_free_cp_rings(bp);
+	bnxt_free_all_cp_arrays(bp);
 	bnxt_free_ntp_fltrs(bp, irq_re_init);
 	if (irq_re_init) {
 		bnxt_free_ring_stats(bp);
@@ -4373,6 +4440,10 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
 			goto alloc_mem_err;
 	}
 
+	rc = bnxt_alloc_all_cp_arrays(bp);
+	if (rc)
+		goto alloc_mem_err;
+
 	bnxt_init_ring_struct(bp);
 
 	rc = bnxt_alloc_rx_rings(bp);

--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -596,15 +596,17 @@ struct nqe_cn {
 #define MAX_TPA_SEGS_P5	0x3f
 
 #if (BNXT_PAGE_SHIFT == 16)
-#define MAX_RX_PAGES	1
+#define MAX_RX_PAGES_AGG_ENA	1
+#define MAX_RX_PAGES	4
 #define MAX_RX_AGG_PAGES	4
 #define MAX_TX_PAGES	1
-#define MAX_CP_PAGES	8
+#define MAX_CP_PAGES	16
 #else
-#define MAX_RX_PAGES	8
+#define MAX_RX_PAGES_AGG_ENA	8
+#define MAX_RX_PAGES	32
 #define MAX_RX_AGG_PAGES	32
 #define MAX_TX_PAGES	8
-#define MAX_CP_PAGES	64
+#define MAX_CP_PAGES	128
 #endif
 
 #define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
@@ -622,6 +624,7 @@ struct nqe_cn {
 #define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
 
 #define BNXT_MAX_RX_DESC_CNT	(RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNXT_MAX_RX_DESC_CNT_JUM_ENA	(RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1)
 #define BNXT_MAX_RX_JUM_DESC_CNT	(RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
 #define BNXT_MAX_TX_DESC_CNT	(TX_DESC_CNT * MAX_TX_PAGES - 1)
@@ -972,11 +975,11 @@ struct bnxt_cp_ring_info {
 	struct dim dim;
 
 	union {
-		struct tx_cmp	*cp_desc_ring[MAX_CP_PAGES];
-		struct nqe_cn	*nq_desc_ring[MAX_CP_PAGES];
+		struct tx_cmp	**cp_desc_ring;
+		struct nqe_cn	**nq_desc_ring;
 	};
 
-	dma_addr_t		cp_desc_mapping[MAX_CP_PAGES];
+	dma_addr_t		*cp_desc_mapping;
 
 	struct bnxt_stats_mem	stats;
 	u32			hw_stats_ctx_id;
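
Switching the union members and cp_desc_mapping from fixed arrays to pointers is what makes the MAX_CP_PAGES bump above cheap: the tables are now kcalloc()'d to bp->cp_nr_pages instead of being embedded at worst-case size in every bnxt_cp_ring_info. A rough per-ring footprint comparison (a sketch; sizes assume a 64-bit build with a 64-bit dma_addr_t):

#include <stdio.h>

int main(void)
{
	const int max_cp_pages = 128;	/* new MAX_CP_PAGES, 4 KB-page build */
	const int used_pages = 16;	/* e.g. bp->cp_nr_pages for one config */

	/* Old layout: pointer table plus dma_addr_t table, both embedded
	 * at MAX_CP_PAGES entries in every completion ring. */
	size_t embedded = max_cp_pages * (sizeof(void *) + sizeof(unsigned long long));
	/* New layout: two pointers in the struct, tables sized to use. */
	size_t dynamic = 2 * sizeof(void *) +
			 used_pages * (sizeof(void *) + sizeof(unsigned long long));

	printf("embedded worst case: %zu bytes\n", embedded);	/* 2048 */
	printf("sized to use:        %zu bytes\n", dynamic);	/* 272 */
	return 0;
}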
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -768,8 +768,13 @@ static void bnxt_get_ringparam(struct net_device *dev,
 {
 	struct bnxt *bp = netdev_priv(dev);
 
-	ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
-	ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
+		ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+	} else {
+		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
+		ering->rx_jumbo_max_pending = 0;
+	}
 	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
 
 	ering->rx_pending = bp->rx_ring_size;
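
The user-visible effect shows up in ethtool -g: the reported RX maximum now depends on whether the aggregation ring is active. A condensed sketch of the logic above, with the 4 KB-page constants filled in (demo_* names, not driver code):

struct demo_rx_maxes {
	unsigned int rx_max;		/* ering->rx_max_pending */
	unsigned int rx_jumbo_max;	/* ering->rx_jumbo_max_pending */
};

static struct demo_rx_maxes demo_report_maxes(int agg_rings_active)
{
	if (agg_rings_active)	/* BNXT_FLAG_AGG_RINGS set */
		return (struct demo_rx_maxes){ .rx_max = 2047, .rx_jumbo_max = 8191 };
	return (struct demo_rx_maxes){ .rx_max = 8191, .rx_jumbo_max = 0 };
}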