Commit b3b83c3f authored by Dmitry Kravkov, committed by David S. Miller

bnx2x: improve memory handling, low memory recovery flows

Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 426b9241
@@ -473,7 +473,8 @@ struct bnx2x_fastpath {
 #define NUM_RX_BD		(RX_DESC_CNT * NUM_RX_RINGS)
 #define MAX_RX_BD		(NUM_RX_BD - 1)
 #define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-#define MIN_RX_AVAIL		128
+#define MIN_RX_SIZE_TPA		72
+#define MIN_RX_SIZE_NONTPA	10
 #define INIT_JUMBO_RX_RING_SIZE	MAX_RX_AVAIL
 #define INIT_RX_RING_SIZE	MAX_RX_AVAIL
 #define NEXT_RX_IDX(x)		((((x) & RX_DESC_MASK) == \
[diff of one file collapsed in this view - not shown]
@@ -25,6 +25,39 @@
 extern int num_queues;
 
+/************************ Macros ********************************/
+#define BNX2X_PCI_FREE(x, y, size) \
+	do { \
+		if (x) { \
+			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
+			x = NULL; \
+			y = 0; \
+		} \
+	} while (0)
+
+#define BNX2X_FREE(x) \
+	do { \
+		if (x) { \
+			kfree((void *)x); \
+			x = NULL; \
+		} \
+	} while (0)
+
+#define BNX2X_PCI_ALLOC(x, y, size) \
+	do { \
+		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+		if (x == NULL) \
+			goto alloc_mem_err; \
+		memset((void *)x, 0, size); \
+	} while (0)
+
+#define BNX2X_ALLOC(x, size) \
+	do { \
+		x = kzalloc(size, GFP_KERNEL); \
+		if (x == NULL) \
+			goto alloc_mem_err; \
+	} while (0)
+
 /*********************** Interfaces ****************************
  * Functions that need to be implemented by each driver version
  */
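These macros assume two conventions in the enclosing function: a bp pointer in scope and an alloc_mem_err label to jump to. A minimal sketch of that pattern, using fields that appear elsewhere in this patch (the function name is illustrative, not part of the driver):

static int example_alloc(struct bnx2x *bp)
{
	/* kzalloc-backed allocation; jumps to alloc_mem_err on failure */
	BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
		    TSTORM_INDIRECTION_TABLE_SIZE);

	/* DMA-coherent allocation, zeroed by the macro itself */
	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(*bp->def_status_blk));
	return 0;

alloc_mem_err:
	/* the BNX2X_*FREE counterparts are NULL-safe, so a partially
	 * completed allocation unwinds through the normal free routine
	 */
	bnx2x_free_mem(bp);
	return -ENOMEM;
}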
@@ -378,6 +411,9 @@ int bnx2x_resume(struct pci_dev *pdev);
 /* Release IRQ vectors */
 void bnx2x_free_irq(struct bnx2x *bp);
 
+void bnx2x_free_fp_mem(struct bnx2x *bp);
+int bnx2x_alloc_fp_mem(struct bnx2x *bp);
+
 void bnx2x_init_rx_rings(struct bnx2x *bp);
 void bnx2x_free_skbs(struct bnx2x *bp);
 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
@@ -884,6 +920,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
 {
 	int i;
 
+	if (fp->disable_tpa)
+		return;
+
 	for (i = 0; i < last; i++)
 		bnx2x_free_rx_sge(bp, fp, i);
 }
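The early return makes this helper safe on queues where TPA is disabled and the SGE ring was therefore never populated, so the alloc/free flows below can call it unconditionally.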
@@ -912,13 +951,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 	}
 }
 
-static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
+static inline void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp)
 {
-	int i, j;
-
-	for_each_tx_queue(bp, j) {
-		struct bnx2x_fastpath *fp = &bp->fp[j];
+	int i;
 
 	for (i = 1; i <= NUM_TX_RINGS; i++) {
 		struct eth_tx_next_bd *tx_next_bd =
@@ -941,7 +976,14 @@ static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
 	fp->tx_bd_prod = 0;
 	fp->tx_bd_cons = 0;
 	fp->tx_pkt = 0;
-	}
+}
+
+static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_tx_queue(bp, i)
+		bnx2x_init_tx_ring_one(&bp->fp[i]);
 }
 
 static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
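Factoring the loop body into bnx2x_init_tx_ring_one() leaves a helper that can (re)initialize a single tx ring, which lines up with the per-queue granularity of the new bnx2x_alloc_fp_mem()/bnx2x_free_fp_mem() flows; bnx2x_init_tx_rings() is now just a thin wrapper.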
@@ -996,6 +1038,44 @@ static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
 	}
 }
 
+/* Returns the number of actually allocated BDs */
+static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
+				     int rx_ring_size)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 ring_prod, cqe_ring_prod;
+	int i;
+
+	fp->rx_comp_cons = 0;
+	cqe_ring_prod = ring_prod = 0;
+
+	/* This routine is called only during fp init so
+	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
+	 */
+	for (i = 0; i < rx_ring_size; i++) {
+		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+			fp->eth_q_stats.rx_skb_alloc_failed++;
+			continue;
+		}
+		ring_prod = NEXT_RX_IDX(ring_prod);
+		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
+		WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
+	}
+
+	if (fp->eth_q_stats.rx_skb_alloc_failed)
+		BNX2X_ERR("was only able to allocate "
+			  "%d rx skbs on queue[%d]\n",
+			  (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);
+
+	fp->rx_bd_prod = ring_prod;
+	/* Limit the CQE producer by the CQE ring size */
+	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+				 cqe_ring_prod);
+	fp->rx_pkt = fp->rx_calls = 0;
+
+	return i - fp->eth_q_stats.rx_skb_alloc_failed;
+}
+
 #ifdef BCM_CNIC
 static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
 {
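bnx2x_alloc_rx_bds() deliberately skips over individual skb allocation failures and reports how many BDs it actually filled; that return value is what drives the low-memory recovery. A hedged sketch of a caller acting on it (the function name and flow are illustrative; the minimum-size test mirrors the one added to bnx2x_set_ringparam() below):

static int example_init_rx_ring(struct bnx2x_fastpath *fp, int rx_ring_size)
{
	/* fill as much of the ring as memory allows */
	int allocated = bnx2x_alloc_rx_bds(fp, rx_ring_size);

	/* a partially filled ring is still usable, as long as it does
	 * not fall below the TPA-dependent minimum
	 */
	if (allocated < (fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
					   MIN_RX_SIZE_TPA))
		return -ENOMEM;

	return 0;
}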
@@ -1220,7 +1220,8 @@ static int bnx2x_set_ringparam(struct net_device *dev,
 	}
 
 	if ((ering->rx_pending > MAX_RX_AVAIL) ||
-	    (ering->rx_pending < MIN_RX_AVAIL) ||
+	    (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+						    MIN_RX_SIZE_TPA)) ||
 	    (ering->tx_pending > MAX_TX_AVAIL) ||
 	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
 		return -EINVAL;
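A worked example of the new bound: with TPA enabled, ethtool -G <dev> rx 64 is now rejected (64 is below MIN_RX_SIZE_TPA = 72), while the same request is accepted when TPA is disabled (64 is above MIN_RX_SIZE_NONTPA = 10); under the old single MIN_RX_AVAIL = 128 limit both would have failed.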
@@ -4447,7 +4447,7 @@ static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
 	fp->state = BNX2X_FP_STATE_CLOSED;
 
-	fp->index = fp->cid = fp_idx;
+	fp->cid = fp_idx;
 	fp->cl_id = BP_L_ID(bp) + fp_idx;
 	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
 	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
@@ -4559,9 +4559,11 @@ static int bnx2x_gunzip_init(struct bnx2x *bp)
 static void bnx2x_gunzip_end(struct bnx2x *bp)
 {
-	kfree(bp->strm->workspace);
-	kfree(bp->strm);
-	bp->strm = NULL;
+	if (bp->strm) {
+		kfree(bp->strm->workspace);
+		kfree(bp->strm);
+		bp->strm = NULL;
+	}
 
 	if (bp->gunzip_buf) {
 		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
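The new NULL check is what lets bnx2x_free_mem() below call bnx2x_gunzip_end() unconditionally: on the alloc_mem_err path, bnx2x_gunzip_init() may itself have failed (or never run), leaving bp->strm unset.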
@@ -5869,9 +5871,6 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 	bp->dmae_ready = 0;
 	spin_lock_init(&bp->dmae_lock);
 
-	rc = bnx2x_gunzip_init(bp);
-	if (rc)
-		return rc;
 
 	switch (load_code) {
 	case FW_MSG_CODE_DRV_LOAD_COMMON:
@@ -5915,80 +5914,10 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 
 void bnx2x_free_mem(struct bnx2x *bp)
 {
+	bnx2x_gunzip_end(bp);
 
-#define BNX2X_PCI_FREE(x, y, size) \
-	do { \
-		if (x) { \
-			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
-			x = NULL; \
-			y = 0; \
-		} \
-	} while (0)
-
-#define BNX2X_FREE(x) \
-	do { \
-		if (x) { \
-			kfree((void *)x); \
-			x = NULL; \
-		} \
-	} while (0)
-
-	int i;
-
 	/* fastpath */
-	/* Common */
-	for_each_queue(bp, i) {
-#ifdef BCM_CNIC
-		/* FCoE client uses default status block */
-		if (IS_FCOE_IDX(i)) {
-			union host_hc_status_block *sb =
-				&bnx2x_fp(bp, i, status_blk);
-			memset(sb, 0, sizeof(union host_hc_status_block));
-			bnx2x_fp(bp, i, status_blk_mapping) = 0;
-		} else {
-#endif
-		/* status blocks */
-		if (CHIP_IS_E2(bp))
-			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
-				       bnx2x_fp(bp, i, status_blk_mapping),
-				       sizeof(struct host_hc_status_block_e2));
-		else
-			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
-				       bnx2x_fp(bp, i, status_blk_mapping),
-				       sizeof(struct host_hc_status_block_e1x));
-#ifdef BCM_CNIC
-		}
-#endif
-	}
-
-	/* Rx */
-	for_each_rx_queue(bp, i) {
-		/* fastpath rx rings: rx_buf rx_desc rx_comp */
-		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
-		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
-			       bnx2x_fp(bp, i, rx_desc_mapping),
-			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
-		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
-			       bnx2x_fp(bp, i, rx_comp_mapping),
-			       sizeof(struct eth_fast_path_rx_cqe) *
-			       NUM_RCQ_BD);
-
-		/* SGE ring */
-		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
-		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
-			       bnx2x_fp(bp, i, rx_sge_mapping),
-			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
-	}
-
-	/* Tx */
-	for_each_tx_queue(bp, i) {
-		/* fastpath tx rings: tx_buf tx_desc */
-		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
-		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
-			       bnx2x_fp(bp, i, tx_desc_mapping),
-			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
-	}
+	bnx2x_free_fp_mem(bp);
 	/* end of fastpath */
 
 	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
@@ -6021,101 +5950,13 @@ void bnx2x_free_mem(struct bnx2x *bp)
 		       BCM_PAGE_SIZE * NUM_EQ_PAGES);
 
 	BNX2X_FREE(bp->rx_indir_table);
-
-#undef BNX2X_PCI_FREE
-#undef BNX2X_KFREE
 }
 
-static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
-{
-	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
-	if (CHIP_IS_E2(bp)) {
-		bnx2x_fp(bp, index, sb_index_values) =
-			(__le16 *)status_blk.e2_sb->sb.index_values;
-		bnx2x_fp(bp, index, sb_running_index) =
-			(__le16 *)status_blk.e2_sb->sb.running_index;
-	} else {
-		bnx2x_fp(bp, index, sb_index_values) =
-			(__le16 *)status_blk.e1x_sb->sb.index_values;
-		bnx2x_fp(bp, index, sb_running_index) =
-			(__le16 *)status_blk.e1x_sb->sb.running_index;
-	}
-}
-
 int bnx2x_alloc_mem(struct bnx2x *bp)
 {
-#define BNX2X_PCI_ALLOC(x, y, size) \
-	do { \
-		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
-		if (x == NULL) \
-			goto alloc_mem_err; \
-		memset(x, 0, size); \
-	} while (0)
-
-#define BNX2X_ALLOC(x, size) \
-	do { \
-		x = kzalloc(size, GFP_KERNEL); \
-		if (x == NULL) \
-			goto alloc_mem_err; \
-	} while (0)
-
-	int i;
-
-	/* fastpath */
-	/* Common */
-	for_each_queue(bp, i) {
-		union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
-		bnx2x_fp(bp, i, bp) = bp;
-
-		/* status blocks */
-#ifdef BCM_CNIC
-		if (!IS_FCOE_IDX(i)) {
-#endif
-			if (CHIP_IS_E2(bp))
-				BNX2X_PCI_ALLOC(sb->e2_sb,
-				    &bnx2x_fp(bp, i, status_blk_mapping),
-				    sizeof(struct host_hc_status_block_e2));
-			else
-				BNX2X_PCI_ALLOC(sb->e1x_sb,
-				    &bnx2x_fp(bp, i, status_blk_mapping),
-				    sizeof(struct host_hc_status_block_e1x));
-#ifdef BCM_CNIC
-		}
-#endif
-		set_sb_shortcuts(bp, i);
-	}
-
-	/* Rx */
-	for_each_queue(bp, i) {
-		/* fastpath rx rings: rx_buf rx_desc rx_comp */
-		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
-			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
-		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
-				&bnx2x_fp(bp, i, rx_desc_mapping),
-				sizeof(struct eth_rx_bd) * NUM_RX_BD);
-		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
-				&bnx2x_fp(bp, i, rx_comp_mapping),
-				sizeof(struct eth_fast_path_rx_cqe) *
-				NUM_RCQ_BD);
-
-		/* SGE ring */
-		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
-			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
-		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
-				&bnx2x_fp(bp, i, rx_sge_mapping),
-				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
-	}
-
-	/* Tx */
-	for_each_queue(bp, i) {
-		/* fastpath tx rings: tx_buf tx_desc */
-		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
-			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
-		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
-				&bnx2x_fp(bp, i, tx_desc_mapping),
-				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
-	}
-	/* end of fastpath */
+	if (bnx2x_gunzip_init(bp))
+		return -ENOMEM;
 
 #ifdef BCM_CNIC
 	if (CHIP_IS_E2(bp))
@@ -6155,14 +5996,18 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
 	BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
 		    TSTORM_INDIRECTION_TABLE_SIZE);
 
+	/* fastpath */
+	/* need to be done at the end, since it's self adjusting to amount
+	 * of memory available for RSS queues
+	 */
+	if (bnx2x_alloc_fp_mem(bp))
+		goto alloc_mem_err;
 	return 0;
 
 alloc_mem_err:
 	bnx2x_free_mem(bp);
 	return -ENOMEM;
-
-#undef BNX2X_PCI_ALLOC
-#undef BNX2X_ALLOC
 }
 
 /*
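With those pieces in place, the two entry points pair up symmetrically. A sketch of the resulting caller-side contract (caller names are illustrative):

/* bnx2x_alloc_mem() now also owns the gunzip buffer and the fastpath
 * memory; on failure it has already unwound via bnx2x_free_mem(), so
 * the caller only propagates the error.
 */
static int example_nic_load(struct bnx2x *bp)
{
	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;
	/* ... continue bringing the device up ... */
	return 0;
}

static void example_nic_unload(struct bnx2x *bp)
{
	bnx2x_free_mem(bp);	/* fastpath, common, then gunzip (NULL-safe) */
}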