Commit 16f50cda authored by Jakub Kicinski's avatar Jakub Kicinski Committed by David S. Miller

nfp: use a counter instead of log message for allocation failures

Add a counter incremented when allocation of replacement
RX page fails.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Simon Horman <simon.horman@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 790a3991
...@@ -394,6 +394,7 @@ struct nfp_net_rx_ring { ...@@ -394,6 +394,7 @@ struct nfp_net_rx_ring {
* @tx_lso: Counter of LSO packets sent * @tx_lso: Counter of LSO packets sent
* @tx_errors: How many TX errors were encountered * @tx_errors: How many TX errors were encountered
* @tx_busy: How often was TX busy (no space)? * @tx_busy: How often was TX busy (no space)?
* @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures
* @irq_vector: Interrupt vector number (use for talking to the OS) * @irq_vector: Interrupt vector number (use for talking to the OS)
* @handler: Interrupt handler for this ring vector * @handler: Interrupt handler for this ring vector
* @name: Name of the interrupt vector * @name: Name of the interrupt vector
...@@ -437,6 +438,8 @@ struct nfp_net_r_vector { ...@@ -437,6 +438,8 @@ struct nfp_net_r_vector {
u64 hw_csum_tx_inner; u64 hw_csum_tx_inner;
u64 tx_gather; u64 tx_gather;
u64 tx_lso; u64 tx_lso;
u64 rx_replace_buf_alloc_fail;
u64 tx_errors; u64 tx_errors;
u64 tx_busy; u64 tx_busy;
......
...@@ -1209,15 +1209,15 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) ...@@ -1209,15 +1209,15 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
if (!dp->xdp_prog) { if (!dp->xdp_prog) {
frag = napi_alloc_frag(dp->fl_bufsz); frag = napi_alloc_frag(dp->fl_bufsz);
if (unlikely(!frag))
return NULL;
} else { } else {
struct page *page; struct page *page;
page = dev_alloc_page(); page = dev_alloc_page();
frag = page ? page_address(page) : NULL; if (unlikely(!page))
} return NULL;
if (!frag) { frag = page_address(page);
nn_dp_warn(dp, "Failed to alloc receive page frag\n");
return NULL;
} }
*dma_addr = nfp_net_dma_map_rx(dp, frag); *dma_addr = nfp_net_dma_map_rx(dp, frag);
...@@ -1514,6 +1514,11 @@ nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, ...@@ -1514,6 +1514,11 @@ nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
{ {
u64_stats_update_begin(&r_vec->rx_sync); u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_drops++; r_vec->rx_drops++;
/* If we have both skb and rxbuf the replacement buffer allocation
* must have failed, count this as an alloc failure.
*/
if (skb && rxbuf)
r_vec->rx_replace_buf_alloc_fail++;
u64_stats_update_end(&r_vec->rx_sync); u64_stats_update_end(&r_vec->rx_sync);
/* skb is build based on the frag, free_skb() would free the frag /* skb is build based on the frag, free_skb() would free the frag
......
...@@ -181,7 +181,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = { ...@@ -181,7 +181,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9 #define NN_ET_SWITCH_STATS_LEN 9
#define NN_ET_RVEC_GATHER_STATS 7 #define NN_ET_RVEC_GATHER_STATS 8
static void nfp_net_get_nspinfo(struct nfp_app *app, char *version) static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{ {
...@@ -444,6 +444,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) ...@@ -444,6 +444,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "hw_rx_csum_ok"); data = nfp_pr_et(data, "hw_rx_csum_ok");
data = nfp_pr_et(data, "hw_rx_csum_inner_ok"); data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
data = nfp_pr_et(data, "hw_rx_csum_err"); data = nfp_pr_et(data, "hw_rx_csum_err");
data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
data = nfp_pr_et(data, "hw_tx_csum"); data = nfp_pr_et(data, "hw_tx_csum");
data = nfp_pr_et(data, "hw_tx_inner_csum"); data = nfp_pr_et(data, "hw_tx_inner_csum");
data = nfp_pr_et(data, "tx_gather"); data = nfp_pr_et(data, "tx_gather");
...@@ -468,16 +469,17 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) ...@@ -468,16 +469,17 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
tmp[2] = nn->r_vecs[i].hw_csum_rx_error; tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
tmp[3] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start)); } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
do { do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts; data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy; data[2] = nn->r_vecs[i].tx_busy;
tmp[3] = nn->r_vecs[i].hw_csum_tx; tmp[4] = nn->r_vecs[i].hw_csum_tx;
tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; tmp[5] = nn->r_vecs[i].hw_csum_tx_inner;
tmp[5] = nn->r_vecs[i].tx_gather; tmp[6] = nn->r_vecs[i].tx_gather;
tmp[6] = nn->r_vecs[i].tx_lso; tmp[7] = nn->r_vecs[i].tx_lso;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
data += 3; data += 3;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment