Commit ba5ca784 authored by Ivan Vecera, committed by David S. Miller

bna: check for dma mapping errors

Check for DMA mapping errors, recover from them, and account for them in
the ethtool stats like other errors.

Cc: Rasesh Mody <rasesh.mody@qlogic.com>
Signed-off-by: Ivan Vecera <ivecera@redhat.com>
Acked-by: Rasesh Mody <rasesh.mody@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c2e7204d
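
The changes below apply the standard streaming-DMA error check at every map site: the result of dma_map_single()/dma_map_page()/skb_frag_dma_map() is tested with dma_mapping_error() before the descriptor is handed to hardware; on failure the buffer is released, a counter is bumped via BNAD_UPDATE_CTR(), and the operation is abandoned (the rx refill loop jumps to finishing, the tx path frees the skb and returns NETDEV_TX_OK). A minimal sketch of the rx-side pattern follows; the helper name and the counter pointer are hypothetical and not part of the driver.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative sketch only: map a receive buffer for the device and
 * recover from a mapping failure by freeing the buffer and counting
 * the error, mirroring what the patch does with rxbuf_map_failed.
 */
static int example_map_rx_skb(struct device *dev, struct sk_buff *skb,
                              unsigned int len, dma_addr_t *dma_addr,
                              u64 *map_failed_ctr)
{
    *dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, *dma_addr)) {
        dev_kfree_skb_any(skb);   /* release the unmappable buffer */
        (*map_failed_ctr)++;      /* account for the failure */
        return -ENOMEM;           /* caller skips this descriptor */
    }
    return 0;
}

The tx path in bnad_start_xmit() follows the same pattern but, for fragment mappings, additionally unwinds the vectors already set up for the skb via bnad_tx_buff_unmap() before dropping it.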
@@ -2400,6 +2400,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
     q0->rcb->id = 0;
     q0->rx_packets = q0->rx_bytes = 0;
     q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
+    q0->rxbuf_map_failed = 0;
     bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
             &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
@@ -2428,6 +2429,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
             : rx_cfg->q1_buf_size;
     q1->rx_packets = q1->rx_bytes = 0;
     q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
+    q1->rxbuf_map_failed = 0;
     bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
             &hqpt_mem[i], &hsqpt_mem[i],
...
@@ -587,6 +587,7 @@ struct bna_rxq {
     u64 rx_bytes;
     u64 rx_packets_with_error;
     u64 rxbuf_alloc_failed;
+    u64 rxbuf_map_failed;
 };
 /* RxQ pair */
...
@@ -400,6 +400,12 @@ bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
     dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
             unmap_q->map_size, DMA_FROM_DEVICE);
+    if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+        put_page(page);
+        BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
+        rcb->rxq->rxbuf_map_failed++;
+        goto finishing;
+    }
     unmap->page = page;
     unmap->page_offset = page_offset;
@@ -454,8 +460,15 @@ bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
         rcb->rxq->rxbuf_alloc_failed++;
         goto finishing;
     }
     dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
             buff_sz, DMA_FROM_DEVICE);
+    if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+        dev_kfree_skb_any(skb);
+        BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
+        rcb->rxq->rxbuf_map_failed++;
+        goto finishing;
+    }
     unmap->skb = skb;
     dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
@@ -3025,6 +3038,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
     unmap = head_unmap;
     dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
             len, DMA_TO_DEVICE);
+    if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+        dev_kfree_skb_any(skb);
+        BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
+        return NETDEV_TX_OK;
+    }
     BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
     txqent->vector[0].length = htons(len);
     dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
@@ -3056,6 +3074,15 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
     dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
             0, size, DMA_TO_DEVICE);
+    if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+        /* Undo the changes starting at tcb->producer_index */
+        bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
+                tcb->producer_index);
+        dev_kfree_skb_any(skb);
+        BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
+        return NETDEV_TX_OK;
+    }
     dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
     BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
     txqent->vector[vect_id].length = htons(size);
...
@@ -175,6 +175,7 @@ struct bnad_drv_stats {
     u64 tx_skb_headlen_zero;
     u64 tx_skb_frag_zero;
     u64 tx_skb_len_mismatch;
+    u64 tx_skb_map_failed;
     u64 hw_stats_updates;
     u64 netif_rx_dropped;
@@ -189,6 +190,7 @@ struct bnad_drv_stats {
     u64 rx_unmap_q_alloc_failed;
     u64 rxbuf_alloc_failed;
+    u64 rxbuf_map_failed;
 };
 /* Complete driver stats */
...
@@ -90,6 +90,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
     "tx_skb_headlen_zero",
     "tx_skb_frag_zero",
     "tx_skb_len_mismatch",
+    "tx_skb_map_failed",
     "hw_stats_updates",
     "netif_rx_dropped",
@@ -102,6 +103,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
     "tx_unmap_q_alloc_failed",
     "rx_unmap_q_alloc_failed",
     "rxbuf_alloc_failed",
+    "rxbuf_map_failed",
     "mac_stats_clr_cnt",
     "mac_frame_64",
@@ -807,6 +809,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
                 rx_packets_with_error;
         buf[bi++] = rcb->rxq->
                 rxbuf_alloc_failed;
+        buf[bi++] = rcb->rxq->rxbuf_map_failed;
         buf[bi++] = rcb->producer_index;
         buf[bi++] = rcb->consumer_index;
     }
@@ -821,6 +824,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
                 rx_packets_with_error;
         buf[bi++] = rcb->rxq->
                 rxbuf_alloc_failed;
+        buf[bi++] = rcb->rxq->rxbuf_map_failed;
         buf[bi++] = rcb->producer_index;
         buf[bi++] = rcb->consumer_index;
     }
...