Commit 8031612d authored by Michal Schmidt, committed by David S. Miller

bnx2x: fix DMA API usage

With CONFIG_DMA_API_DEBUG=y bnx2x triggers the error "DMA-API: device
driver frees DMA memory with wrong function".
On archs where PAGE_SIZE > SGE_PAGE_SIZE it also triggers "DMA-API:
device driver frees DMA memory with different size".

Fix this by making the mapping and unmapping symmetric:
 - Do not map the whole pool page at once. Instead map the
   SGE_PAGE_SIZE-sized pieces individually, so they can be unmapped in
   the same manner.
 - What's mapped using dma_map_page() must be unmapped using
   dma_unmap_page() (see the sketch below).
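
The underlying rule is the DMA API's symmetry requirement: a region
mapped with dma_map_page() must be unmapped with dma_unmap_page(), and
with the same size it was mapped with. A minimal sketch of the pattern,
assuming hypothetical map_rx_piece()/unmap_rx_piece() helpers that are
not part of the driver:

	#include <linux/dma-mapping.h>

	/* Hypothetical helpers, for illustration only. SGE_PAGE_SIZE is
	 * the driver's SGE fragment size; on archs such as ppc64 it is
	 * smaller than PAGE_SIZE.
	 */
	static dma_addr_t map_rx_piece(struct device *dev,
				       struct page *page,
				       unsigned int offset)
	{
		/* Map only the SGE_PAGE_SIZE-sized piece at 'offset'.
		 * The caller must check dma_mapping_error() on the
		 * returned address before using it.
		 */
		return dma_map_page(dev, page, offset, SGE_PAGE_SIZE,
				    DMA_FROM_DEVICE);
	}

	static void unmap_rx_piece(struct device *dev, dma_addr_t mapping)
	{
		/* Unmap with the matching function and the same size. */
		dma_unmap_page(dev, mapping, SGE_PAGE_SIZE,
			       DMA_FROM_DEVICE);
	}

With CONFIG_DMA_API_DEBUG=y, dma-debug records the function family and
size used to create each mapping, which is why the previous mix of
dma_map_page() with dma_unmap_single(), and of a PAGE_SIZE map with
SGE_PAGE_SIZE unmaps, was reported as an error.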

Tested on ppc64.

Fixes: 4cace675 ("bnx2x: Alloc 4k fragment for each rx ring buffer element")
Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0f8b6cea
@@ -530,7 +530,6 @@ enum bnx2x_tpa_mode_t {
 struct bnx2x_alloc_pool {
 	struct page	*page;
-	dma_addr_t	dma;
 	unsigned int	offset;
 };
@@ -563,23 +563,20 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			return -ENOMEM;
 		}
 
-		pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0,
-					 PAGE_SIZE, DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&bp->pdev->dev,
-					       pool->dma))) {
-			__free_pages(pool->page, PAGES_PER_SGE_SHIFT);
-			pool->page = NULL;
-			BNX2X_ERR("Can't map sge\n");
-			return -ENOMEM;
-		}
 		pool->offset = 0;
 	}
 
+	mapping = dma_map_page(&bp->pdev->dev, pool->page,
+			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		BNX2X_ERR("Can't map sge\n");
+		return -ENOMEM;
+	}
+
 	get_page(pool->page);
 	sw_buf->page = pool->page;
 	sw_buf->offset = pool->offset;
 
-	mapping = pool->dma + sw_buf->offset;
 	dma_unmap_addr_set(sw_buf, mapping, mapping);
 
 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
@@ -648,9 +645,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			return err;
 		}
 
-		dma_unmap_single(&bp->pdev->dev,
-				 dma_unmap_addr(&old_rx_pg, mapping),
-				 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_unmap_page(&bp->pdev->dev,
+			       dma_unmap_addr(&old_rx_pg, mapping),
+			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
 		/* Add one frag and update the appropriate fields in the skb */
 		if (fp->mode == TPA_MODE_LRO)
 			skb_fill_page_desc(skb, j, old_rx_pg.page,
@@ -807,8 +807,8 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 	/* Since many fragments can share the same page, make sure to
 	 * only unmap and free the page once.
 	 */
-	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
-			 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+		       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
 
 	put_page(page);
@@ -974,14 +974,6 @@ static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
 	if (!pool->page)
 		return;
 
-	/* Page was not fully fragmented. Unmap unused space */
-	if (pool->offset < PAGE_SIZE) {
-		dma_addr_t dma = pool->dma + pool->offset;
-		int size = PAGE_SIZE - pool->offset;
-
-		dma_unmap_single(&bp->pdev->dev, dma, size, DMA_FROM_DEVICE);
-	}
-
 	put_page(pool->page);
 	pool->page = NULL;