Commit 947c54c3 authored by Shay Agroskin, committed by David S. Miller

net: ena: Use dev_alloc() in RX buffer allocation

Use dev_alloc() when allocating RX buffers instead of specifying the
allocation flags explicitly. This results in the same behaviour with less
code.

Also move the page allocation and its DMA mapping into a separate function.
This creates a logical block, which may make the code easier to understand.
Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9e8afb05
...@@ -975,8 +975,37 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter) ...@@ -975,8 +975,37 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
ena_free_rx_resources(adapter, i); ena_free_rx_resources(adapter, i);
} }
static int ena_alloc_rx_page(struct ena_ring *rx_ring, struct page *ena_alloc_map_page(struct ena_ring *rx_ring, dma_addr_t *dma)
struct ena_rx_buffer *rx_info, gfp_t gfp) {
struct page *page;
/* This would allocate the page on the same NUMA node the executing code
* is running on.
*/
page = dev_alloc_page();
if (!page) {
ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
&rx_ring->syncp);
return ERR_PTR(-ENOSPC);
}
/* To enable NIC-side port-mirroring, AKA SPAN port,
* we make the buffer readable from the nic as well
*/
*dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
&rx_ring->syncp);
__free_page(page);
return ERR_PTR(-EIO);
}
return page;
}
static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
struct ena_rx_buffer *rx_info)
{ {
int headroom = rx_ring->rx_headroom; int headroom = rx_ring->rx_headroom;
struct ena_com_buf *ena_buf; struct ena_com_buf *ena_buf;
...@@ -991,25 +1020,11 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring, ...@@ -991,25 +1020,11 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
if (unlikely(rx_info->page)) if (unlikely(rx_info->page))
return 0; return 0;
page = alloc_page(gfp); /* We handle DMA here */
if (unlikely(!page)) { page = ena_alloc_map_page(rx_ring, &dma);
ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, if (unlikely(IS_ERR(page)))
&rx_ring->syncp); return PTR_ERR(page);
return -ENOMEM;
}
/* To enable NIC-side port-mirroring, AKA SPAN port,
* we make the buffer readable from the nic as well
*/
dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
&rx_ring->syncp);
__free_page(page);
return -EIO;
}
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"Allocate page %p, rx_info %p\n", page, rx_info); "Allocate page %p, rx_info %p\n", page, rx_info);
...@@ -1065,8 +1080,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) ...@@ -1065,8 +1080,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
rx_info = &rx_ring->rx_buffer_info[req_id]; rx_info = &rx_ring->rx_buffer_info[req_id];
rc = ena_alloc_rx_page(rx_ring, rx_info, rc = ena_alloc_rx_buffer(rx_ring, rx_info);
GFP_ATOMIC | __GFP_COMP);
if (unlikely(rc < 0)) { if (unlikely(rc < 0)) {
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
"Failed to allocate buffer for rx queue %d\n", "Failed to allocate buffer for rx queue %d\n",
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment