Commit 29b006a6 authored by Jesper Dangaard Brouer, committed by David S. Miller

mlx5: more strict use of page_pool API

The mlx5 driver is using page_pool, but not (currently) for DMA mapping, and it is a little too relaxed about returning or releasing page resources, as that is not strictly necessary when DMA mappings are not used.

As this patchset works towards tracking page_pool resources, in order to know about in-flight frames on shutdown, fix the places where mlx5 leaks page_pool resources.

In case of dma_mapping_error(), recycle the page back into the page_pool.
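
For illustration, the allocation-path pattern is roughly the following (a minimal sketch of the pattern in this patch; the page_pool and DMA calls are the real kernel API, and the variable naming follows the driver):

	/* Sketch: allocate from the pool, try to DMA-map the page, and
	 * on mapping failure hand the page straight back to the pool
	 * instead of calling put_page(), so pool accounting stays
	 * balanced.
	 */
	dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
	if (unlikely(!dma_info->page))
		return -ENOMEM;

	dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
				      PAGE_SIZE, rq->buff.map_dir);
	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
		page_pool_recycle_direct(rq->page_pool, dma_info->page);
		dma_info->page = NULL;
		return -ENOMEM;
	}
	return 0;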

In mlx5e_free_rq(), move the page_pool_destroy() call to after the mlx5e_page_release() calls, as that ordering is more correct.
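
The resulting teardown order, sketched (a condensed view of mlx5e_free_rq() after this change; surrounding code elided):

	/* Sketch: every page the driver still holds must go back
	 * through mlx5e_page_release() while the pool still exists;
	 * only then is it safe to unregister the xdp_rxq and destroy
	 * the pool.  The old order destroyed the pool first, so the
	 * later releases were lost to pool accounting.
	 */
	mlx5e_page_release(rq, dma_info, false);	/* for each cached page */

	xdp_rxq_info_unreg(&rq->xdp_rxq);
	if (rq->page_pool)
		page_pool_destroy(rq->page_pool);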

In mlx5e_page_release(), when no recycling is requested, release the page from the page_pool via page_pool_release_page().
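
Sketched, the non-recycle branch of mlx5e_page_release() then reads (a minimal sketch of the pattern in the patch):

	/* Sketch: when a page leaves the pool for good, drop the
	 * driver's DMA mapping, detach the page from pool accounting
	 * via page_pool_release_page(), then drop the reference.
	 * Without the release call the pool would keep counting this
	 * page as in-flight.
	 */
	mlx5e_page_dma_unmap(rq, dma_info);
	page_pool_release_page(rq->page_pool, dma_info->page);
	put_page(dma_info->page);
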
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e54cfd7e
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -625,10 +625,6 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 	if (rq->xdp_prog)
 		bpf_prog_put(rq->xdp_prog);
 
-	xdp_rxq_info_unreg(&rq->xdp_rxq);
-	if (rq->page_pool)
-		page_pool_destroy(rq->page_pool);
-
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		kvfree(rq->mpwqe.info);
@@ -645,6 +641,11 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 		mlx5e_page_release(rq, dma_info, false);
 	}
 
+	xdp_rxq_info_unreg(&rq->xdp_rxq);
+	if (rq->page_pool)
+		page_pool_destroy(rq->page_pool);
+
 	mlx5_wq_destroy(&rq->wq_ctrl);
 }
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -248,7 +248,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
 	dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
 				      PAGE_SIZE, rq->buff.map_dir);
 	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
-		put_page(dma_info->page);
+		page_pool_recycle_direct(rq->page_pool, dma_info->page);
 		dma_info->page = NULL;
 		return -ENOMEM;
 	}
@@ -272,6 +272,7 @@ void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
 		page_pool_recycle_direct(rq->page_pool, dma_info->page);
 	} else {
 		mlx5e_page_dma_unmap(rq, dma_info);
+		page_pool_release_page(rq->page_pool, dma_info->page);
 		put_page(dma_info->page);
 	}
 }