Commit 96d37d86 authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: Call mlx5e_page_release_dynamic directly where possible

mlx5e_page_release calls the appropriate deallocator depending on
whether it's an XSK RQ or a regular one. Some flows that call this
function are not compatible with XSK, so they can call the non-XSK
deallocator directly to save a branch.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 132857d9
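For context, mlx5e_page_release is the dispatching wrapper this patch bypasses. A minimal sketch of its shape, assuming the rq->xsk_pool check, the au->xsk union member, and the xsk_buff_free() call used by the XSK path of this driver era (the diff below only shows the non-XSK side):

static inline void mlx5e_page_release(struct mlx5e_rq *rq,
                                      union mlx5e_alloc_unit *au,
                                      bool recycle)
{
        if (rq->xsk_pool)
                /* Assumed XSK path: the frame goes back to the XSK
                 * buffer pool; "recycle" does not apply here.
                 */
                xsk_buff_free(au->xsk);
        else
                /* Non-XSK path: the page-cache/page-pool release that
                 * the call sites below now invoke directly.
                 */
                mlx5e_page_release_dynamic(rq, au->page, recycle);
}

The flows changed below are guaranteed never to run on an XSK RQ, so calling mlx5e_page_release_dynamic() directly drops the rq->xsk_pool test (and the temporary union mlx5e_alloc_unit) from those paths.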
@@ -588,12 +588,8 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
 	while (--i >= 0) {
 		dma_info = &shampo->info[--index];
 		if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
-			union mlx5e_alloc_unit au = {
-				.page = dma_info->page,
-			};
-
 			dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
-			mlx5e_page_release(rq, &au, true);
+			mlx5e_page_release_dynamic(rq, dma_info->page, true);
 		}
 	}
 	rq->stats->buff_alloc_err++;
@@ -698,7 +694,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 err_unmap:
 	while (--i >= 0) {
 		au--;
-		mlx5e_page_release(rq, au, true);
+		mlx5e_page_release_dynamic(rq, au->page, true);
 	}
 
 err:
@@ -731,12 +727,8 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
 		hd_info = &shampo->info[index];
 		hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
 		if (hd_info->page != deleted_page) {
-			union mlx5e_alloc_unit au = {
-				.page = hd_info->page,
-			};
-
 			deleted_page = hd_info->page;
-			mlx5e_page_release(rq, &au, false);
+			mlx5e_page_release_dynamic(rq, hd_info->page, false);
 		}
 	}
@@ -2061,12 +2053,8 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
 	u64 addr = shampo->info[header_index].addr;
 
 	if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
-		union mlx5e_alloc_unit au = {
-			.page = shampo->info[header_index].page,
-		};
-
 		shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE);
-		mlx5e_page_release(rq, &au, true);
+		mlx5e_page_release_dynamic(rq, shampo->info[header_index].page, true);
 	}
 	bitmap_clear(shampo->bitmap, header_index, 1);
 }