Commit 0b482232 authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: Remove the outer loop when allocating legacy RQ WQEs

Legacy RQ WQEs are allocated in a loop in small batches (8 WQEs). As
partial batches are allowed, there is no point in having a loop within a
loop, so the outer loop is removed, and the batch size is increased up
to the total number of WQEs to allocate, while still being no smaller
than 8.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 3f5fe0b2
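
For illustration, here is a minimal standalone sketch of the batch-size
clamp that the reworked mlx5e_post_rx_wqes() performs below (the helper
name clamp_wqe_bulk and all values are hypothetical; only the masking
expression mirrors the driver):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper mirroring the clamp in the new code: trim the
 * batch so it ends on a page boundary, because newly allocated WQEs
 * must not share a page with old WQEs that aren't completed yet.
 */
static int clamp_wqe_bulk(uint16_t head, int wqe_bulk, uint16_t wqe_index_mask)
{
	wqe_bulk -= (head + wqe_bulk) & wqe_index_mask;
	return wqe_bulk;
}

int main(void)
{
	/* Assume 8 WQEs share a page -> wqe_index_mask = 7. With
	 * head = 13 and 20 free slots: 13 + 20 = 33, 33 & 7 = 1, so
	 * the batch is trimmed to 19 and ends at index 32, page-aligned.
	 */
	printf("%d\n", clamp_wqe_bulk(13, 20, 7)); /* prints 19 */
	return 0;
}
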
@@ -424,7 +424,7 @@ static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
 	mlx5e_free_rx_wqe(rq, wi, false);
 }
 
-static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
+static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
 {
 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
 	int i;
@@ -805,38 +805,33 @@ static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 {
 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+	int wqe_bulk, count;
 	bool busy = false;
-	u8 wqe_bulk;
+	u16 head;
 
 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
 		return false;
 
-	wqe_bulk = rq->wqe.info.wqe_bulk;
-
-	if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
+	if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
 		return false;
 
 	if (rq->page_pool)
 		page_pool_nid_changed(rq->page_pool, numa_mem_id());
 
-	do {
-		u16 head = mlx5_wq_cyc_get_head(wq);
-		int count;
-		u8 bulk;
+	wqe_bulk = mlx5_wq_cyc_missing(wq);
+	head = mlx5_wq_cyc_get_head(wq);
 
-		/* Don't allow any newly allocated WQEs to share the same page
-		 * with old WQEs that aren't completed yet. Stop earlier.
-		 */
-		bulk = wqe_bulk - ((head + wqe_bulk) & rq->wqe.info.wqe_index_mask);
+	/* Don't allow any newly allocated WQEs to share the same page with old
+	 * WQEs that aren't completed yet. Stop earlier.
+	 */
+	wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
 
-		count = mlx5e_alloc_rx_wqes(rq, head, bulk);
-		mlx5_wq_cyc_push_n(wq, count);
+	count = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
+	mlx5_wq_cyc_push_n(wq, count);
 
-		if (unlikely(count != bulk)) {
-			rq->stats->buff_alloc_err++;
-			busy = true;
-			break;
-		}
-	} while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);
+	if (unlikely(count != wqe_bulk)) {
+		rq->stats->buff_alloc_err++;
+		busy = true;
+	}
 
 	/* ensure wqes are visible to device before updating doorbell record */
 	dma_wmb();
@@ -123,7 +123,7 @@ static inline void mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq)
 	wq->cur_sz++;
 }
 
-static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u8 n)
+static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u16 n)
{
 	wq->wqe_ctr += n;
 	wq->cur_sz += n;
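
A note on the type changes above (my inference; the commit message does
not spell it out): with the outer loop gone, the bulk is bounded by
mlx5_wq_cyc_missing(wq) rather than a fixed batch of 8, so on rings with
more than 255 entries it no longer fits in a u8; hence wqe_bulk becomes
int and the n parameter of mlx5_wq_cyc_push_n() becomes u16. A toy
example of the truncation a u8 would cause:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical bulk of 512 WQEs on a large ring: a u8 parameter
	 * would silently truncate it, while a u16 carries it intact.
	 */
	int bulk = 512;
	uint8_t  as_u8  = (uint8_t)bulk;
	uint16_t as_u16 = (uint16_t)bulk;

	printf("u8: %u, u16: %u\n", as_u8, as_u16); /* u8: 0, u16: 512 */
	return 0;
}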