Commit 8515c581 authored by Or Gerlitz, committed by David S. Miller

net/mlx5e: Refactor retrieval of skb from rx completion element (cqe)

Factor the code that extracts the skb from an rx completion element (cqe)
into a static inline helper, skb_from_cqe().

Move the call to napi_gro_receive to be carried out just
after mlx5e_complete_rx_cqe returns.

Both changes will also be used by the VF representor
in the next commit.

This patch doesn't change any functionality.
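
As a rough illustration of how the split is meant to be reused, the sketch below shows a second rx handler built on top of skb_from_cqe(). The function name (mlx5e_handle_rx_cqe_rep_sketch) and the representor-specific step are assumptions for illustration only and are not taken from the follow-up commit:

/* Illustrative sketch only: a second rx handler reusing skb_from_cqe().
 * Because napi_gro_receive() is no longer called inside
 * mlx5e_complete_rx_cqe(), each handler can do its own work on the skb
 * before handing it to the stack.
 */
void mlx5e_handle_rx_cqe_rep_sketch(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5e_rx_wqe *wqe;
	__be16 wqe_counter_be;
	struct sk_buff *skb;
	u16 wqe_counter;
	u32 cqe_bcnt;

	wqe_counter_be = cqe->wqe_counter;
	wqe_counter    = be16_to_cpu(wqe_counter_be);
	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);

	skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
	if (!skb) /* error, or the page/packet was consumed by XDP */
		goto wq_ll_pop;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	/* representor-specific handling of the skb would go here,
	 * before it is passed up the stack
	 */
	napi_gro_receive(rq->cq.napi, skb);

wq_ll_pop:
	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
		       &wqe->next.next_wqe_index);
}
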
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 776b12b6
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

@@ -629,7 +629,6 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
 	rq->stats.packets++;
 	rq->stats.bytes += cqe_bcnt;
 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
-	napi_gro_receive(rq->cq.napi, skb);
 }
 
 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
@@ -733,20 +732,15 @@ static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
 	}
 }
 
-void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static inline
+struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+			     u16 wqe_counter, u32 cqe_bcnt)
 {
 	struct bpf_prog *xdp_prog = READ_ONCE(rq->xdp_prog);
 	struct mlx5e_dma_info *di;
-	struct mlx5e_rx_wqe *wqe;
-	__be16 wqe_counter_be;
 	struct sk_buff *skb;
-	u16 wqe_counter;
 	void *va, *data;
-	u32 cqe_bcnt;
 
-	wqe_counter_be = cqe->wqe_counter;
-	wqe_counter    = be16_to_cpu(wqe_counter_be);
-	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
 	di             = &rq->dma_info[wqe_counter];
 	va             = page_address(di->page);
 	data           = va + MLX5_RX_HEADROOM;
@@ -757,22 +751,21 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 				      rq->buff.wqe_sz,
 				      DMA_FROM_DEVICE);
 	prefetch(data);
-	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
 
 	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
 		rq->stats.wqe_err++;
 		mlx5e_page_release(rq, di, true);
-		goto wq_ll_pop;
+		return NULL;
 	}
 
 	if (mlx5e_xdp_handle(rq, xdp_prog, di, data, cqe_bcnt))
-		goto wq_ll_pop; /* page/packet was consumed by XDP */
+		return NULL; /* page/packet was consumed by XDP */
 
 	skb = build_skb(va, RQ_PAGE_SIZE(rq));
 	if (unlikely(!skb)) {
 		rq->stats.buff_alloc_err++;
 		mlx5e_page_release(rq, di, true);
-		goto wq_ll_pop;
+		return NULL;
 	}
 
 	/* queue up for recycling ..*/
@@ -782,7 +775,28 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	skb_reserve(skb, MLX5_RX_HEADROOM);
 	skb_put(skb, cqe_bcnt);
 
+	return skb;
+}
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+	struct mlx5e_rx_wqe *wqe;
+	__be16 wqe_counter_be;
+	struct sk_buff *skb;
+	u16 wqe_counter;
+	u32 cqe_bcnt;
+
+	wqe_counter_be = cqe->wqe_counter;
+	wqe_counter    = be16_to_cpu(wqe_counter_be);
+	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
+
+	skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+	if (!skb)
+		goto wq_ll_pop;
+
 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+	napi_gro_receive(rq->cq.napi, skb);
 
 wq_ll_pop:
 	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
@@ -861,6 +875,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+	napi_gro_receive(rq->cq.napi, skb);
 
 mpwrq_cqe_out:
 	if (likely(wi->consumed_strides < rq->mpwqe_num_strides))