Commit 63abf14e authored by Tariq Toukan, committed by David S. Miller

net/mlx5e: XDP, Allow non-linear single-segment frames in XDP TX MPWQE

Under a few restrictions, the TX MPWQE feature can serve multiple TX packets
in a single TX descriptor. It requires each of the packets to have a
single scatter entry / segment.

Today we allow only linear frames to use this feature, although there's
no real problem with non-linear ones where the whole packet resides in
the first fragment.

Expand the XDP TX MPWQE feature support to include such frames. This is
in preparation for the downstream patch, in which we will generate such
non-linear frames.
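
To make the restriction concrete, here is a small standalone sketch of the
single-segment eligibility rule and of promoting frag[0] of a non-linear
frame so it can be handled like a linear one. The types and helper names
below are illustrative only (not the driver's mlx5e_xmit_data structures);
the real driver code is in the diff that follows.

/* Standalone sketch (not driver code): models the MPWQE single-segment
 * rule described above. All names here are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct frag {              /* one page fragment of a multi-buffer frame */
	void  *addr;
	size_t len;
};

struct xmit_desc {         /* simplified transmit descriptor */
	void        *data;     /* linear part, may be empty (len == 0) */
	size_t       len;      /* length of the linear part */
	unsigned int nr_frags; /* number of page fragments */
	struct frag *frags;
};

/* A packet may join a TX MPWQE only if it maps to a single scatter
 * segment: either a non-empty linear part with no fragments, or an empty
 * linear part with exactly one fragment. This mirrors the
 * "!!len + nr_frags > 1" test added by the patch.
 */
static bool mpwqe_single_segment(const struct xmit_desc *d)
{
	return (size_t)!!d->len + d->nr_frags == 1;
}

/* For a non-linear single-segment frame, point the send path at frag[0]
 * so it can be treated exactly like a linear packet.
 */
static void promote_first_frag(struct xmit_desc *d)
{
	if (!d->len && d->nr_frags == 1) {
		d->data     = d->frags[0].addr;
		d->len      = d->frags[0].len;
		d->nr_frags = 0;
	}
}

int main(void)
{
	char payload[64] = { 0 };
	struct frag f = { .addr = payload, .len = sizeof(payload) };

	/* non-linear frame: empty linear part, whole packet in frag[0] */
	struct xmit_desc d = { .data = NULL, .len = 0, .nr_frags = 1, .frags = &f };

	printf("eligible before promotion: %d\n", mpwqe_single_segment(&d));
	promote_first_frag(&d);
	printf("linear len after promotion: %zu\n", d.len);
	return 0;
}

The "!!len + nr_frags" form counts a non-empty linear part as one segment,
so a purely linear packet and a packet residing entirely in its first
fragment pass the same check.
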
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 124d0d8d
@@ -405,18 +405,35 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
 {
 	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
 	struct mlx5e_xdpsq_stats *stats = sq->stats;
+	struct mlx5e_xmit_data *p = xdptxd;
+	struct mlx5e_xmit_data tmp;
 
 	if (xdptxd->has_frags) {
-		/* MPWQE is enabled, but a multi-buffer packet is queued for
-		 * transmission. MPWQE can't send fragmented packets, so close
-		 * the current session and fall back to a regular WQE.
-		 */
-		if (unlikely(sq->mpwqe.wqe))
-			mlx5e_xdp_mpwqe_complete(sq);
-		return mlx5e_xmit_xdp_frame(sq, xdptxd, 0);
+		struct mlx5e_xmit_data_frags *xdptxdf =
+			container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
+
+		if (!!xdptxd->len + xdptxdf->sinfo->nr_frags > 1) {
+			/* MPWQE is enabled, but a multi-buffer packet is queued for
+			 * transmission. MPWQE can't send fragmented packets, so close
+			 * the current session and fall back to a regular WQE.
+			 */
+			if (unlikely(sq->mpwqe.wqe))
+				mlx5e_xdp_mpwqe_complete(sq);
+			return mlx5e_xmit_xdp_frame(sq, xdptxd, 0);
+		}
+		if (!xdptxd->len) {
+			skb_frag_t *frag = &xdptxdf->sinfo->frags[0];
+
+			tmp.data = skb_frag_address(frag);
+			tmp.len = skb_frag_size(frag);
+			tmp.dma_addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[0] :
+				page_pool_get_dma_addr(skb_frag_page(frag)) +
+				skb_frag_off(frag);
+			p = &tmp;
+		}
 	}
 
-	if (unlikely(xdptxd->len > sq->hw_mtu)) {
+	if (unlikely(p->len > sq->hw_mtu)) {
 		stats->err++;
 		return false;
 	}
@@ -434,7 +451,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
 		mlx5e_xdp_mpwqe_session_start(sq);
 	}
 
-	mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
+	mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);
 
 	if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
 		mlx5e_xdp_mpwqe_complete(sq);