Commit 384a13ca authored by Toke Høiland-Jørgensen, committed by Martin KaFai Lau

net/mlx5e: Introduce wrapper for xdp_buff

Preparation for implementing HW metadata kfuncs. No functional change.
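
The wrapper is empty for now; the point is to give later patches in the series a place to stash per-packet hardware context (for example the CQE) next to the xdp_buff, so that metadata kfuncs can cast the BPF ctx pointer back to the driver struct. A rough sketch of that pattern, with hypothetical field, helper and kfunc names that are not part of this patch:

struct mlx5e_xdp_buff {
	struct xdp_buff xdp;		/* must remain the first member */
	struct mlx5_cqe64 *cqe;		/* hypothetical: added by a later patch */
};

/* RX path sketch: remember the hardware context next to the xdp_buff
 * before the XDP program runs (hypothetical helper).
 */
static inline void mlx5e_mxbuf_set_cqe(struct mlx5e_xdp_buff *mxbuf,
					struct mlx5_cqe64 *cqe)
{
	mxbuf->cqe = cqe;
}

/* Metadata kfunc sketch: the ctx a kfunc receives is really &mxbuf->xdp,
 * so it can be cast back to the wrapper because xdp is the first member.
 */
static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	const struct mlx5e_xdp_buff *mxbuf = (const void *)ctx;

	if (!mxbuf->cqe)
		return -ENODATA;
	/* read and convert the CQE timestamp here; conversion omitted */
	*timestamp = 0;
	return 0;
}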

Cc: Tariq Toukan <tariqt@nvidia.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Willem de Bruijn <willemb@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Anatoly Burakov <anatoly.burakov@intel.com>
Cc: Alexander Lobakin <alexandr.lobakin@intel.com>
Cc: Magnus Karlsson <magnus.karlsson@gmail.com>
Cc: Maryam Tahhan <mtahhan@redhat.com>
Cc: xdp-hints@xdp-project.net
Cc: netdev@vger.kernel.org
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://lore.kernel.org/r/20230119221536.3349901-16-sdf@google.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
parent 94ecc5ca
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -158,8 +158,9 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,

 /* returns true if packet was consumed by xdp */
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
-		      struct bpf_prog *prog, struct xdp_buff *xdp)
+		      struct bpf_prog *prog, struct mlx5e_xdp_buff *mxbuf)
 {
+	struct xdp_buff *xdp = &mxbuf->xdp;
 	u32 act;
 	int err;

--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -44,10 +44,14 @@
 	(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
 	 sizeof(struct mlx5_wqe_inline_seg))

+struct mlx5e_xdp_buff {
+	struct xdp_buff xdp;
+};
+
 struct mlx5e_xsk_param;
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
-		      struct bpf_prog *prog, struct xdp_buff *xdp);
+		      struct bpf_prog *prog, struct mlx5e_xdp_buff *mlctx);
 void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -8,6 +8,14 @@

 /* RX data path */

+static struct mlx5e_xdp_buff *xsk_buff_to_mxbuf(struct xdp_buff *xdp)
+{
+	/* mlx5e_xdp_buff shares its layout with xdp_buff_xsk
+	 * and private mlx5e_xdp_buff fields fall into xdp_buff_xsk->cb
+	 */
+	return (struct mlx5e_xdp_buff *)xdp;
+}
+
 int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
@@ -22,6 +30,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 		goto err;

 	BUILD_BUG_ON(sizeof(wi->alloc_units[0]) != sizeof(wi->alloc_units[0].xsk));
+	XSK_CHECK_PRIV_TYPE(struct mlx5e_xdp_buff);
 	batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units,
 				     rq->mpwqe.pages_per_wqe);
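
On the XSK path the driver never allocates the wrapper itself: xsk_buff_alloc_batch() hands out xdp_buff pointers that live inside the pool's struct xdp_buff_xsk objects, and xsk_buff_to_mxbuf() simply reinterprets them. The cast is only safe because anything mlx5e_xdp_buff adds after the embedded xdp_buff must fit in the cb[] scratch area reserved right behind it (added for this purpose earlier in the series), which is what XSK_CHECK_PRIV_TYPE() asserts at build time. A standalone sketch of that layout contract (struct contents simplified and illustrative, not the real kernel definitions):

#include <assert.h>
#include <stddef.h>

struct xdp_buff {               /* simplified: the real struct has more fields */
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
};

struct xdp_buff_xsk {           /* simplified pool buffer object */
	struct xdp_buff xdp;    /* must stay the first member */
	char cb[24];            /* driver-private scratch area */
	/* ... pool bookkeeping follows in the real struct ... */
};

struct mlx5e_xdp_buff {         /* the wrapper introduced by this patch */
	struct xdp_buff xdp;
	/* later patches may add per-packet context here */
};

/* Rough equivalent of XSK_CHECK_PRIV_TYPE(struct mlx5e_xdp_buff): the
 * wrapper must end before xdp_buff_xsk's own bookkeeping begins.
 */
static_assert(sizeof(struct mlx5e_xdp_buff) <=
	      offsetof(struct xdp_buff_xsk, cb) +
	      sizeof(((struct xdp_buff_xsk *)0)->cb),
	      "mlx5e_xdp_buff must fit in xdp_buff plus the cb[] area");

static struct mlx5e_xdp_buff *xsk_buff_to_mxbuf(struct xdp_buff *xdp)
{
	/* valid because xdp is the first member of mlx5e_xdp_buff */
	return (struct mlx5e_xdp_buff *)xdp;
}

int main(void)
{
	(void)xsk_buff_to_mxbuf;
	return 0;
}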
@@ -233,7 +242,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 						    u32 head_offset,
 						    u32 page_idx)
 {
-	struct xdp_buff *xdp = wi->alloc_units[page_idx].xsk;
+	struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[page_idx].xsk);
 	struct bpf_prog *prog;

 	/* Check packet size. Note LRO doesn't use linear SKB */
@@ -249,9 +258,9 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	 */
 	WARN_ON_ONCE(head_offset);

-	xsk_buff_set_size(xdp, cqe_bcnt);
-	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
-	net_prefetch(xdp->data);
+	xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
+	xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+	net_prefetch(mxbuf->xdp.data);

 	/* Possible flows:
 	 * - XDP_REDIRECT to XSKMAP:
@@ -269,7 +278,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	 */

 	prog = rcu_dereference(rq->xdp_prog);
-	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) {
+	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf))) {
 		if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
 			__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
 		return NULL; /* page/packet was consumed by XDP */
@@ -278,14 +287,14 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
 	 * frame. On SKB allocation failure, NULL is returned.
 	 */
-	return mlx5e_xsk_construct_skb(rq, xdp);
+	return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
 }

 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 					      struct mlx5e_wqe_frag_info *wi,
 					      u32 cqe_bcnt)
 {
-	struct xdp_buff *xdp = wi->au->xsk;
+	struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->au->xsk);
 	struct bpf_prog *prog;

 	/* wi->offset is not used in this function, because xdp->data and the
@@ -295,17 +304,17 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 	 */
 	WARN_ON_ONCE(wi->offset);

-	xsk_buff_set_size(xdp, cqe_bcnt);
-	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
-	net_prefetch(xdp->data);
+	xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
+	xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+	net_prefetch(mxbuf->xdp.data);

 	prog = rcu_dereference(rq->xdp_prog);
-	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
+	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf)))
 		return NULL; /* page/packet was consumed by XDP */

 	/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
 	 * will be handled by mlx5e_free_rx_wqe.
 	 * On SKB allocation failure, NULL is returned.
 	 */
-	return mlx5e_xsk_construct_skb(rq, xdp);
+	return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
 }
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1575,11 +1575,11 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
 	return skb;
 }

-static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
-				u32 len, struct xdp_buff *xdp)
+static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, void *va, u16 headroom,
+			     u32 len, struct mlx5e_xdp_buff *mxbuf)
 {
-	xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
-	xdp_prepare_buff(xdp, va, headroom, len, true);
+	xdp_init_buff(&mxbuf->xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
+	xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
 }

 static struct sk_buff *
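
On the regular page-pool path, by contrast, the wrapper is just a stack variable in the RX handlers: mlx5e_fill_mxbuf() initializes the embedded xdp_buff, mlx5e_xdp_handle() runs the program, and the caller reads the possibly adjusted data pointers back out before building the skb. A condensed sketch of that calling pattern, simplified from the hunks below (not literal code):

/* Condensed from mlx5e_skb_from_cqe_linear(); error handling trimmed. */
struct mlx5e_xdp_buff mxbuf;

net_prefetchw(va);				/* xdp_frame data area */
mlx5e_fill_mxbuf(rq, va, rx_headroom, cqe_bcnt, &mxbuf);
if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf))
	return NULL;				/* page/packet consumed by XDP */

/* The program may have moved data/data_meta/data_end; recompute the
 * layout before building the skb.
 */
rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
metasize    = mxbuf.xdp.data - mxbuf.xdp.data_meta;
cqe_bcnt    = mxbuf.xdp.data_end - mxbuf.xdp.data;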
@@ -1606,16 +1606,16 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
 	prog = rcu_dereference(rq->xdp_prog);
 	if (prog) {
-		struct xdp_buff xdp;
+		struct mlx5e_xdp_buff mxbuf;

 		net_prefetchw(va); /* xdp_frame data area */
-		mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
-		if (mlx5e_xdp_handle(rq, au->page, prog, &xdp))
+		mlx5e_fill_mxbuf(rq, va, rx_headroom, cqe_bcnt, &mxbuf);
+		if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf))
 			return NULL; /* page/packet was consumed by XDP */

-		rx_headroom = xdp.data - xdp.data_hard_start;
-		metasize = xdp.data - xdp.data_meta;
-		cqe_bcnt = xdp.data_end - xdp.data;
+		rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+		metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+		cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
 	}
 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1637,9 +1637,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	union mlx5e_alloc_unit *au = wi->au;
 	u16 rx_headroom = rq->buff.headroom;
 	struct skb_shared_info *sinfo;
+	struct mlx5e_xdp_buff mxbuf;
 	u32 frag_consumed_bytes;
 	struct bpf_prog *prog;
-	struct xdp_buff xdp;
 	struct sk_buff *skb;
 	dma_addr_t addr;
 	u32 truesize;
@@ -1654,8 +1654,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	net_prefetchw(va); /* xdp_frame data area */
 	net_prefetch(va + rx_headroom);

-	mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
-	sinfo = xdp_get_shared_info_from_buff(&xdp);
+	mlx5e_fill_mxbuf(rq, va, rx_headroom, frag_consumed_bytes, &mxbuf);
+	sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
 	truesize = 0;

 	cqe_bcnt -= frag_consumed_bytes;
@@ -1673,13 +1673,13 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 		dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
 					frag_consumed_bytes, rq->buff.map_dir);

-		if (!xdp_buff_has_frags(&xdp)) {
+		if (!xdp_buff_has_frags(&mxbuf.xdp)) {
 			/* Init on the first fragment to avoid cold cache access
 			 * when possible.
 			 */
 			sinfo->nr_frags = 0;
 			sinfo->xdp_frags_size = 0;
-			xdp_buff_set_frags_flag(&xdp);
+			xdp_buff_set_frags_flag(&mxbuf.xdp);
 		}

 		frag = &sinfo->frags[sinfo->nr_frags++];
@@ -1688,7 +1688,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 		skb_frag_size_set(frag, frag_consumed_bytes);

 		if (page_is_pfmemalloc(au->page))
-			xdp_buff_set_frag_pfmemalloc(&xdp);
+			xdp_buff_set_frag_pfmemalloc(&mxbuf.xdp);

 		sinfo->xdp_frags_size += frag_consumed_bytes;
 		truesize += frag_info->frag_stride;
@@ -1701,7 +1701,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	au = head_wi->au;

 	prog = rcu_dereference(rq->xdp_prog);
-	if (prog && mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+	if (prog && mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
 		if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
 			int i;

@@ -1711,22 +1711,22 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 		return NULL; /* page/packet was consumed by XDP */
 	}

-	skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
-				     xdp.data - xdp.data_hard_start,
-				     xdp.data_end - xdp.data,
-				     xdp.data - xdp.data_meta);
+	skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
+				     mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
+				     mxbuf.xdp.data_end - mxbuf.xdp.data,
+				     mxbuf.xdp.data - mxbuf.xdp.data_meta);
 	if (unlikely(!skb))
 		return NULL;

 	page_ref_inc(au->page);

-	if (unlikely(xdp_buff_has_frags(&xdp))) {
+	if (unlikely(xdp_buff_has_frags(&mxbuf.xdp))) {
 		int i;

 		/* sinfo->nr_frags is reset by build_skb, calculate again. */
 		xdp_update_skb_shared_info(skb, wi - head_wi - 1,
 					   sinfo->xdp_frags_size, truesize,
-					   xdp_buff_is_frag_pfmemalloc(&xdp));
+					   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));

 		for (i = 0; i < sinfo->nr_frags; i++) {
 			skb_frag_t *frag = &sinfo->frags[i];
@@ -2007,19 +2007,19 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	prog = rcu_dereference(rq->xdp_prog);
 	if (prog) {
-		struct xdp_buff xdp;
+		struct mlx5e_xdp_buff mxbuf;

 		net_prefetchw(va); /* xdp_frame data area */
-		mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
-		if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+		mlx5e_fill_mxbuf(rq, va, rx_headroom, cqe_bcnt, &mxbuf);
+		if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
 				__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
 			return NULL; /* page/packet was consumed by XDP */
 		}

-		rx_headroom = xdp.data - xdp.data_hard_start;
-		metasize = xdp.data - xdp.data_meta;
-		cqe_bcnt = xdp.data_end - xdp.data;
+		rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+		metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+		cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
 	}
 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);