Commit 8fb1814f authored by Dragos Tatulea, committed by Saeed Mahameed

net/mlx5e: RX, Remove alloc unit layout constraint for legacy rq

The mlx5e_alloc_unit union is conveniently used to store arrays of
pointers to struct page or struct xdp_buff (for xsk). The union is
currently expected to have the size of a pointer for xsk batch
allocations to work. This is convenient for the current state of the
code but makes it impossible to add a structure of a different size
to the alloc unit.

A further patch in the series will add the mlx5e_frag_page struct for
which the described size constraint will no longer hold.

This change removes the usage of mlx5e_alloc_unit union for legacy rq:

- A union of arrays is introduced (mlx5e_alloc_units) to replace the
  array of unions to allow structures of different sizes.

- Each fragment has a pointer to a unit in the mlx5e_alloc_units array.
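Illustrative sketch (not part of the commit): the short, standalone C program below contrasts the two layouts the message describes. The names entry, entries and big_entry are made up for this example, fixed-size arrays stand in for the DECLARE_FLEX_ARRAY members used in the driver, and big_entry plays the role of a larger per-fragment struct such as the mlx5e_frag_page mentioned above. With an array of unions every element is padded to the largest member, so code that walks the array as plain pointers (as the XSK batch path did via a cast) only works while all members are pointer-sized; with a union of arrays each view keeps its natural stride.

/* Minimal sketch, not driver code: illustrative types only. */
#include <stdio.h>

struct big_entry {              /* larger than a single pointer */
	void *ptr;
	unsigned long state;
};

/* Array-of-unions layout: every element has the size of the largest member. */
union entry {
	void *page;
	struct big_entry big;
};

/* Union-of-arrays layout: each member is a densely packed array of its own
 * element type, so growing one view does not change the stride of the other.
 */
union entries {
	void *pages[8];
	struct big_entry bigs[8];
};

int main(void)
{
	union entry array_of_unions[8];
	union entries union_of_arrays;

	printf("array of unions: element stride %zu bytes\n",
	       sizeof(array_of_unions[0]));       /* == sizeof(struct big_entry) */
	printf("union of arrays: page stride %zu bytes\n",
	       sizeof(union_of_arrays.pages[0])); /* == sizeof(void *) */
	return 0;
}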
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 09df0370
@@ -606,11 +606,19 @@ struct mlx5e_icosq {
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_wqe_frag_info {
-	union mlx5e_alloc_unit *au;
+	union {
+		struct page **pagep;
+		struct xdp_buff **xskp;
+	};
 	u32 offset;
 	bool last_in_page;
 };
 
+union mlx5e_alloc_units {
+	DECLARE_FLEX_ARRAY(struct page *, pages);
+	DECLARE_FLEX_ARRAY(struct xdp_buff *, xsk_buffs);
+};
+
 struct mlx5e_mpw_info {
 	u16 consumed_strides;
 	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);

@@ -702,7 +710,7 @@ struct mlx5e_rq {
 		struct {
 			struct mlx5_wq_cyc wq;
 			struct mlx5e_wqe_frag_info *frags;
-			union mlx5e_alloc_unit *alloc_units;
+			union mlx5e_alloc_units *alloc_units;
 			struct mlx5e_rq_frags_info info;
 			mlx5e_fp_skb_from_cqe skb_from_cqe;
 		} wqe;
@@ -163,13 +163,10 @@ int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
 	u32 contig, alloc;
 	int i;
 
-	/* mlx5e_init_frags_partition creates a 1:1 mapping between
-	 * rq->wqe.frags and rq->wqe.alloc_units, which allows us to
-	 * allocate XDP buffers straight into alloc_units.
+	/* Each rq->wqe.frags->xskp is 1:1 mapped to an element inside the
+	 * rq->wqe.alloc_units->xsk_buffs array allocated here.
 	 */
-	BUILD_BUG_ON(sizeof(rq->wqe.alloc_units[0]) !=
-		     sizeof(rq->wqe.alloc_units[0].xsk));
-	buffs = (struct xdp_buff **)rq->wqe.alloc_units;
+	buffs = rq->wqe.alloc_units->xsk_buffs;
 	contig = mlx5_wq_cyc_get_size(wq) - ix;
 	if (wqe_bulk <= contig) {
 		alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, wqe_bulk);

@@ -189,7 +186,7 @@ int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
 		/* Assumes log_num_frags == 0. */
 		frag = &rq->wqe.frags[j];
 
-		addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk);
+		addr = xsk_buff_xdp_get_frame_dma(*frag->xskp);
 		wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
 	}

@@ -211,11 +208,11 @@ int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
 		/* Assumes log_num_frags == 0. */
 		frag = &rq->wqe.frags[j];
 
-		frag->au->xsk = xsk_buff_alloc(rq->xsk_pool);
-		if (unlikely(!frag->au->xsk))
+		*frag->xskp = xsk_buff_alloc(rq->xsk_pool);
+		if (unlikely(!*frag->xskp))
 			return i;
 
-		addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk);
+		addr = xsk_buff_xdp_get_frame_dma(*frag->xskp);
 		wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
 	}

@@ -306,7 +303,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 					      struct mlx5_cqe64 *cqe,
 					      u32 cqe_bcnt)
 {
-	struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->au->xsk);
+	struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(*wi->xskp);
 	struct bpf_prog *prog;
 
 	/* wi->offset is not used in this function, because xdp->data and the
@@ -499,15 +499,9 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
 	struct mlx5e_wqe_frag_info *prev = NULL;
 	int i;
 
-	if (rq->xsk_pool) {
-		/* Assumptions used by XSK batched allocator. */
-		WARN_ON(rq->wqe.info.num_frags != 1);
-		WARN_ON(rq->wqe.info.log_num_frags != 0);
-		WARN_ON(rq->wqe.info.arr[0].frag_stride != PAGE_SIZE);
-	}
+	WARN_ON(rq->xsk_pool);
 
-	next_frag.au = &rq->wqe.alloc_units[0];
+	next_frag.pagep = &rq->wqe.alloc_units->pages[0];
 
 	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
 		struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
 		struct mlx5e_wqe_frag_info *frag =

@@ -516,7 +510,8 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
 
 		for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
 			if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
-				next_frag.au++;
+				/* Pages are assigned at runtime. */
+				next_frag.pagep++;
 				next_frag.offset = 0;
 				if (prev)
 					prev->last_in_page = true;

@@ -533,22 +528,59 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
 		prev->last_in_page = true;
 }
 
-static int mlx5e_init_au_list(struct mlx5e_rq *rq, int wq_sz, int node)
+static void mlx5e_init_xsk_buffs(struct mlx5e_rq *rq)
+{
+	int i;
+
+	/* Assumptions used by XSK batched allocator. */
+	WARN_ON(rq->wqe.info.num_frags != 1);
+	WARN_ON(rq->wqe.info.log_num_frags != 0);
+	WARN_ON(rq->wqe.info.arr[0].frag_stride != PAGE_SIZE);
+
+	/* Considering the above assumptions a fragment maps to a single
+	 * xsk_buff.
+	 */
+	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++)
+		rq->wqe.frags[i].xskp = &rq->wqe.alloc_units->xsk_buffs[i];
+}
+
+static int mlx5e_init_wqe_alloc_info(struct mlx5e_rq *rq, int node)
 {
+	int wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
 	int len = wq_sz << rq->wqe.info.log_num_frags;
+	struct mlx5e_wqe_frag_info *frags;
+	union mlx5e_alloc_units *aus;
+	int aus_sz;
+
+	if (rq->xsk_pool)
+		aus_sz = sizeof(*aus->xsk_buffs);
+	else
+		aus_sz = sizeof(*aus->pages);
 
-	rq->wqe.alloc_units = kvzalloc_node(array_size(len, sizeof(*rq->wqe.alloc_units)),
-					    GFP_KERNEL, node);
-	if (!rq->wqe.alloc_units)
+	aus = kvzalloc_node(array_size(len, aus_sz), GFP_KERNEL, node);
+	if (!aus)
 		return -ENOMEM;
 
-	mlx5e_init_frags_partition(rq);
+	frags = kvzalloc_node(array_size(len, sizeof(*frags)), GFP_KERNEL, node);
+	if (!frags) {
+		kvfree(aus);
+		return -ENOMEM;
+	}
+
+	rq->wqe.alloc_units = aus;
+	rq->wqe.frags = frags;
+
+	if (rq->xsk_pool)
+		mlx5e_init_xsk_buffs(rq);
+	else
+		mlx5e_init_frags_partition(rq);
 
 	return 0;
 }
 
-static void mlx5e_free_au_list(struct mlx5e_rq *rq)
+static void mlx5e_free_wqe_alloc_info(struct mlx5e_rq *rq)
 {
+	kvfree(rq->wqe.frags);
 	kvfree(rq->wqe.alloc_units);
 }
@@ -778,18 +810,9 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		rq->wqe.info = rqp->frags_info;
 		rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
 
-		rq->wqe.frags =
-			kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
-					(wq_sz << rq->wqe.info.log_num_frags)),
-				      GFP_KERNEL, node);
-		if (!rq->wqe.frags) {
-			err = -ENOMEM;
-			goto err_rq_wq_destroy;
-		}
-
-		err = mlx5e_init_au_list(rq, wq_sz, node);
+		err = mlx5e_init_wqe_alloc_info(rq, node);
 		if (err)
-			goto err_rq_frags;
+			goto err_rq_wq_destroy;
 	}
 
 	if (xsk) {

@@ -888,9 +911,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		mlx5e_free_mpwqe_rq_drop_page(rq);
 		break;
 	default: /* MLX5_WQ_TYPE_CYCLIC */
-		mlx5e_free_au_list(rq);
-err_rq_frags:
-		kvfree(rq->wqe.frags);
+		mlx5e_free_wqe_alloc_info(rq);
 	}
 err_rq_wq_destroy:
 	mlx5_wq_destroy(&rq->wq_ctrl);

@@ -921,8 +942,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 		mlx5e_rq_free_shampo(rq);
 		break;
 	default: /* MLX5_WQ_TYPE_CYCLIC */
-		kvfree(rq->wqe.frags);
-		mlx5e_free_au_list(rq);
+		mlx5e_free_wqe_alloc_info(rq);
 	}
 
 	for (i = rq->page_cache.head; i != rq->page_cache.tail;
@@ -371,12 +371,12 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
 	int err = 0;
 
 	if (!frag->offset)
-		/* On first frag (offset == 0), replenish page (alloc_unit actually).
-		 * Other frags that point to the same alloc_unit (with a different
+		/* On first frag (offset == 0), replenish page.
+		 * Other frags that point to the same page (with a different
 		 * offset) should just use the new one without replenishing again
 		 * by themselves.
 		 */
-		err = mlx5e_page_alloc_pool(rq, &frag->au->page);
+		err = mlx5e_page_alloc_pool(rq, frag->pagep);
 
 	return err;
 }

@@ -386,7 +386,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
 				     bool recycle)
 {
 	if (frag->last_in_page)
-		mlx5e_page_release_dynamic(rq, frag->au->page, recycle);
+		mlx5e_page_release_dynamic(rq, *frag->pagep, recycle);
 }
 
 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)

@@ -410,7 +410,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
 			goto free_frags;
 
 		headroom = i == 0 ? rq->buff.headroom : 0;
-		addr = page_pool_get_dma_addr(frag->au->page);
+		addr = page_pool_get_dma_addr(*frag->pagep);
 		wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
 	}

@@ -434,7 +434,7 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
 		 * put into the Reuse Ring, because there is no way to return
 		 * the page to the userspace when the interface goes down.
 		 */
-		xsk_buff_free(wi->au->xsk);
+		xsk_buff_free(*wi->xskp);
 		return;
 	}

@@ -1587,8 +1587,8 @@ static struct sk_buff *
 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
 			  struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
 {
-	union mlx5e_alloc_unit *au = wi->au;
 	u16 rx_headroom = rq->buff.headroom;
+	struct page *page = *wi->pagep;
 	struct bpf_prog *prog;
 	struct sk_buff *skb;
 	u32 metasize = 0;

@@ -1596,11 +1596,11 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
 	dma_addr_t addr;
 	u32 frag_size;
 
-	va = page_address(au->page) + wi->offset;
+	va = page_address(page) + wi->offset;
 	data = va + rx_headroom;
 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
 
-	addr = page_pool_get_dma_addr(au->page);
+	addr = page_pool_get_dma_addr(page);
 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
 				      frag_size, rq->buff.map_dir);
 	net_prefetch(data);

@@ -1624,7 +1624,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
 		return NULL;
 
 	/* queue up for recycling/reuse */
-	page_ref_inc(au->page);
+	page_ref_inc(page);
 
 	return skb;
 }

@@ -1635,8 +1635,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 {
 	struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
 	struct mlx5e_wqe_frag_info *head_wi = wi;
-	union mlx5e_alloc_unit *au = wi->au;
 	u16 rx_headroom = rq->buff.headroom;
+	struct page *page = *wi->pagep;
 	struct skb_shared_info *sinfo;
 	struct mlx5e_xdp_buff mxbuf;
 	u32 frag_consumed_bytes;

@@ -1646,10 +1646,10 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	u32 truesize;
 	void *va;
 
-	va = page_address(au->page) + wi->offset;
+	va = page_address(page) + wi->offset;
 	frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
 
-	addr = page_pool_get_dma_addr(au->page);
+	addr = page_pool_get_dma_addr(page);
 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
 				      rq->buff.frame0_sz, rq->buff.map_dir);
 	net_prefetchw(va); /* xdp_frame data area */

@@ -1666,11 +1666,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	while (cqe_bcnt) {
 		skb_frag_t *frag;
 
-		au = wi->au;
+		page = *wi->pagep;
 
 		frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
 
-		addr = page_pool_get_dma_addr(au->page);
+		addr = page_pool_get_dma_addr(page);
 		dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
 					frag_consumed_bytes, rq->buff.map_dir);

@@ -1684,11 +1684,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 		}
 
 		frag = &sinfo->frags[sinfo->nr_frags++];
-		__skb_frag_set_page(frag, au->page);
+		__skb_frag_set_page(frag, page);
 		skb_frag_off_set(frag, wi->offset);
 		skb_frag_size_set(frag, frag_consumed_bytes);
 
-		if (page_is_pfmemalloc(au->page))
+		if (page_is_pfmemalloc(page))
 			xdp_buff_set_frag_pfmemalloc(&mxbuf.xdp);
 		sinfo->xdp_frags_size += frag_consumed_bytes;

@@ -1717,7 +1717,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	if (unlikely(!skb))
 		return NULL;
 
-	page_ref_inc(head_wi->au->page);
+	page_ref_inc(*head_wi->pagep);
 
 	if (xdp_buff_has_frags(&mxbuf.xdp)) {
 		int i;