Commit 86e41755 authored by Magnus Karlsson, committed by Alexei Starovoitov

selftests/xsk: populate fill ring based on frags needed

Populate the fill ring based on the number of frags a packet
needs. With multi-buffer support, a packet might require more than a
single fragment/buffer, so the function xsk_populate_fill_ring() needs
to consider how many buffers a packet will consume, and put that many
buffers on the fill ring for each packet it should receive. As we are
still not sending any multi-buffer packets, the function will only
produce one buffer per packet at the moment.
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/r/20230516103109.3066-9-magnus.karlsson@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 041b68f6
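For a concrete feel of the arithmetic involved (a sketch using hypothetical numbers, mirroring the ceil_u32()/pkt_nb_frags() helpers added below): with a 4096-byte frame, a 9000-byte packet occupies ceil(9000 / 4096) = 3 buffers, so three fill-ring entries must be produced for that one packet.

/* Illustrative sketch only, not part of the patch: the frame size and
 * packet length below are made-up values.
 */
#include <stdio.h>

static unsigned int ceil_u32(unsigned int a, unsigned int b)
{
	return (a + b - 1) / b;
}

int main(void)
{
	unsigned int frame_size = 4096;	/* hypothetical rx frame size */
	unsigned int pkt_len = 9000;	/* hypothetical multi-buffer packet */

	/* ceil(9000 / 4096) = 3 fill-ring buffers for this packet */
	printf("frags needed: %u\n", ceil_u32(pkt_len, frame_size));
	return 0;
}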
@@ -134,6 +134,11 @@ static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
 	__atomic_store_n(prod->producer, *prod->producer + nb, __ATOMIC_RELEASE);
 }
 
+static inline void xsk_ring_prod__cancel(struct xsk_ring_prod *prod, __u32 nb)
+{
+	prod->cached_prod -= nb;
+}
+
 static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
 {
 	__u32 entries = xsk_cons_nb_avail(cons, nb);
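The xsk_ring_prod__cancel() helper added above rewinds the cached producer index, handing back ring slots that were reserved but never written. A minimal sketch of the reserve/fill/submit/cancel pattern it enables (fq, nb_reserved and nb_used are hypothetical; reserve, fill_addr and submit are the existing helpers from this header):

	__u32 idx = 0, nb_used = 0;
	__u32 nb_reserved = 64;		/* hypothetical reservation size */

	if (xsk_ring_prod__reserve(&fq, nb_reserved, &idx) != nb_reserved)
		exit_with_error(ENOSPC);

	/* ... write nb_used (<= nb_reserved) addresses through
	 * xsk_ring_prod__fill_addr(&fq, idx++) ...
	 */

	xsk_ring_prod__submit(&fq, nb_used);			/* publish what was filled */
	xsk_ring_prod__cancel(&fq, nb_reserved - nb_used);	/* return the rest */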
@@ -531,6 +531,18 @@ static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
 	return pkt_stream;
 }
 
+static u32 ceil_u32(u32 a, u32 b)
+{
+	return (a + b - 1) / b;
+}
+
+static u32 pkt_nb_frags(u32 frame_size, struct pkt *pkt)
+{
+	if (!pkt || !pkt->valid)
+		return 1;
+
+	return ceil_u32(pkt->len, frame_size);
+}
+
 static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, int offset, u32 len)
 {
 	pkt->offset = offset;
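Note the boundary behavior of pkt_nb_frags() above: a NULL or !valid packet still counts as one frag, so the fill loop below always accounts for at least one buffer per stream slot. As a worked example (hypothetical values): for a valid 4096-byte packet, pkt_nb_frags(4096, pkt) is ceil(4096 / 4096) = 1, and only jumps to 2 at 4097 bytes.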
@@ -1159,9 +1171,11 @@ static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobje
 	ifobject->umem->base_addr = 0;
 }
 
-static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream)
+static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream,
+				   bool fill_up)
 {
-	u32 idx = 0, i, buffers_to_fill, nb_pkts;
+	u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM;
+	u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts;
 	int ret;
 
 	if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
@@ -1173,19 +1187,29 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream
 	if (ret != buffers_to_fill)
 		exit_with_error(ENOSPC);
 
-	for (i = 0; i < buffers_to_fill; i++) {
+	while (filled < buffers_to_fill) {
 		struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts);
 		u64 addr;
+		u32 i;
+
+		for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt); i++) {
+			if (!pkt) {
+				if (!fill_up)
+					break;
+				addr = filled * umem->frame_size + umem->base_addr;
+			} else if (pkt->offset >= 0) {
+				addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
+			} else {
+				addr = pkt->offset + umem_alloc_buffer(umem);
+			}
 
-		if (!pkt)
-			addr = i * umem->frame_size + umem->base_addr;
-		else if (pkt->offset >= 0)
-			addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
-		else
-			addr = pkt->offset + umem_alloc_buffer(umem);
-
-		*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
+			*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
+			if (++filled >= buffers_to_fill)
+				break;
+		}
 	}
-	xsk_ring_prod__submit(&umem->fq, i);
+	xsk_ring_prod__submit(&umem->fq, filled);
+	xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled);
 
 	pkt_stream_reset(pkt_stream);
 	umem_reset_alloc(umem);
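To trace the reworked loop above with hypothetical numbers: suppose buffers_to_fill = 4, fill_up = false, and the stream yields two single-frag packets followed by NULL. The while loop produces one address per packet (filled = 2), breaks out on the NULL packet because fill_up is false, then submits the 2 entries that were produced and cancels the 2 slots that were reserved but never written. With fill_up = true, the NULL branch instead keeps producing sequential dummy buffers until filled reaches buffers_to_fill.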
@@ -1220,7 +1244,7 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
 	if (!ifobject->rx_on)
 		return;
 
-	xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream);
+	xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream, ifobject->use_fill_ring);
 
 	ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
 	if (ret)