Commit 291cfe36 authored by Fletcher Dunn, committed by Daniel Borkmann

libbpf, xsk: Init all ring members in xsk_umem__create and xsk_socket__create

Fix a sharp edge in xsk_umem__create and xsk_socket__create.  Almost all of
the members of the ring buffer structs are initialized, but the "cached_xxx"
variables are not all initialized; the caller is required to zero them.  This
is needlessly dangerous, and forgetting to do so can have very bad results.
For example, uninitialized cached values can cause xsk_prod_nb_free and
xsk_cons_nb_avail to return values greater than the size of the queue, and
xsk_ring_cons__peek can return an index that does not refer to an item that
has actually been queued.
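
For context, the two helpers named above consume the cached values roughly as
follows. This is a simplified sketch closely modelled on the inline helpers in
xsk.h, abridged here rather than quoted verbatim:

	static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
	{
		__u32 free_entries = r->cached_cons - r->cached_prod;

		if (free_entries >= nb)
			return free_entries;

		/* cached_cons is kept "size" bigger than the real consumer
		 * pointer so that free_entries can be computed with the
		 * single subtraction above.
		 */
		r->cached_cons = *r->consumer + r->size;
		return r->cached_cons - r->cached_prod;
	}

	static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
	{
		__u32 entries = r->cached_prod - r->cached_cons;

		if (entries == 0) {
			r->cached_prod = *r->producer;
			entries = r->cached_prod - r->cached_cons;
		}

		return (entries > nb) ? nb : entries;
	}

If cached_prod and cached_cons start out as whatever happened to be in memory,
the unsigned subtractions above can report far more free or available entries
than the ring actually holds, which is how xsk_ring_cons__peek can hand back an
index that was never produced.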

I have confirmed that without this change, my program misbehaves unless I
memset the ring buffer structs to zero before calling these functions.  With
the change applied, my program works correctly with or without the memset.
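
For illustration, this is the kind of workaround a caller needed before this
fix. The code is hypothetical (the setup_rings helper and its parameters are
made up for this example; passing NULL configs simply selects libbpf's
defaults):

	#include <string.h>
	#include <bpf/xsk.h>

	static int setup_rings(void *umem_area, __u64 size, const char *ifname,
			       __u32 queue_id,
			       struct xsk_ring_prod *fill, struct xsk_ring_cons *comp,
			       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
			       struct xsk_umem **umem, struct xsk_socket **xsk)
	{
		int err;

		/* Workaround needed before this change: zero the ring structs
		 * so that the cached_* members do not start out as garbage.
		 */
		memset(fill, 0, sizeof(*fill));
		memset(comp, 0, sizeof(*comp));
		memset(rx, 0, sizeof(*rx));
		memset(tx, 0, sizeof(*tx));

		err = xsk_umem__create(umem, umem_area, size, fill, comp, NULL);
		if (err)
			return err;

		return xsk_socket__create(xsk, ifname, queue_id, *umem, rx, tx, NULL);
	}

With this fix, the memset calls become unnecessary, because the create
functions now initialize every cached_* member themselves.
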
Signed-off-by: Fletcher Dunn <fletcherd@valvesoftware.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/85f12913cde94b19bfcb598344701c38@valvesoftware.com
parent 2cf69d3f
@@ -280,7 +280,11 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
 	fill->consumer = map + off.fr.consumer;
 	fill->flags = map + off.fr.flags;
 	fill->ring = map + off.fr.desc;
-	fill->cached_cons = umem->config.fill_size;
+	fill->cached_prod = *fill->producer;
+	/* cached_cons is "size" bigger than the real consumer pointer
+	 * See xsk_prod_nb_free
+	 */
+	fill->cached_cons = *fill->consumer + umem->config.fill_size;
 
 	map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
 		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
@@ -297,6 +301,8 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
 	comp->consumer = map + off.cr.consumer;
 	comp->flags = map + off.cr.flags;
 	comp->ring = map + off.cr.desc;
+	comp->cached_prod = *comp->producer;
+	comp->cached_cons = *comp->consumer;
 
 	*umem_ptr = umem;
 	return 0;
@@ -672,6 +678,8 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
 		rx->consumer = rx_map + off.rx.consumer;
 		rx->flags = rx_map + off.rx.flags;
 		rx->ring = rx_map + off.rx.desc;
+		rx->cached_prod = *rx->producer;
+		rx->cached_cons = *rx->consumer;
 	}
 	xsk->rx = rx;
 
@@ -691,7 +699,11 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
 		tx->consumer = tx_map + off.tx.consumer;
 		tx->flags = tx_map + off.tx.flags;
 		tx->ring = tx_map + off.tx.desc;
-		tx->cached_cons = xsk->config.tx_size;
+		tx->cached_prod = *tx->producer;
+		/* cached_cons is r->size bigger than the real consumer pointer
+		 * See xsk_prod_nb_free
+		 */
+		tx->cached_cons = *tx->consumer + xsk->config.tx_size;
 	}
 	xsk->tx = tx;