Commit 4e64c835 authored by Björn Töpel, committed by Daniel Borkmann

xsk: proper fill queue descriptor validation

Previously the fill queue descriptor was not copied to kernel space
prior to validating it, making it possible for userland to change the
descriptor after the kernel had validated it.
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent bd3a08aa
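The commit message above describes a double-fetch (time-of-check-to-time-of-use) bug: the fill queue ring is mapped into userspace, so validating a descriptor in place and then reading the ring slot again lets userland swap in an unvalidated value between the check and the use. Below is a minimal, stand-alone userspace sketch of that pattern and its fix; it is not kernel code, it uses C11 atomics where the kernel uses READ_ONCE(), and the names shared_ring, validate_id, consume_racy and consume_safe are invented for the illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE  64          /* illustrative ring size                  */
#define NUM_FRAMES 16          /* ids >= NUM_FRAMES are treated as invalid */

/* Ring memory that another thread or process may rewrite at any time. */
static _Atomic uint32_t shared_ring[RING_SIZE];

static bool validate_id(uint32_t id)
{
	return id < NUM_FRAMES;
}

/*
 * Racy pattern: validate the shared slot, then read it again when
 * using it.  The second read can observe a value written after the
 * validation, so an unvalidated id can slip through.
 */
static bool consume_racy(uint32_t idx, uint32_t *out)
{
	if (!validate_id(atomic_load(&shared_ring[idx])))
		return false;
	*out = atomic_load(&shared_ring[idx]); /* may differ from the checked value */
	return true;
}

/*
 * Fixed pattern: copy the descriptor out of shared memory exactly
 * once, then validate and use only the local copy.
 */
static bool consume_safe(uint32_t idx, uint32_t *out)
{
	uint32_t id = atomic_load(&shared_ring[idx]); /* single fetch, like READ_ONCE() */

	if (!validate_id(id))
		return false;
	*out = id; /* use the value that was actually checked */
	return true;
}

int main(void)
{
	uint32_t id;

	atomic_store(&shared_ring[0], 3);
	if (consume_safe(0, &id))
		printf("consumed id %u\n", id);
	(void)consume_racy; /* kept only to show the contrast */
	return 0;
}

In the patch itself the single fetch is the READ_ONCE(ring->desc[idx]) copy into a caller-supplied variable in xskq_validate_id() and xskq_validate_desc(); every later use, including xdp_umem_get_data_with_headroom() and xskq_produce_batch_desc(), then operates on that kernel-side copy rather than on the shared ring slot.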
@@ -41,20 +41,19 @@ bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 
 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-	u32 *id, len = xdp->data_end - xdp->data;
+	u32 id, len = xdp->data_end - xdp->data;
 	void *buffer;
-	int err = 0;
+	int err;
 
 	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
 		return -EINVAL;
 
-	id = xskq_peek_id(xs->umem->fq);
-	if (!id)
+	if (!xskq_peek_id(xs->umem->fq, &id))
 		return -ENOSPC;
 
-	buffer = xdp_umem_get_data_with_headroom(xs->umem, *id);
+	buffer = xdp_umem_get_data_with_headroom(xs->umem, id);
 	memcpy(buffer, xdp->data, len);
-	err = xskq_produce_batch_desc(xs->rx, *id, len,
+	err = xskq_produce_batch_desc(xs->rx, id, len,
 				      xs->umem->frame_headroom);
 	if (!err)
 		xskq_discard_id(xs->umem->fq);
@@ -85,14 +85,15 @@ static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
 	return true;
 }
 
-static inline u32 *xskq_validate_id(struct xsk_queue *q)
+static inline u32 *xskq_validate_id(struct xsk_queue *q, u32 *id)
 {
 	while (q->cons_tail != q->cons_head) {
 		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
 		unsigned int idx = q->cons_tail & q->ring_mask;
 
-		if (xskq_is_valid_id(q, ring->desc[idx]))
-			return &ring->desc[idx];
+		*id = READ_ONCE(ring->desc[idx]);
+		if (xskq_is_valid_id(q, *id))
+			return id;
 
 		q->cons_tail++;
 	}
@@ -100,28 +101,22 @@ static inline u32 *xskq_validate_id(struct xsk_queue *q)
 	return NULL;
 }
 
-static inline u32 *xskq_peek_id(struct xsk_queue *q)
+static inline u32 *xskq_peek_id(struct xsk_queue *q, u32 *id)
 {
-	struct xdp_umem_ring *ring;
-
 	if (q->cons_tail == q->cons_head) {
 		WRITE_ONCE(q->ring->consumer, q->cons_tail);
 		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
 
 		/* Order consumer and data */
 		smp_rmb();
-
-		return xskq_validate_id(q);
 	}
 
-	ring = (struct xdp_umem_ring *)q->ring;
-	return &ring->desc[q->cons_tail & q->ring_mask];
+	return xskq_validate_id(q, id);
 }
 
 static inline void xskq_discard_id(struct xsk_queue *q)
 {
 	q->cons_tail++;
-	(void)xskq_validate_id(q);
 }
 
 static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
@@ -174,11 +169,9 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
 		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
 		unsigned int idx = q->cons_tail & q->ring_mask;
 
-		if (xskq_is_valid_desc(q, &ring->desc[idx])) {
-			if (desc)
-				*desc = ring->desc[idx];
+		*desc = READ_ONCE(ring->desc[idx]);
+		if (xskq_is_valid_desc(q, desc))
 			return desc;
-		}
 
 		q->cons_tail++;
 	}
@@ -189,27 +182,20 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
 static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
 					      struct xdp_desc *desc)
 {
-	struct xdp_rxtx_ring *ring;
-
 	if (q->cons_tail == q->cons_head) {
 		WRITE_ONCE(q->ring->consumer, q->cons_tail);
 		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
 
 		/* Order consumer and data */
 		smp_rmb();
-
-		return xskq_validate_desc(q, desc);
 	}
 
-	ring = (struct xdp_rxtx_ring *)q->ring;
-	*desc = ring->desc[q->cons_tail & q->ring_mask];
-	return desc;
+	return xskq_validate_desc(q, desc);
 }
 
 static inline void xskq_discard_desc(struct xsk_queue *q)
 {
 	q->cons_tail++;
-	(void)xskq_validate_desc(q, NULL);
 }
 
 static inline int xskq_produce_batch_desc(struct xsk_queue *q,