Commit 93ee30f3 authored by Magnus Karlsson, committed by Daniel Borkmann

xsk: i40e: get rid of useless struct xdp_umem_props

This commit gets rid of the structure xdp_umem_props. It existed to
break a dependency at one point, but this is no longer needed. The
values in the struct are instead stored directly in the xdp_umem
structure. This simplifies the xsk code as well as the af_xdp
zero-copy drivers and, as a bonus, gets rid of one internal header file.

The i40e driver is also adapted to the new interface in this commit.
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent cf484f9f
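
The shape of the change is easiest to see as a before/after sketch of the structures involved, reconstructed from the hunks below. This is a standalone illustration, not kernel code: the _before/_after suffixes are invented so both versions compile in one unit, and untouched members are elided.

/* Standalone sketch: u64 stands in for the kernel typedef. */
typedef unsigned long long u64;

/* Before: the values lived in struct xdp_umem_props, defined in the
 * internal header net/xdp/xdp_umem_props.h. */
struct xdp_umem_props {
	u64 chunk_mask;
	u64 size;
};

struct xdp_umem_before {
	struct xdp_umem_props props;	/* accessed as umem->props.chunk_mask */
	/* other members elided */
};

/* After: the two values are stored inline and the header is removed. */
struct xdp_umem_after {
	u64 chunk_mask;			/* accessed as umem->chunk_mask */
	u64 size;
	/* other members elided */
};
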
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -442,7 +442,7 @@ static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
 				    struct i40e_rx_buffer *old_bi)
 {
 	struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
-	unsigned long mask = (unsigned long)rx_ring->xsk_umem->props.chunk_mask;
+	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
 	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
 	u16 nta = rx_ring->next_to_alloc;
@@ -477,7 +477,7 @@ void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
 	rx_ring = container_of(alloc, struct i40e_ring, zca);
 	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;

-	mask = rx_ring->xsk_umem->props.chunk_mask;
+	mask = rx_ring->xsk_umem->chunk_mask;

 	nta = rx_ring->next_to_alloc;
 	bi = &rx_ring->rx_bi[nta];
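
For context on what the driver does with these two values: masking a ring handle with chunk_mask yields the start of its chunk, and adding the headroom (the umem's headroom plus the fixed XDP_PACKET_HEADROOM) yields the usable data offset. A hypothetical userspace sketch of that arithmetic follows; desc_data_offset and the example values are invented for illustration and are not part of the i40e code above.

#include <stdint.h>
#include <stdio.h>

#define XDP_PACKET_HEADROOM 256	/* fixed XDP headroom; 256 in the kernel */

/* Hypothetical helper mirroring the mask-and-add pattern above. */
static uint64_t desc_data_offset(uint64_t handle, uint64_t chunk_mask,
				 uint32_t umem_headroom)
{
	uint64_t hr = umem_headroom + XDP_PACKET_HEADROOM;

	return (handle & chunk_mask) + hr;	/* chunk start + headroom */
}

int main(void)
{
	uint64_t chunk_mask = ~((uint64_t)2048 - 1);	/* 2 KiB chunks */

	/* Handle 5000 lies in the chunk starting at 4096; with no extra
	 * umem headroom the data begins 256 bytes in, at 4352. */
	printf("%llu\n",
	       (unsigned long long)desc_data_offset(5000, chunk_mask, 0));
	return 0;
}
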
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -16,11 +16,6 @@
 struct net_device;
 struct xsk_queue;

-struct xdp_umem_props {
-	u64 chunk_mask;
-	u64 size;
-};
-
 struct xdp_umem_page {
 	void *addr;
 	dma_addr_t dma;
@@ -30,7 +25,8 @@ struct xdp_umem {
 	struct xsk_queue *fq;
 	struct xsk_queue *cq;
 	struct xdp_umem_page *pages;
-	struct xdp_umem_props props;
+	u64 chunk_mask;
+	u64 size;
 	u32 headroom;
 	u32 chunk_size_nohr;
 	struct user_struct *user;
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -312,8 +312,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)

 	umem->pid = get_task_pid(current, PIDTYPE_PID);
 	umem->address = (unsigned long)addr;
-	umem->props.chunk_mask = ~((u64)chunk_size - 1);
-	umem->props.size = size;
+	umem->chunk_mask = ~((u64)chunk_size - 1);
+	umem->size = size;
 	umem->headroom = headroom;
 	umem->chunk_size_nohr = chunk_size - headroom;
 	umem->npgs = size / PAGE_SIZE;
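
The mask computed above relies on chunk_size being a power of two, which the umem registration path requires, so ~(chunk_size - 1) clears exactly the offset bits and addr & chunk_mask rounds an address down to its chunk start. A small standalone illustration, with example values not taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t chunk_size = 2048;			/* assumed power of two */
	uint64_t chunk_mask = ~(chunk_size - 1);	/* 0xffff...f800 */
	uint64_t addr = 5000;				/* example umem offset */

	/* 5000 falls inside the chunk [4096, 6144), so masking yields 4096. */
	printf("chunk start: %llu\n", (unsigned long long)(addr & chunk_mask));
	return 0;
}
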
--- a/net/xdp/xdp_umem_props.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* XDP user-space packet buffer
- * Copyright(c) 2018 Intel Corporation.
- */
-
-#ifndef XDP_UMEM_PROPS_H_
-#define XDP_UMEM_PROPS_H_
-
-struct xdp_umem_props {
-	u64 chunk_mask;
-	u64 size;
-};
-
-#endif /* XDP_UMEM_PROPS_H_ */
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -470,8 +470,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 			goto out_unlock;
 	} else {
 		/* This xsk has its own umem. */
-		xskq_set_umem(xs->umem->fq, &xs->umem->props);
-		xskq_set_umem(xs->umem->cq, &xs->umem->props);
+		xskq_set_umem(xs->umem->fq, xs->umem->size,
+			      xs->umem->chunk_mask);
+		xskq_set_umem(xs->umem->cq, xs->umem->size,
+			      xs->umem->chunk_mask);

 		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
 		if (err)
@@ -481,8 +483,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 	xs->dev = dev;
 	xs->zc = xs->umem->zc;
 	xs->queue_id = qid;
-	xskq_set_umem(xs->rx, &xs->umem->props);
-	xskq_set_umem(xs->tx, &xs->umem->props);
+	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
+	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
 	xdp_add_sk_umem(xs->umem, xs);

 out_unlock:
--- a/net/xdp/xsk_queue.c
+++ b/net/xdp/xsk_queue.c
@@ -7,12 +7,13 @@

 #include "xsk_queue.h"

-void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props)
+void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
 {
 	if (!q)
 		return;

-	q->umem_props = *umem_props;
+	q->size = size;
+	q->chunk_mask = chunk_mask;
 }

 static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -31,7 +31,8 @@ struct xdp_umem_ring {
 };

 struct xsk_queue {
-	struct xdp_umem_props umem_props;
+	u64 chunk_mask;
+	u64 size;
 	u32 ring_mask;
 	u32 nentries;
 	u32 prod_head;
@@ -78,7 +79,7 @@ static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)

 static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
 {
-	if (addr >= q->umem_props.size) {
+	if (addr >= q->size) {
 		q->invalid_descs++;
 		return false;
 	}
@@ -92,7 +93,7 @@ static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
 	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
 	unsigned int idx = q->cons_tail & q->ring_mask;

-	*addr = READ_ONCE(ring->desc[idx]) & q->umem_props.chunk_mask;
+	*addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask;
 	if (xskq_is_valid_addr(q, *addr))
 		return addr;
@@ -173,8 +174,8 @@ static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
 	if (!xskq_is_valid_addr(q, d->addr))
 		return false;

-	if (((d->addr + d->len) & q->umem_props.chunk_mask) !=
-	    (d->addr & q->umem_props.chunk_mask)) {
+	if (((d->addr + d->len) & q->chunk_mask) !=
+	    (d->addr & q->chunk_mask)) {
 		q->invalid_descs++;
 		return false;
 	}
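
The two-mask comparison above is a boundary check: a descriptor is rejected when its start and end do not mask down to the same chunk start, i.e. when the payload would straddle two chunks. A hypothetical standalone version of the same predicate (desc_within_chunk is an invented name, not kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the check in xskq_is_valid_desc(): both ends of the
 * descriptor must fall within a single chunk. */
static bool desc_within_chunk(uint64_t addr, uint32_t len, uint64_t chunk_mask)
{
	return ((addr + len) & chunk_mask) == (addr & chunk_mask);
}

int main(void)
{
	uint64_t chunk_mask = ~((uint64_t)2048 - 1);	/* 2 KiB chunks */

	/* addr 4000 + len 200 reaches 4200, crossing the 4096 boundary:
	 * the start masks to 2048, the end masks to 4096 -> invalid. */
	printf("%d\n", desc_within_chunk(4000, 200, chunk_mask));	/* 0 */
	printf("%d\n", desc_within_chunk(4100, 200, chunk_mask));	/* 1 */
	return 0;
}
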
@@ -253,7 +254,7 @@ static inline bool xskq_empty_desc(struct xsk_queue *q)
 	return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
 }

-void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
+void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
 struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
 void xskq_destroy(struct xsk_queue *q_ops);