Commit 78db77fa authored by Daniel Borkmann

Merge branch 'bpf-xskmap-perf-improvements'

Björn Töpel says:

====================
This set consists of three patches from Maciej and myself that
optimize XSKMAP lookups. In the first patch, the sockets are moved to
be stored at the tail of struct xsk_map. In the second patch, Maciej
implements map_gen_lookup() for the XSKMAP. The third patch,
introduced in this revision, moves various XSKMAP functions so that
the compiler can do more aggressive inlining.
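
For reference, the layout change in patch 1 looks roughly as follows
(a sketch, not the literal diff; the _old/_new suffixes are only for
illustration). Keeping the socket pointers in a flexible array at the
tail of the map removes one pointer dereference per lookup and lets a
generated lookup reach an entry via offsetof(struct xsk_map, xsk_map)
plus key times the pointer size:

  struct xsk_map_old {
          struct bpf_map map;
          struct xdp_sock **xsk_map;      /* separately allocated array */
          struct list_head __percpu *flush_list;
          spinlock_t lock;                /* Synchronize map updates */
  };

  struct xsk_map_new {
          struct bpf_map map;
          struct list_head __percpu *flush_list;
          spinlock_t lock;                /* Synchronize map updates */
          struct xdp_sock *xsk_map[];     /* entries follow the map header */
  };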

Based on the XDP program from tools/lib/bpf/xsk.c, where
bpf_map_lookup_elem() is explicitly called, this work yields a 5%
improvement for xdpsock's rxdrop scenario. The last patch yields a 2%
improvement.
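
For illustration, the default program installed by tools/lib/bpf/xsk.c
is roughly equivalent to the sketch below (written here in BPF C with
current libbpf map-definition conventions; the library itself emits
raw instructions). The explicit bpf_map_lookup_elem() on the XSKMAP
sits on the per-packet hot path, which is why inlining the lookup
helps:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct {
          __uint(type, BPF_MAP_TYPE_XSKMAP);
          __uint(max_entries, 64);
          __uint(key_size, sizeof(int));
          __uint(value_size, sizeof(int));
  } xsks_map SEC(".maps");

  SEC("xdp")
  int xdp_sock_prog(struct xdp_md *ctx)
  {
          int index = ctx->rx_queue_index;

          /* An entry here means an AF_XDP socket is bound to this
           * queue; look it up explicitly before redirecting.
           */
          if (bpf_map_lookup_elem(&xsks_map, &index))
                  return bpf_redirect_map(&xsks_map, index, 0);

          return XDP_PASS;
  }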

Jonathan's Acked-by: for patches 1 and 2 was carried over. Note that
the overflow checks are done in the bpf_map_area_alloc() and
bpf_map_charge_init() functions, whose size arguments were changed to
u64 in commit ff1c08e1 ("bpf: Change size to u64 for
bpf_map_{area_alloc, charge_init}()").

  [1] https://patchwork.ozlabs.org/patch/1186170/

v1->v2: * Change size/cost to size_t and use {struct, array}_size
          where appropriate. (Jakub)
v2->v3: * Proper commit message for patch 2.
v3->v4: * Change size_t to u64 to handle 32-bit overflows. (Jakub)
        * Introduced patch 3.
v4->v5: * Use BPF_SIZEOF size, instead of BPF_DW, for correct
          pointer-sized loads. (Daniel)
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents 75b0bfd2 d817991c
@@ -1009,31 +1009,6 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr,
}
#endif
#if defined(CONFIG_XDP_SOCKETS)
struct xdp_sock;
struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key);
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
struct xdp_sock *xs);
void __xsk_map_flush(struct bpf_map *map);
#else
struct xdp_sock;
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
u32 key)
{
return NULL;
}
static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
struct xdp_sock *xs)
{
return -EOPNOTSUPP;
}
static inline void __xsk_map_flush(struct bpf_map *map)
{
}
#endif
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
...
@@ -69,7 +69,14 @@ struct xdp_umem {
/* Nodes are linked in the struct xdp_sock map_list field, and used to
* track which maps a certain socket reside in.
*/
struct xsk_map;
struct xsk_map {
struct bpf_map map;
struct list_head __percpu *flush_list;
spinlock_t lock; /* Synchronize map updates */
struct xdp_sock *xsk_map[];
};
struct xsk_map_node {
struct list_head node;
struct xsk_map *map;
@@ -109,8 +116,6 @@ struct xdp_sock {
struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
@@ -134,6 +139,22 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
struct xdp_sock *xs);
void __xsk_map_flush(struct bpf_map *map);
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
u32 key)
{
struct xsk_map *m = container_of(map, struct xsk_map, map);
struct xdp_sock *xs;
if (key >= map->max_entries)
return NULL;
xs = READ_ONCE(m->xsk_map[key]);
return xs;
}
static inline u64 xsk_umem_extract_addr(u64 addr)
{
@@ -224,15 +245,6 @@ static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
return -ENOTSUPP;
}
static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
return -ENOTSUPP;
}
static inline void xsk_flush(struct xdp_sock *xs)
{
}
static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
return false;
@@ -357,6 +369,21 @@ static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
return 0;
}
static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
struct xdp_sock *xs)
{
return -EOPNOTSUPP;
}
static inline void __xsk_map_flush(struct bpf_map *map)
{
}
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
u32 key)
{
return NULL;
}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* _LINUX_XDP_SOCK_H */
@@ -9,13 +9,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
struct xsk_map {
struct bpf_map map;
struct xdp_sock **xsk_map;
struct list_head __percpu *flush_list;
spinlock_t lock; /* Synchronize map updates */
};
int xsk_map_inc(struct xsk_map *map)
{
struct bpf_map *m = &map->map;
@@ -80,9 +73,10 @@ static void xsk_map_sock_delete(struct xdp_sock *xs,
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
struct bpf_map_memory mem;
int cpu, err, numa_node;
struct xsk_map *m;
int cpu, err;
u64 cost, size;
u64 cost;
if (!capable(CAP_NET_ADMIN))
return ERR_PTR(-EPERM);
@@ -92,44 +86,35 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
return ERR_PTR(-EINVAL);
m = kzalloc(sizeof(*m), GFP_USER);
if (!m)
return ERR_PTR(-ENOMEM);
numa_node = bpf_map_attr_numa_node(attr);
size = struct_size(m, xsk_map, attr->max_entries);
cost = size + array_size(sizeof(*m->flush_list), num_possible_cpus());
err = bpf_map_charge_init(&mem, cost);
if (err < 0)
return ERR_PTR(err);
m = bpf_map_area_alloc(size, numa_node);
if (!m) {
bpf_map_charge_finish(&mem);
return ERR_PTR(-ENOMEM);
}
bpf_map_init_from_attr(&m->map, attr);
bpf_map_charge_move(&m->map.memory, &mem);
spin_lock_init(&m->lock);
cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
cost += sizeof(struct list_head) * num_possible_cpus();
/* Notice returns -EPERM on if map size is larger than memlock limit */
err = bpf_map_charge_init(&m->map.memory, cost);
if (err)
goto free_m;
err = -ENOMEM;
m->flush_list = alloc_percpu(struct list_head);
if (!m->flush_list)
goto free_charge;
if (!m->flush_list) {
bpf_map_charge_finish(&m->map.memory);
bpf_map_area_free(m);
return ERR_PTR(-ENOMEM);
}
for_each_possible_cpu(cpu)
INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));
m->xsk_map = bpf_map_area_alloc(m->map.max_entries *
sizeof(struct xdp_sock *),
m->map.numa_node);
if (!m->xsk_map)
goto free_percpu;
return &m->map;
free_percpu:
free_percpu(m->flush_list);
free_charge:
bpf_map_charge_finish(&m->map.memory);
free_m:
kfree(m);
return ERR_PTR(err);
}
static void xsk_map_free(struct bpf_map *map)
@@ -139,8 +124,7 @@ static void xsk_map_free(struct bpf_map *map)
bpf_clear_redirect_map(map);
synchronize_net();
free_percpu(m->flush_list);
bpf_map_area_free(m->xsk_map);
kfree(m);
bpf_map_area_free(m);
}
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
@@ -160,45 +144,20 @@ static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
return 0;
}
struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
{
struct xsk_map *m = container_of(map, struct xsk_map, map);
struct xdp_sock *xs;
if (key >= map->max_entries)
return NULL;
xs = READ_ONCE(m->xsk_map[key]);
return xs;
}
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
struct xdp_sock *xs)
{
struct xsk_map *m = container_of(map, struct xsk_map, map);
struct list_head *flush_list = this_cpu_ptr(m->flush_list);
int err;
err = xsk_rcv(xs, xdp);
if (err)
return err;
if (!xs->flush_node.prev)
list_add(&xs->flush_node, flush_list);
return 0;
}
void __xsk_map_flush(struct bpf_map *map)
{
struct xsk_map *m = container_of(map, struct xsk_map, map);
struct list_head *flush_list = this_cpu_ptr(m->flush_list);
struct xdp_sock *xs, *tmp;
list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
xsk_flush(xs);
__list_del_clearprev(&xs->flush_node);
}
}
static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
struct bpf_insn *insn = insn_buf;
*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
*insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
*insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
*insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0);
*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
*insn++ = BPF_MOV64_IMM(ret, 0);
return insn - insn_buf;
}
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
@@ -312,6 +271,7 @@ const struct bpf_map_ops xsk_map_ops = {
.map_free = xsk_map_free,
.map_get_next_key = xsk_map_get_next_key,
.map_lookup_elem = xsk_map_lookup_elem,
.map_gen_lookup = xsk_map_gen_lookup,
.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
.map_update_elem = xsk_map_update_elem,
.map_delete_elem = xsk_map_delete_elem,
...
@@ -196,7 +196,7 @@ static bool xsk_is_bound(struct xdp_sock *xs)
return false;
}
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
u32 len;
@@ -212,7 +212,7 @@ int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}
void xsk_flush(struct xdp_sock *xs)
static void xsk_flush(struct xdp_sock *xs)
{
xskq_produce_flush_desc(xs->rx);
xs->sk.sk_data_ready(&xs->sk);
@@ -264,6 +264,35 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
return err;
}
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
struct xdp_sock *xs)
{
struct xsk_map *m = container_of(map, struct xsk_map, map);
struct list_head *flush_list = this_cpu_ptr(m->flush_list);
int err;
err = xsk_rcv(xs, xdp);
if (err)
return err;
if (!xs->flush_node.prev)
list_add(&xs->flush_node, flush_list);
return 0;
}
void __xsk_map_flush(struct bpf_map *map)
{
struct xsk_map *m = container_of(map, struct xsk_map, map);
struct list_head *flush_list = this_cpu_ptr(m->flush_list);
struct xdp_sock *xs, *tmp;
list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
xsk_flush(xs);
__list_del_clearprev(&xs->flush_node);
}
}
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
xskq_produce_flush_addr_n(umem->cq, nb_entries);
...