Commit 64fe8c06 authored by Björn Töpel, committed by Daniel Borkmann

xsk: Store struct xdp_sock as a flexible array member of the XSKMAP

Prior to this commit, the XDP socket instances of the XSKMAP were
stored in a separately allocated array. Now, we store the sockets as a
flexible array member, in a similar fashion to the arraymap. Doing so,
we do less pointer chasing in the lookup.
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com>
Link: https://lore.kernel.org/bpf/20191101110346.15004-2-bjorn.topel@gmail.com
parent 75b0bfd2
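
For illustration only, not part of the commit: a minimal userspace sketch of the layout change, with hypothetical stub types (xsk_map_old, xsk_map_new, xdp_sock_stub) standing in for the kernel structs. With a flexible array member, the socket pointers live in the same allocation as the map header, so a lookup is one dereference instead of two.

	/* Hypothetical stand-ins for the kernel types. */
	#include <stdio.h>
	#include <stdlib.h>

	struct xdp_sock_stub { int queue_id; };

	struct xsk_map_old {            /* before: header points at a second allocation */
		unsigned int max_entries;
		struct xdp_sock_stub **xsk_map;
	};

	struct xsk_map_new {            /* after: entries inline, one allocation */
		unsigned int max_entries;
		struct xdp_sock_stub *xsk_map[];
	};

	int main(void)
	{
		unsigned int entries = 4;

		/* One allocation covers header plus entry array, which is
		 * what struct_size() computes in the commit below. */
		struct xsk_map_new *m = calloc(1, sizeof(*m) +
					       entries * sizeof(struct xdp_sock_stub *));
		if (!m)
			return 1;
		m->max_entries = entries;

		/* Lookup reads straight out of the map allocation: m->xsk_map[key];
		 * with xsk_map_old, this would first load the separate array pointer. */
		printf("slot 0 = %p\n", (void *)m->xsk_map[0]);
		free(m);
		return 0;
	}

Note that xsk_map_old is never instantiated here; it only shows the pre-commit layout with its extra level of indirection.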
...@@ -11,9 +11,9 @@ ...@@ -11,9 +11,9 @@
struct xsk_map { struct xsk_map {
struct bpf_map map; struct bpf_map map;
struct xdp_sock **xsk_map;
struct list_head __percpu *flush_list; struct list_head __percpu *flush_list;
spinlock_t lock; /* Synchronize map updates */ spinlock_t lock; /* Synchronize map updates */
struct xdp_sock *xsk_map[];
}; };
int xsk_map_inc(struct xsk_map *map) int xsk_map_inc(struct xsk_map *map)
...@@ -80,9 +80,10 @@ static void xsk_map_sock_delete(struct xdp_sock *xs, ...@@ -80,9 +80,10 @@ static void xsk_map_sock_delete(struct xdp_sock *xs,
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{ {
struct bpf_map_memory mem;
int cpu, err, numa_node;
struct xsk_map *m; struct xsk_map *m;
int cpu, err; u64 cost, size;
u64 cost;
if (!capable(CAP_NET_ADMIN)) if (!capable(CAP_NET_ADMIN))
return ERR_PTR(-EPERM); return ERR_PTR(-EPERM);
...@@ -92,44 +93,35 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) ...@@ -92,44 +93,35 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)) attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
m = kzalloc(sizeof(*m), GFP_USER); numa_node = bpf_map_attr_numa_node(attr);
if (!m) size = struct_size(m, xsk_map, attr->max_entries);
cost = size + array_size(sizeof(*m->flush_list), num_possible_cpus());
err = bpf_map_charge_init(&mem, cost);
if (err < 0)
return ERR_PTR(err);
m = bpf_map_area_alloc(size, numa_node);
if (!m) {
bpf_map_charge_finish(&mem);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
}
bpf_map_init_from_attr(&m->map, attr); bpf_map_init_from_attr(&m->map, attr);
bpf_map_charge_move(&m->map.memory, &mem);
spin_lock_init(&m->lock); spin_lock_init(&m->lock);
cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
cost += sizeof(struct list_head) * num_possible_cpus();
/* Notice returns -EPERM on if map size is larger than memlock limit */
err = bpf_map_charge_init(&m->map.memory, cost);
if (err)
goto free_m;
err = -ENOMEM;
m->flush_list = alloc_percpu(struct list_head); m->flush_list = alloc_percpu(struct list_head);
if (!m->flush_list) if (!m->flush_list) {
goto free_charge; bpf_map_charge_finish(&m->map.memory);
bpf_map_area_free(m);
return ERR_PTR(-ENOMEM);
}
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu)); INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));
m->xsk_map = bpf_map_area_alloc(m->map.max_entries *
sizeof(struct xdp_sock *),
m->map.numa_node);
if (!m->xsk_map)
goto free_percpu;
return &m->map; return &m->map;
free_percpu:
free_percpu(m->flush_list);
free_charge:
bpf_map_charge_finish(&m->map.memory);
free_m:
kfree(m);
return ERR_PTR(err);
} }
static void xsk_map_free(struct bpf_map *map) static void xsk_map_free(struct bpf_map *map)
...@@ -139,8 +131,7 @@ static void xsk_map_free(struct bpf_map *map) ...@@ -139,8 +131,7 @@ static void xsk_map_free(struct bpf_map *map)
bpf_clear_redirect_map(map); bpf_clear_redirect_map(map);
synchronize_net(); synchronize_net();
free_percpu(m->flush_list); free_percpu(m->flush_list);
bpf_map_area_free(m->xsk_map); bpf_map_area_free(m);
kfree(m);
} }
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key) static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
......
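
Again for illustration: the new allocation path computes one size for the whole map with struct_size() and charges the per-CPU flush lists on top of it. A rough userspace sketch of that arithmetic, using the same hypothetical stub types; the kernel helpers struct_size() and array_size() additionally saturate on overflow, which is omitted here.

	#include <stdint.h>
	#include <stdio.h>

	struct xdp_sock_stub { int queue_id; };       /* stand-in for struct xdp_sock */
	struct list_head_stub { void *next, *prev; }; /* stand-in for struct list_head */

	struct xsk_map_stub {                         /* header + flexible array, as in the commit */
		unsigned int max_entries;
		struct xdp_sock_stub *xsk_map[];
	};

	int main(void)
	{
		uint64_t max_entries = 64, ncpus = 8;

		/* size = struct_size(m, xsk_map, attr->max_entries):
		 * the map header plus the inline pointer array. */
		uint64_t size = sizeof(struct xsk_map_stub) +
				max_entries * sizeof(struct xdp_sock_stub *);

		/* cost = size + array_size(sizeof(*m->flush_list), num_possible_cpus()):
		 * the flush lists are charged here but still allocated per-CPU. */
		uint64_t cost = size + ncpus * sizeof(struct list_head_stub);

		printf("size=%llu cost=%llu\n",
		       (unsigned long long)size, (unsigned long long)cost);
		return 0;
	}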