Commit d3406913 authored by Alexei Starovoitov

Merge branch 'devmap_hash'

Toke Høiland-Jørgensen says:

====================
This series adds a new map type, devmap_hash, that works like the existing
devmap type, but using a hash-based indexing scheme. This is useful for the use
case where a devmap is indexed by ifindex (for instance for use with the routing
table lookup helper). For this use case, the regular devmap needs to be sized
after the maximum ifindex number, not the number of devices in it. A hash-based
indexing scheme makes it possible to size the map after the number of devices it
should contain instead.

This was previously part of my patch series that also turned the regular
bpf_redirect() helper into a map-based one; for this series I just pulled out
the patches that introduced the new map type.

Changelog:

v5:

- Dynamically set the number of hash buckets by rounding up max_entries to the
  nearest power of two (mirroring the regular hashmap), as suggested by Jesper.

v4:

- Remove check_memlock parameter that was left over from an earlier patch
  series.
- Reorder struct members to avoid holes.

v3:

- Rework the split into different patches
- Use spin_lock_irqsave()
- Also add documentation and bash completion definitions for bpftool

v2:

- Split commit adding the new map type so uapi and tools changes are separate.

Changes to these patches since the previous series:

- Rebase on top of the other devmap changes (makes this one simpler!)
- Don't enforce key==val, but allow arbitrary indexes.
- Rename the type to devmap_hash to reflect the fact that it's just a hashmap now.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 475e31f8 1375dc4a
...@@ -713,7 +713,7 @@ struct xdp_buff; ...@@ -713,7 +713,7 @@ struct xdp_buff;
struct sk_buff; struct sk_buff;
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index); struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_flush(struct bpf_map *map); void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx); struct net_device *dev_rx);
...@@ -721,7 +721,6 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, ...@@ -721,7 +721,6 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
struct bpf_prog *xdp_prog); struct bpf_prog *xdp_prog);
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
void __cpu_map_flush(struct bpf_map *map); void __cpu_map_flush(struct bpf_map *map);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
struct net_device *dev_rx); struct net_device *dev_rx);
...@@ -801,8 +800,10 @@ static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, ...@@ -801,8 +800,10 @@ static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
return NULL; return NULL;
} }
static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index) static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
u32 key)
{ {
return NULL;
} }
static inline void __dev_map_flush(struct bpf_map *map) static inline void __dev_map_flush(struct bpf_map *map)
...@@ -834,10 +835,6 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) ...@@ -834,10 +835,6 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
return NULL; return NULL;
} }
static inline void __cpu_map_insert_ctx(struct bpf_map *map, u32 index)
{
}
static inline void __cpu_map_flush(struct bpf_map *map) static inline void __cpu_map_flush(struct bpf_map *map)
{ {
} }
......
...@@ -62,6 +62,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) ...@@ -62,6 +62,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
#ifdef CONFIG_NET #ifdef CONFIG_NET
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
#if defined(CONFIG_BPF_STREAM_PARSER) #if defined(CONFIG_BPF_STREAM_PARSER)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
......
...@@ -175,7 +175,8 @@ struct _bpf_dtab_netdev { ...@@ -175,7 +175,8 @@ struct _bpf_dtab_netdev {
#endif /* __DEVMAP_OBJ_TYPE */ #endif /* __DEVMAP_OBJ_TYPE */
#define devmap_ifindex(fwd, map) \ #define devmap_ifindex(fwd, map) \
((map->map_type == BPF_MAP_TYPE_DEVMAP) ? \ ((map->map_type == BPF_MAP_TYPE_DEVMAP || \
map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) ? \
((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0) ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0)
#define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \ #define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \
......
...@@ -134,6 +134,7 @@ enum bpf_map_type { ...@@ -134,6 +134,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK, BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE, BPF_MAP_TYPE_SK_STORAGE,
BPF_MAP_TYPE_DEVMAP_HASH,
}; };
/* Note that tracing related programs such as /* Note that tracing related programs such as
......
This diff is collapsed.
...@@ -3457,6 +3457,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, ...@@ -3457,6 +3457,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
goto error; goto error;
break; break;
case BPF_MAP_TYPE_DEVMAP: case BPF_MAP_TYPE_DEVMAP:
case BPF_MAP_TYPE_DEVMAP_HASH:
if (func_id != BPF_FUNC_redirect_map && if (func_id != BPF_FUNC_redirect_map &&
func_id != BPF_FUNC_map_lookup_elem) func_id != BPF_FUNC_map_lookup_elem)
goto error; goto error;
...@@ -3539,6 +3540,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, ...@@ -3539,6 +3540,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
break; break;
case BPF_FUNC_redirect_map: case BPF_FUNC_redirect_map:
if (map->map_type != BPF_MAP_TYPE_DEVMAP && if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
map->map_type != BPF_MAP_TYPE_CPUMAP && map->map_type != BPF_MAP_TYPE_CPUMAP &&
map->map_type != BPF_MAP_TYPE_XSKMAP) map->map_type != BPF_MAP_TYPE_XSKMAP)
goto error; goto error;
......
...@@ -3517,7 +3517,8 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, ...@@ -3517,7 +3517,8 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
int err; int err;
switch (map->map_type) { switch (map->map_type) {
case BPF_MAP_TYPE_DEVMAP: { case BPF_MAP_TYPE_DEVMAP:
case BPF_MAP_TYPE_DEVMAP_HASH: {
struct bpf_dtab_netdev *dst = fwd; struct bpf_dtab_netdev *dst = fwd;
err = dev_map_enqueue(dst, xdp, dev_rx); err = dev_map_enqueue(dst, xdp, dev_rx);
...@@ -3554,6 +3555,7 @@ void xdp_do_flush_map(void) ...@@ -3554,6 +3555,7 @@ void xdp_do_flush_map(void)
if (map) { if (map) {
switch (map->map_type) { switch (map->map_type) {
case BPF_MAP_TYPE_DEVMAP: case BPF_MAP_TYPE_DEVMAP:
case BPF_MAP_TYPE_DEVMAP_HASH:
__dev_map_flush(map); __dev_map_flush(map);
break; break;
case BPF_MAP_TYPE_CPUMAP: case BPF_MAP_TYPE_CPUMAP:
...@@ -3574,6 +3576,8 @@ static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index) ...@@ -3574,6 +3576,8 @@ static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
switch (map->map_type) { switch (map->map_type) {
case BPF_MAP_TYPE_DEVMAP: case BPF_MAP_TYPE_DEVMAP:
return __dev_map_lookup_elem(map, index); return __dev_map_lookup_elem(map, index);
case BPF_MAP_TYPE_DEVMAP_HASH:
return __dev_map_hash_lookup_elem(map, index);
case BPF_MAP_TYPE_CPUMAP: case BPF_MAP_TYPE_CPUMAP:
return __cpu_map_lookup_elem(map, index); return __cpu_map_lookup_elem(map, index);
case BPF_MAP_TYPE_XSKMAP: case BPF_MAP_TYPE_XSKMAP:
...@@ -3655,7 +3659,8 @@ static int xdp_do_generic_redirect_map(struct net_device *dev, ...@@ -3655,7 +3659,8 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
ri->tgt_value = NULL; ri->tgt_value = NULL;
WRITE_ONCE(ri->map, NULL); WRITE_ONCE(ri->map, NULL);
if (map->map_type == BPF_MAP_TYPE_DEVMAP) { if (map->map_type == BPF_MAP_TYPE_DEVMAP ||
map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
struct bpf_dtab_netdev *dst = fwd; struct bpf_dtab_netdev *dst = fwd;
err = dev_map_generic_redirect(dst, skb, xdp_prog); err = dev_map_generic_redirect(dst, skb, xdp_prog);
......
...@@ -46,7 +46,7 @@ MAP COMMANDS ...@@ -46,7 +46,7 @@ MAP COMMANDS
| *TYPE* := { **hash** | **array** | **prog_array** | **perf_event_array** | **percpu_hash** | *TYPE* := { **hash** | **array** | **prog_array** | **perf_event_array** | **percpu_hash**
| | **percpu_array** | **stack_trace** | **cgroup_array** | **lru_hash** | | **percpu_array** | **stack_trace** | **cgroup_array** | **lru_hash**
| | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps** | | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps**
| | **devmap** | **sockmap** | **cpumap** | **xskmap** | **sockhash** | | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage** | | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
| | **queue** | **stack** } | | **queue** | **stack** }
......
...@@ -489,8 +489,8 @@ _bpftool() ...@@ -489,8 +489,8 @@ _bpftool()
perf_event_array percpu_hash percpu_array \ perf_event_array percpu_hash percpu_array \
stack_trace cgroup_array lru_hash \ stack_trace cgroup_array lru_hash \
lru_percpu_hash lpm_trie array_of_maps \ lru_percpu_hash lpm_trie array_of_maps \
hash_of_maps devmap sockmap cpumap xskmap \ hash_of_maps devmap devmap_hash sockmap cpumap \
sockhash cgroup_storage reuseport_sockarray \ xskmap sockhash cgroup_storage reuseport_sockarray \
percpu_cgroup_storage queue stack' -- \ percpu_cgroup_storage queue stack' -- \
"$cur" ) ) "$cur" ) )
return 0 return 0
......
...@@ -37,6 +37,7 @@ const char * const map_type_name[] = { ...@@ -37,6 +37,7 @@ const char * const map_type_name[] = {
[BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps", [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
[BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps", [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
[BPF_MAP_TYPE_DEVMAP] = "devmap", [BPF_MAP_TYPE_DEVMAP] = "devmap",
[BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash",
[BPF_MAP_TYPE_SOCKMAP] = "sockmap", [BPF_MAP_TYPE_SOCKMAP] = "sockmap",
[BPF_MAP_TYPE_CPUMAP] = "cpumap", [BPF_MAP_TYPE_CPUMAP] = "cpumap",
[BPF_MAP_TYPE_XSKMAP] = "xskmap", [BPF_MAP_TYPE_XSKMAP] = "xskmap",
...@@ -1271,7 +1272,7 @@ static int do_help(int argc, char **argv) ...@@ -1271,7 +1272,7 @@ static int do_help(int argc, char **argv)
" TYPE := { hash | array | prog_array | perf_event_array | percpu_hash |\n" " TYPE := { hash | array | prog_array | perf_event_array | percpu_hash |\n"
" percpu_array | stack_trace | cgroup_array | lru_hash |\n" " percpu_array | stack_trace | cgroup_array | lru_hash |\n"
" lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n" " lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n"
" devmap | sockmap | cpumap | xskmap | sockhash |\n" " devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
" cgroup_storage | reuseport_sockarray | percpu_cgroup_storage }\n" " cgroup_storage | reuseport_sockarray | percpu_cgroup_storage }\n"
" " HELP_SPEC_OPTIONS "\n" " " HELP_SPEC_OPTIONS "\n"
"", "",
......
...@@ -134,6 +134,7 @@ enum bpf_map_type { ...@@ -134,6 +134,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK, BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE, BPF_MAP_TYPE_SK_STORAGE,
BPF_MAP_TYPE_DEVMAP_HASH,
}; };
/* Note that tracing related programs such as /* Note that tracing related programs such as
......
...@@ -244,6 +244,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) ...@@ -244,6 +244,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS:
case BPF_MAP_TYPE_DEVMAP: case BPF_MAP_TYPE_DEVMAP:
case BPF_MAP_TYPE_DEVMAP_HASH:
case BPF_MAP_TYPE_SOCKMAP: case BPF_MAP_TYPE_SOCKMAP:
case BPF_MAP_TYPE_CPUMAP: case BPF_MAP_TYPE_CPUMAP:
case BPF_MAP_TYPE_XSKMAP: case BPF_MAP_TYPE_XSKMAP:
......
...@@ -508,6 +508,21 @@ static void test_devmap(unsigned int task, void *data) ...@@ -508,6 +508,21 @@ static void test_devmap(unsigned int task, void *data)
close(fd); close(fd);
} }
/* Smoke test for the new BPF_MAP_TYPE_DEVMAP_HASH map type: creating a
 * devmap_hash map must succeed, otherwise the test program exits with an
 * error.  The task/data parameters are unused here; the signature matches
 * the other test_*() runners in this selftest file (cf. test_devmap()).
 */
static void test_devmap_hash(unsigned int task, void *data)
{
int fd;
__u32 key, value;
/* u32 key and value (sizeof == 4), max_entries == 2, no map flags. */
fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP_HASH, sizeof(key), sizeof(value),
2, 0);
if (fd < 0) {
printf("Failed to create devmap_hash '%s'!\n", strerror(errno));
exit(1);
}
/* Creation is the whole test; just release the map fd. */
close(fd);
}
static void test_queuemap(unsigned int task, void *data) static void test_queuemap(unsigned int task, void *data)
{ {
const int MAP_SIZE = 32; const int MAP_SIZE = 32;
...@@ -1684,6 +1699,7 @@ static void run_all_tests(void) ...@@ -1684,6 +1699,7 @@ static void run_all_tests(void)
test_arraymap_percpu_many_keys(); test_arraymap_percpu_many_keys();
test_devmap(0, NULL); test_devmap(0, NULL);
test_devmap_hash(0, NULL);
test_sockmap(0, NULL); test_sockmap(0, NULL);
test_map_large(); test_map_large();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment