Commit b4fd0d67 authored by Yafang Shao's avatar Yafang Shao Committed by Alexei Starovoitov

bpf, net: xskmap memory usage

A new helper is introduced to calculate xskmap memory usage.

The xskmap memory usage can be dynamically changed when we add or remove
a xsk_map_node. Hence we need to track the count of xsk_map_node to get
its memory usage.

The result as follows,
- before
10: xskmap  name count_map  flags 0x0
        key 4B  value 4B  max_entries 65536  memlock 524288B

- after
10: xskmap  name count_map  flags 0x0 <<< no elements case
        key 4B  value 4B  max_entries 65536  memlock 524608B
Signed-off-by: default avatarYafang Shao <laoar.shao@gmail.com>
Link: https://lore.kernel.org/r/20230305124615.12358-17-laoar.shao@gmail.com
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parent 73d2c619
...@@ -38,6 +38,7 @@ struct xdp_umem { ...@@ -38,6 +38,7 @@ struct xdp_umem {
struct xsk_map { struct xsk_map {
struct bpf_map map; struct bpf_map map;
spinlock_t lock; /* Synchronize map updates */ spinlock_t lock; /* Synchronize map updates */
atomic_t count;
struct xdp_sock __rcu *xsk_map[]; struct xdp_sock __rcu *xsk_map[];
}; };
......
...@@ -24,6 +24,7 @@ static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map, ...@@ -24,6 +24,7 @@ static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
bpf_map_inc(&map->map); bpf_map_inc(&map->map);
atomic_inc(&map->count);
node->map = map; node->map = map;
node->map_entry = map_entry; node->map_entry = map_entry;
...@@ -32,8 +33,11 @@ static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map, ...@@ -32,8 +33,11 @@ static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
static void xsk_map_node_free(struct xsk_map_node *node) static void xsk_map_node_free(struct xsk_map_node *node)
{ {
struct xsk_map *map = node->map;
bpf_map_put(&node->map->map); bpf_map_put(&node->map->map);
kfree(node); kfree(node);
atomic_dec(&map->count);
} }
static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node) static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
...@@ -85,6 +89,14 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) ...@@ -85,6 +89,14 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
return &m->map; return &m->map;
} }
static u64 xsk_map_mem_usage(const struct bpf_map *map)
{
struct xsk_map *m = container_of(map, struct xsk_map, map);
return struct_size(m, xsk_map, map->max_entries) +
(u64)atomic_read(&m->count) * sizeof(struct xsk_map_node);
}
static void xsk_map_free(struct bpf_map *map) static void xsk_map_free(struct bpf_map *map)
{ {
struct xsk_map *m = container_of(map, struct xsk_map, map); struct xsk_map *m = container_of(map, struct xsk_map, map);
...@@ -267,6 +279,7 @@ const struct bpf_map_ops xsk_map_ops = { ...@@ -267,6 +279,7 @@ const struct bpf_map_ops xsk_map_ops = {
.map_update_elem = xsk_map_update_elem, .map_update_elem = xsk_map_update_elem,
.map_delete_elem = xsk_map_delete_elem, .map_delete_elem = xsk_map_delete_elem,
.map_check_btf = map_check_no_btf, .map_check_btf = map_check_no_btf,
.map_mem_usage = xsk_map_mem_usage,
.map_btf_id = &xsk_map_btf_ids[0], .map_btf_id = &xsk_map_btf_ids[0],
.map_redirect = xsk_map_redirect, .map_redirect = xsk_map_redirect,
}; };
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment