Commit ed69e066 authored by Jakub Kicinski

Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Andrii Nakryiko says:

====================
pull-request: bpf-next 2023-03-08

We've added 23 non-merge commits during the last 2 day(s) which contain
a total of 28 files changed, 414 insertions(+), 104 deletions(-).

The main changes are:

1) Add more precise memory usage reporting for all BPF map types,
   from Yafang Shao.

2) Add ARM32 USDT support to libbpf, from Puranjay Mohan.

3) Fix BTF_ID_LIST size causing problems in !CONFIG_DEBUG_INFO_BTF,
   from Nathan Chancellor.

4) IMA selftests fix, from Roberto Sassu.

5) libbpf fix in APK support code, from Daniel Müller.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (23 commits)
  selftests/bpf: Fix IMA test
  libbpf: USDT arm arg parsing support
  libbpf: Refactor parse_usdt_arg() to re-use code
  libbpf: Fix theoretical u32 underflow in find_cd() function
  bpf: enforce all maps having memory usage callback
  bpf: offload map memory usage
  bpf, net: xskmap memory usage
  bpf, net: sock_map memory usage
  bpf, net: bpf_local_storage memory usage
  bpf: local_storage memory usage
  bpf: bpf_struct_ops memory usage
  bpf: queue_stack_maps memory usage
  bpf: devmap memory usage
  bpf: cpumap memory usage
  bpf: bloom_filter memory usage
  bpf: ringbuf memory usage
  bpf: reuseport_array memory usage
  bpf: stackmap memory usage
  bpf: arraymap memory usage
  bpf: hashtab memory usage
  ...
====================

Link: https://lore.kernel.org/r/20230308193533.1671597-1-andrii@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 10369080 12fabae0
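The user-visible effect of the map_mem_usage series below is that the "memlock" line in a map's fdinfo is now computed per map type by the new ->map_mem_usage() callback (wired up in kernel/bpf/syscall.c) rather than the old key/value-size estimate. A minimal sketch of how to observe it, assuming libbpf >= 0.7 and permission to create maps; this program is an illustration and not part of the commit:

#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>

int main(void)
{
	char path[64], line[256];
	FILE *f;
	/* hash map with 4-byte keys/values and 1024 slots */
	int fd = bpf_map_create(BPF_MAP_TYPE_HASH, "demo", 4, 4, 1024, NULL);

	if (fd < 0)
		return 1;
	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
	f = fopen(path, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "memlock:", 8))
			fputs(line, stdout); /* now reported per map type, in bytes */
	fclose(f);
	return 0;
}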
@@ -161,6 +161,8 @@ struct bpf_map_ops {
                                            bpf_callback_t callback_fn,
                                            void *callback_ctx, u64 flags);
 
+        u64 (*map_mem_usage)(const struct bpf_map *map);
+
         /* BTF id of struct allocated by map_alloc */
         int *map_btf_id;
@@ -2622,6 +2624,7 @@ static inline bool bpf_map_is_offloaded(struct bpf_map *map)
 
 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
 void bpf_map_offload_map_free(struct bpf_map *map);
+u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
 
 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
                               const union bpf_attr *kattr,
                               union bpf_attr __user *uattr);
@@ -2693,6 +2696,11 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
 {
 }
 
+static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
+{
+        return 0;
+}
+
 static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
                                             const union bpf_attr *kattr,
                                             union bpf_attr __user *uattr)
...
@@ -164,5 +164,6 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
                          void *value, u64 map_flags, gfp_t gfp_flags);
 void bpf_local_storage_free_rcu(struct rcu_head *rcu);
+u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
 
 #endif /* _BPF_LOCAL_STORAGE_H */
@@ -204,7 +204,7 @@ extern struct btf_id_set8 name;
 
 #else
 
-#define BTF_ID_LIST(name) static u32 __maybe_unused name[16];
+#define BTF_ID_LIST(name) static u32 __maybe_unused name[64];
 #define BTF_ID(prefix, name)
 #define BTF_ID_FLAGS(prefix, name, ...)
 #define BTF_ID_UNUSED
...
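For context on the [16] -> [64] bump above: with CONFIG_DEBUG_INFO_BTF disabled, BTF_ID_LIST() degenerates into a fixed-size dummy array, so a list with more entries than slots is indexed out of bounds. A stand-alone miniature of that failure class (the demo macro and names are invented, not kernel code):

#include <stdio.h>

/* stand-in for the !CONFIG_DEBUG_INFO_BTF expansion of BTF_ID_LIST() */
#define DEMO_ID_LIST(name) static unsigned int name[16];

DEMO_ID_LIST(demo_ids)

int main(void)
{
	/* a 17th list entry would touch demo_ids[16]: out of bounds for a
	 * 16-slot array, which is the class of bug the bump to 64 avoids */
	printf("slots: %zu\n", sizeof(demo_ids) / sizeof(demo_ids[0]));
	return 0;
}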
@@ -38,6 +38,7 @@ struct xdp_umem {
 struct xsk_map {
         struct bpf_map map;
         spinlock_t lock; /* Synchronize map updates */
+        atomic_t count;
         struct xdp_sock __rcu *xsk_map[];
 };
...
@@ -721,6 +721,28 @@ static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_
         return num_elems;
 }
 
+static u64 array_map_mem_usage(const struct bpf_map *map)
+{
+        struct bpf_array *array = container_of(map, struct bpf_array, map);
+        bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
+        u32 elem_size = array->elem_size;
+        u64 entries = map->max_entries;
+        u64 usage = sizeof(*array);
+
+        if (percpu) {
+                usage += entries * sizeof(void *);
+                usage += entries * elem_size * num_possible_cpus();
+        } else {
+                if (map->map_flags & BPF_F_MMAPABLE) {
+                        usage = PAGE_ALIGN(usage);
+                        usage += PAGE_ALIGN(entries * elem_size);
+                } else {
+                        usage += entries * elem_size;
+                }
+        }
+        return usage;
+}
+
 BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
 const struct bpf_map_ops array_map_ops = {
         .map_meta_equal = array_map_meta_equal,
@@ -742,6 +764,7 @@ const struct bpf_map_ops array_map_ops = {
         .map_update_batch = generic_map_update_batch,
         .map_set_for_each_callback_args = map_set_for_each_callback_args,
         .map_for_each_callback = bpf_for_each_array_elem,
+        .map_mem_usage = array_map_mem_usage,
         .map_btf_id = &array_map_btf_ids[0],
         .iter_seq_info = &iter_seq_info,
 };
@@ -762,6 +785,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
         .map_update_batch = generic_map_update_batch,
         .map_set_for_each_callback_args = map_set_for_each_callback_args,
         .map_for_each_callback = bpf_for_each_array_elem,
+        .map_mem_usage = array_map_mem_usage,
         .map_btf_id = &array_map_btf_ids[0],
         .iter_seq_info = &iter_seq_info,
 };
@@ -1156,6 +1180,7 @@ const struct bpf_map_ops prog_array_map_ops = {
         .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
         .map_release_uref = prog_array_map_clear,
         .map_seq_show_elem = prog_array_map_seq_show_elem,
+        .map_mem_usage = array_map_mem_usage,
         .map_btf_id = &array_map_btf_ids[0],
 };
@@ -1257,6 +1282,7 @@ const struct bpf_map_ops perf_event_array_map_ops = {
         .map_fd_put_ptr = perf_event_fd_array_put_ptr,
         .map_release = perf_event_fd_array_release,
         .map_check_btf = map_check_no_btf,
+        .map_mem_usage = array_map_mem_usage,
         .map_btf_id = &array_map_btf_ids[0],
 };
@@ -1291,6 +1317,7 @@ const struct bpf_map_ops cgroup_array_map_ops = {
         .map_fd_get_ptr = cgroup_fd_array_get_ptr,
         .map_fd_put_ptr = cgroup_fd_array_put_ptr,
         .map_check_btf = map_check_no_btf,
+        .map_mem_usage = array_map_mem_usage,
         .map_btf_id = &array_map_btf_ids[0],
 };
 #endif
@@ -1379,5 +1406,6 @@ const struct bpf_map_ops array_of_maps_map_ops = {
         .map_lookup_batch = generic_map_lookup_batch,
         .map_update_batch = generic_map_update_batch,
         .map_check_btf = map_check_no_btf,
+        .map_mem_usage = array_map_mem_usage,
         .map_btf_id = &array_map_btf_ids[0],
 };
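To make array_map_mem_usage() above concrete, here is a worked calculation with invented parameters (1024 entries of 8 bytes, 4 possible CPUs, 4 KiB pages); the sizeof(struct bpf_array) overhead is left out since userspace cannot see it. This is an illustration, not kernel code:

#include <stdio.h>

int main(void)
{
	unsigned long long entries = 1024, elem = 8, cpus = 4, page = 4096;
	unsigned long long data = entries * elem;

	printf("plain array:  %llu\n", data);                      /* 8192 */
	/* BPF_F_MMAPABLE rounds the data area up to whole pages */
	printf("mmapable:     %llu\n", (data + page - 1) / page * page);
	/* percpu: one pointer per entry plus one value copy per possible CPU */
	printf("percpu array: %llu\n", entries * sizeof(void *) + data * cpus);
	return 0;
}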
@@ -193,6 +193,17 @@ static int bloom_map_check_btf(const struct bpf_map *map,
         return btf_type_is_void(key_type) ? 0 : -EINVAL;
 }
 
+static u64 bloom_map_mem_usage(const struct bpf_map *map)
+{
+        struct bpf_bloom_filter *bloom;
+        u64 bitset_bytes;
+
+        bloom = container_of(map, struct bpf_bloom_filter, map);
+        bitset_bytes = BITS_TO_BYTES((u64)bloom->bitset_mask + 1);
+        bitset_bytes = roundup(bitset_bytes, sizeof(unsigned long));
+        return sizeof(*bloom) + bitset_bytes;
+}
+
 BTF_ID_LIST_SINGLE(bpf_bloom_map_btf_ids, struct, bpf_bloom_filter)
 const struct bpf_map_ops bloom_filter_map_ops = {
         .map_meta_equal = bpf_map_meta_equal,
@@ -206,5 +217,6 @@ const struct bpf_map_ops bloom_filter_map_ops = {
         .map_update_elem = bloom_map_update_elem,
         .map_delete_elem = bloom_map_delete_elem,
         .map_check_btf = bloom_map_check_btf,
+        .map_mem_usage = bloom_map_mem_usage,
         .map_btf_id = &bpf_bloom_map_btf_ids[0],
 };
@@ -221,6 +221,7 @@ const struct bpf_map_ops cgrp_storage_map_ops = {
         .map_update_elem = bpf_cgrp_storage_update_elem,
         .map_delete_elem = bpf_cgrp_storage_delete_elem,
         .map_check_btf = bpf_local_storage_map_check_btf,
+        .map_mem_usage = bpf_local_storage_map_mem_usage,
         .map_btf_id = &bpf_local_storage_map_btf_id[0],
         .map_owner_storage_ptr = cgroup_storage_ptr,
 };
...
@@ -223,6 +223,7 @@ const struct bpf_map_ops inode_storage_map_ops = {
         .map_update_elem = bpf_fd_inode_storage_update_elem,
         .map_delete_elem = bpf_fd_inode_storage_delete_elem,
         .map_check_btf = bpf_local_storage_map_check_btf,
+        .map_mem_usage = bpf_local_storage_map_mem_usage,
         .map_btf_id = &bpf_local_storage_map_btf_id[0],
         .map_owner_storage_ptr = inode_storage_ptr,
 };
...
@@ -685,6 +685,16 @@ bool bpf_local_storage_unlink_nolock(struct bpf_local_storage *local_storage)
         return free_storage;
 }
 
+u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
+{
+        struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
+        u64 usage = sizeof(*smap);
+
+        /* The dynamically allocated selems are not counted currently. */
+        usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
+        return usage;
+}
+
 struct bpf_map *
 bpf_local_storage_map_alloc(union bpf_attr *attr,
                             struct bpf_local_storage_cache *cache)
...
@@ -641,6 +641,21 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
         return map;
 }
 
+static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
+{
+        struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
+        const struct bpf_struct_ops *st_ops = st_map->st_ops;
+        const struct btf_type *vt = st_ops->value_type;
+        u64 usage;
+
+        usage = sizeof(*st_map) +
+                        vt->size - sizeof(struct bpf_struct_ops_value);
+        usage += vt->size;
+        usage += btf_type_vlen(vt) * sizeof(struct bpf_links *);
+        usage += PAGE_SIZE;
+        return usage;
+}
+
 BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
 const struct bpf_map_ops bpf_struct_ops_map_ops = {
         .map_alloc_check = bpf_struct_ops_map_alloc_check,
@@ -651,6 +666,7 @@ const struct bpf_map_ops bpf_struct_ops_map_ops = {
         .map_delete_elem = bpf_struct_ops_map_delete_elem,
         .map_update_elem = bpf_struct_ops_map_update_elem,
         .map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
+        .map_mem_usage = bpf_struct_ops_map_mem_usage,
         .map_btf_id = &bpf_struct_ops_map_btf_ids[0],
 };
...
@@ -335,6 +335,7 @@ const struct bpf_map_ops task_storage_map_ops = {
         .map_update_elem = bpf_pid_task_storage_update_elem,
         .map_delete_elem = bpf_pid_task_storage_delete_elem,
         .map_check_btf = bpf_local_storage_map_check_btf,
+        .map_mem_usage = bpf_local_storage_map_mem_usage,
         .map_btf_id = &bpf_local_storage_map_btf_id[0],
         .map_owner_storage_ptr = task_storage_ptr,
 };
...
@@ -673,6 +673,15 @@ static int cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
                                       __cpu_map_lookup_elem);
 }
 
+static u64 cpu_map_mem_usage(const struct bpf_map *map)
+{
+        u64 usage = sizeof(struct bpf_cpu_map);
+
+        /* Currently the dynamically allocated elements are not counted */
+        usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *);
+        return usage;
+}
+
 BTF_ID_LIST_SINGLE(cpu_map_btf_ids, struct, bpf_cpu_map)
 const struct bpf_map_ops cpu_map_ops = {
         .map_meta_equal = bpf_map_meta_equal,
@@ -683,6 +692,7 @@ const struct bpf_map_ops cpu_map_ops = {
         .map_lookup_elem = cpu_map_lookup_elem,
         .map_get_next_key = cpu_map_get_next_key,
         .map_check_btf = map_check_no_btf,
+        .map_mem_usage = cpu_map_mem_usage,
         .map_btf_id = &cpu_map_btf_ids[0],
         .map_redirect = cpu_map_redirect,
 };
...
@@ -819,8 +819,10 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key)
                 return -EINVAL;
 
         old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
-        if (old_dev)
+        if (old_dev) {
                 call_rcu(&old_dev->rcu, __dev_map_entry_free);
+                atomic_dec((atomic_t *)&dtab->items);
+        }
 
         return 0;
 }
@@ -931,6 +933,8 @@ static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
         old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
         if (old_dev)
                 call_rcu(&old_dev->rcu, __dev_map_entry_free);
+        else
+                atomic_inc((atomic_t *)&dtab->items);
 
         return 0;
 }
@@ -1016,6 +1020,20 @@ static int dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
                                        __dev_map_hash_lookup_elem);
 }
 
+static u64 dev_map_mem_usage(const struct bpf_map *map)
+{
+        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+        u64 usage = sizeof(struct bpf_dtab);
+
+        if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
+                usage += (u64)dtab->n_buckets * sizeof(struct hlist_head);
+        else
+                usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);
+        usage += atomic_read((atomic_t *)&dtab->items) *
+                         (u64)sizeof(struct bpf_dtab_netdev);
+        return usage;
+}
+
 BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
 const struct bpf_map_ops dev_map_ops = {
         .map_meta_equal = bpf_map_meta_equal,
@@ -1026,6 +1044,7 @@ const struct bpf_map_ops dev_map_ops = {
         .map_update_elem = dev_map_update_elem,
         .map_delete_elem = dev_map_delete_elem,
         .map_check_btf = map_check_no_btf,
+        .map_mem_usage = dev_map_mem_usage,
         .map_btf_id = &dev_map_btf_ids[0],
         .map_redirect = dev_map_redirect,
 };
@@ -1039,6 +1058,7 @@ const struct bpf_map_ops dev_map_hash_ops = {
         .map_update_elem = dev_map_hash_update_elem,
         .map_delete_elem = dev_map_hash_delete_elem,
         .map_check_btf = map_check_no_btf,
+        .map_mem_usage = dev_map_mem_usage,
         .map_btf_id = &dev_map_btf_ids[0],
         .map_redirect = dev_hash_map_redirect,
 };
@@ -1109,9 +1129,11 @@ static int dev_map_notification(struct notifier_block *notifier,
                         if (!dev || netdev != dev->dev)
                                 continue;
                         odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
-                        if (dev == odev)
+                        if (dev == odev) {
                                 call_rcu(&dev->rcu,
                                          __dev_map_entry_free);
+                                atomic_dec((atomic_t *)&dtab->items);
+                        }
                 }
         }
         rcu_read_unlock();
...
@@ -2190,6 +2190,44 @@ static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_f
         return num_elems;
 }
 
+static u64 htab_map_mem_usage(const struct bpf_map *map)
+{
+        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+        u32 value_size = round_up(htab->map.value_size, 8);
+        bool prealloc = htab_is_prealloc(htab);
+        bool percpu = htab_is_percpu(htab);
+        bool lru = htab_is_lru(htab);
+        u64 num_entries;
+        u64 usage = sizeof(struct bpf_htab);
+
+        usage += sizeof(struct bucket) * htab->n_buckets;
+        usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;
+        if (prealloc) {
+                num_entries = map->max_entries;
+                if (htab_has_extra_elems(htab))
+                        num_entries += num_possible_cpus();
+
+                usage += htab->elem_size * num_entries;
+
+                if (percpu)
+                        usage += value_size * num_possible_cpus() * num_entries;
+                else if (!lru)
+                        usage += sizeof(struct htab_elem *) * num_possible_cpus();
+        } else {
+#define LLIST_NODE_SZ sizeof(struct llist_node)
+
+                num_entries = htab->use_percpu_counter ?
+                                          percpu_counter_sum(&htab->pcount) :
+                                          atomic_read(&htab->count);
+                usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
+                if (percpu) {
+                        usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
+                        usage += value_size * num_possible_cpus() * num_entries;
+                }
+        }
+        return usage;
+}
+
 BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
 const struct bpf_map_ops htab_map_ops = {
         .map_meta_equal = bpf_map_meta_equal,
@@ -2206,6 +2244,7 @@ const struct bpf_map_ops htab_map_ops = {
         .map_seq_show_elem = htab_map_seq_show_elem,
         .map_set_for_each_callback_args = map_set_for_each_callback_args,
         .map_for_each_callback = bpf_for_each_hash_elem,
+        .map_mem_usage = htab_map_mem_usage,
         BATCH_OPS(htab),
         .map_btf_id = &htab_map_btf_ids[0],
         .iter_seq_info = &iter_seq_info,
@@ -2227,6 +2266,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
         .map_seq_show_elem = htab_map_seq_show_elem,
         .map_set_for_each_callback_args = map_set_for_each_callback_args,
         .map_for_each_callback = bpf_for_each_hash_elem,
+        .map_mem_usage = htab_map_mem_usage,
         BATCH_OPS(htab_lru),
         .map_btf_id = &htab_map_btf_ids[0],
         .iter_seq_info = &iter_seq_info,
@@ -2378,6 +2418,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
         .map_seq_show_elem = htab_percpu_map_seq_show_elem,
         .map_set_for_each_callback_args = map_set_for_each_callback_args,
         .map_for_each_callback = bpf_for_each_hash_elem,
+        .map_mem_usage = htab_map_mem_usage,
         BATCH_OPS(htab_percpu),
         .map_btf_id = &htab_map_btf_ids[0],
         .iter_seq_info = &iter_seq_info,
@@ -2397,6 +2438,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
         .map_seq_show_elem = htab_percpu_map_seq_show_elem,
         .map_set_for_each_callback_args = map_set_for_each_callback_args,
         .map_for_each_callback = bpf_for_each_hash_elem,
+        .map_mem_usage = htab_map_mem_usage,
         BATCH_OPS(htab_lru_percpu),
         .map_btf_id = &htab_map_btf_ids[0],
         .iter_seq_info = &iter_seq_info,
@@ -2534,6 +2576,7 @@ const struct bpf_map_ops htab_of_maps_map_ops = {
         .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
         .map_gen_lookup = htab_of_map_gen_lookup,
         .map_check_btf = map_check_no_btf,
+        .map_mem_usage = htab_map_mem_usage,
         BATCH_OPS(htab),
         .map_btf_id = &htab_map_btf_ids[0],
 };
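The hashtab callback above has two regimes worth spelling out: preallocated maps are charged for max_entries up front, while BPF_F_NO_PREALLOC maps are charged per live element (plus one free-list llist node each), so their reported usage tracks actual load. A toy calculation with invented sizes, not kernel code:

#include <stdio.h>

int main(void)
{
	unsigned long long elem_size = 64, max_entries = 1024;
	unsigned long long llist_node_sz = 16, live_elems = 100;

	/* prealloc: fixed at creation time, independent of current load */
	printf("prealloc:     %llu\n", elem_size * max_entries);
	/* no-prealloc: grows and shrinks with the live element count */
	printf("non-prealloc: %llu\n", (elem_size + llist_node_sz) * live_elems);
	return 0;
}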
@@ -446,6 +446,12 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
         rcu_read_unlock();
 }
 
+static u64 cgroup_storage_map_usage(const struct bpf_map *map)
+{
+        /* Currently the dynamically allocated elements are not counted. */
+        return sizeof(struct bpf_cgroup_storage_map);
+}
+
 BTF_ID_LIST_SINGLE(cgroup_storage_map_btf_ids, struct,
                    bpf_cgroup_storage_map)
 const struct bpf_map_ops cgroup_storage_map_ops = {
@@ -457,6 +463,7 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
         .map_delete_elem = cgroup_storage_delete_elem,
         .map_check_btf = cgroup_storage_check_btf,
         .map_seq_show_elem = cgroup_storage_seq_show_elem,
+        .map_mem_usage = cgroup_storage_map_usage,
         .map_btf_id = &cgroup_storage_map_btf_ids[0],
 };
...
@@ -720,6 +720,16 @@ static int trie_check_btf(const struct bpf_map *map,
                -EINVAL : 0;
 }
 
+static u64 trie_mem_usage(const struct bpf_map *map)
+{
+        struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+        u64 elem_size;
+
+        elem_size = sizeof(struct lpm_trie_node) + trie->data_size +
+                            trie->map.value_size;
+        return elem_size * READ_ONCE(trie->n_entries);
+}
+
 BTF_ID_LIST_SINGLE(trie_map_btf_ids, struct, lpm_trie)
 const struct bpf_map_ops trie_map_ops = {
         .map_meta_equal = bpf_map_meta_equal,
@@ -733,5 +743,6 @@ const struct bpf_map_ops trie_map_ops = {
         .map_update_batch = generic_map_update_batch,
         .map_delete_batch = generic_map_delete_batch,
         .map_check_btf = trie_check_btf,
+        .map_mem_usage = trie_mem_usage,
         .map_btf_id = &trie_map_btf_ids[0],
 };
@@ -563,6 +563,12 @@ void bpf_map_offload_map_free(struct bpf_map *map)
         bpf_map_area_free(offmap);
 }
 
+u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
+{
+        /* The memory dynamically allocated in netdev dev_ops is not counted */
+        return sizeof(struct bpf_offloaded_map);
+}
+
 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
 {
         struct bpf_offloaded_map *offmap = map_to_offmap(map);
...
@@ -246,6 +246,14 @@ static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
         return -EINVAL;
 }
 
+static u64 queue_stack_map_mem_usage(const struct bpf_map *map)
+{
+        u64 usage = sizeof(struct bpf_queue_stack);
+
+        usage += ((u64)map->max_entries + 1) * map->value_size;
+        return usage;
+}
+
 BTF_ID_LIST_SINGLE(queue_map_btf_ids, struct, bpf_queue_stack)
 const struct bpf_map_ops queue_map_ops = {
         .map_meta_equal = bpf_map_meta_equal,
@@ -259,6 +267,7 @@ const struct bpf_map_ops queue_map_ops = {
         .map_pop_elem = queue_map_pop_elem,
         .map_peek_elem = queue_map_peek_elem,
         .map_get_next_key = queue_stack_map_get_next_key,
+        .map_mem_usage = queue_stack_map_mem_usage,
         .map_btf_id = &queue_map_btf_ids[0],
 };
@@ -274,5 +283,6 @@ const struct bpf_map_ops stack_map_ops = {
         .map_pop_elem = stack_map_pop_elem,
         .map_peek_elem = stack_map_peek_elem,
         .map_get_next_key = queue_stack_map_get_next_key,
+        .map_mem_usage = queue_stack_map_mem_usage,
         .map_btf_id = &queue_map_btf_ids[0],
 };
@@ -335,6 +335,13 @@ static int reuseport_array_get_next_key(struct bpf_map *map, void *key,
         return 0;
 }
 
+static u64 reuseport_array_mem_usage(const struct bpf_map *map)
+{
+        struct reuseport_array *array;
+
+        return struct_size(array, ptrs, map->max_entries);
+}
+
 BTF_ID_LIST_SINGLE(reuseport_array_map_btf_ids, struct, reuseport_array)
 const struct bpf_map_ops reuseport_array_ops = {
         .map_meta_equal = bpf_map_meta_equal,
@@ -344,5 +351,6 @@ const struct bpf_map_ops reuseport_array_ops = {
         .map_lookup_elem = reuseport_array_lookup_elem,
         .map_get_next_key = reuseport_array_get_next_key,
         .map_delete_elem = reuseport_array_delete_elem,
+        .map_mem_usage = reuseport_array_mem_usage,
         .map_btf_id = &reuseport_array_map_btf_ids[0],
 };
@@ -19,6 +19,7 @@
         (offsetof(struct bpf_ringbuf, consumer_pos) >> PAGE_SHIFT)
 /* consumer page and producer page */
 #define RINGBUF_POS_PAGES 2
+#define RINGBUF_NR_META_PAGES (RINGBUF_PGOFF + RINGBUF_POS_PAGES)
 
 #define RINGBUF_MAX_RECORD_SZ (UINT_MAX/4)
@@ -96,7 +97,7 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
 {
         const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL |
                             __GFP_NOWARN | __GFP_ZERO;
-        int nr_meta_pages = RINGBUF_PGOFF + RINGBUF_POS_PAGES;
+        int nr_meta_pages = RINGBUF_NR_META_PAGES;
         int nr_data_pages = data_sz >> PAGE_SHIFT;
         int nr_pages = nr_meta_pages + nr_data_pages;
         struct page **pages, *page;
@@ -336,6 +337,21 @@ static __poll_t ringbuf_map_poll_user(struct bpf_map *map, struct file *filp,
         return 0;
 }
 
+static u64 ringbuf_map_mem_usage(const struct bpf_map *map)
+{
+        struct bpf_ringbuf *rb;
+        int nr_data_pages;
+        int nr_meta_pages;
+        u64 usage = sizeof(struct bpf_ringbuf_map);
+
+        rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
+        usage += (u64)rb->nr_pages << PAGE_SHIFT;
+        nr_meta_pages = RINGBUF_NR_META_PAGES;
+        nr_data_pages = map->max_entries >> PAGE_SHIFT;
+        usage += (nr_meta_pages + 2 * nr_data_pages) * sizeof(struct page *);
+        return usage;
+}
+
 BTF_ID_LIST_SINGLE(ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
 const struct bpf_map_ops ringbuf_map_ops = {
         .map_meta_equal = bpf_map_meta_equal,
@@ -347,6 +363,7 @@ const struct bpf_map_ops ringbuf_map_ops = {
         .map_update_elem = ringbuf_map_update_elem,
         .map_delete_elem = ringbuf_map_delete_elem,
         .map_get_next_key = ringbuf_map_get_next_key,
+        .map_mem_usage = ringbuf_map_mem_usage,
         .map_btf_id = &ringbuf_map_btf_ids[0],
 };
@@ -361,6 +378,7 @@ const struct bpf_map_ops user_ringbuf_map_ops = {
         .map_update_elem = ringbuf_map_update_elem,
         .map_delete_elem = ringbuf_map_delete_elem,
         .map_get_next_key = ringbuf_map_get_next_key,
+        .map_mem_usage = ringbuf_map_mem_usage,
         .map_btf_id = &user_ringbuf_map_btf_ids[0],
 };
...
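One subtlety in ringbuf_map_mem_usage() above: the data pages are mapped twice, back to back, so records that wrap around the ring stay virtually contiguous; that is why the pages array is sized nr_meta_pages + 2 * nr_data_pages. A worked calculation assuming 4 KiB pages, 8-byte pointers, and the usual 3 metadata pages (illustration only, values invented):

#include <stdio.h>

int main(void)
{
	unsigned long long page = 4096, ptr = 8, meta = 3;
	unsigned long long data_sz = 64 * 1024;   /* max_entries: data size in bytes */
	unsigned long long data_pages = data_sz / page;

	printf("pages array: %llu bytes\n", (meta + 2 * data_pages) * ptr);
	return 0;
}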
@@ -654,6 +654,19 @@ static void stack_map_free(struct bpf_map *map)
         put_callchain_buffers();
 }
 
+static u64 stack_map_mem_usage(const struct bpf_map *map)
+{
+        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
+        u64 value_size = map->value_size;
+        u64 n_buckets = smap->n_buckets;
+        u64 entries = map->max_entries;
+        u64 usage = sizeof(*smap);
+
+        usage += n_buckets * sizeof(struct stack_map_bucket *);
+        usage += entries * (sizeof(struct stack_map_bucket) + value_size);
+        return usage;
+}
+
 BTF_ID_LIST_SINGLE(stack_trace_map_btf_ids, struct, bpf_stack_map)
 const struct bpf_map_ops stack_trace_map_ops = {
         .map_meta_equal = bpf_map_meta_equal,
@@ -664,5 +677,6 @@ const struct bpf_map_ops stack_trace_map_ops = {
         .map_update_elem = stack_map_update_elem,
         .map_delete_elem = stack_map_delete_elem,
         .map_check_btf = map_check_no_btf,
+        .map_mem_usage = stack_map_mem_usage,
         .map_btf_id = &stack_trace_map_btf_ids[0],
 };
@@ -105,6 +105,7 @@ const struct bpf_map_ops bpf_map_offload_ops = {
         .map_alloc = bpf_map_offload_map_alloc,
         .map_free = bpf_map_offload_map_free,
         .map_check_btf = map_check_no_btf,
+        .map_mem_usage = bpf_map_offload_map_mem_usage,
 };
 
 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
@@ -128,6 +129,8 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
         }
         if (attr->map_ifindex)
                 ops = &bpf_map_offload_ops;
+        if (!ops->map_mem_usage)
+                return ERR_PTR(-EINVAL);
         map = ops->map_alloc(attr);
         if (IS_ERR(map))
                 return map;
@@ -771,17 +774,10 @@ static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
 }
 
 #ifdef CONFIG_PROC_FS
-/* Provides an approximation of the map's memory footprint.
- * Used only to provide a backward compatibility and display
- * a reasonable "memlock" info.
- */
-static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
+/* Show the memory usage of a bpf map */
+static u64 bpf_map_memory_usage(const struct bpf_map *map)
 {
-        unsigned long size;
-
-        size = round_up(map->key_size + bpf_map_value_size(map), 8);
-
-        return round_up(map->max_entries * size, PAGE_SIZE);
+        return map->ops->map_mem_usage(map);
 }
 
 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
@@ -803,7 +799,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
                    "max_entries:\t%u\n"
                    "map_flags:\t%#x\n"
                    "map_extra:\t%#llx\n"
-                   "memlock:\t%lu\n"
+                   "memlock:\t%llu\n"
                    "map_id:\t%u\n"
                    "frozen:\t%u\n",
                    map->map_type,
@@ -812,7 +808,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
                    map->max_entries,
                    map->map_flags,
                    (unsigned long long)map->map_extra,
-                   bpf_map_memory_footprint(map),
+                   bpf_map_memory_usage(map),
                    map->id,
                    READ_ONCE(map->frozen));
         if (type) {
...
@@ -324,6 +324,7 @@ const struct bpf_map_ops sk_storage_map_ops = {
         .map_local_storage_charge = bpf_sk_storage_charge,
         .map_local_storage_uncharge = bpf_sk_storage_uncharge,
         .map_owner_storage_ptr = bpf_sk_storage_ptr,
+        .map_mem_usage = bpf_local_storage_map_mem_usage,
 };
 
 const struct bpf_func_proto bpf_sk_storage_get_proto = {
...
@@ -797,6 +797,14 @@ static void sock_map_fini_seq_private(void *priv_data)
         bpf_map_put_with_uref(info->map);
 }
 
+static u64 sock_map_mem_usage(const struct bpf_map *map)
+{
+        u64 usage = sizeof(struct bpf_stab);
+
+        usage += (u64)map->max_entries * sizeof(struct sock *);
+        return usage;
+}
+
 static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
         .seq_ops = &sock_map_seq_ops,
         .init_seq_private = sock_map_init_seq_private,
@@ -816,6 +824,7 @@ const struct bpf_map_ops sock_map_ops = {
         .map_lookup_elem = sock_map_lookup,
         .map_release_uref = sock_map_release_progs,
         .map_check_btf = map_check_no_btf,
+        .map_mem_usage = sock_map_mem_usage,
         .map_btf_id = &sock_map_btf_ids[0],
         .iter_seq_info = &sock_map_iter_seq_info,
 };
@@ -1397,6 +1406,16 @@ static void sock_hash_fini_seq_private(void *priv_data)
         bpf_map_put_with_uref(info->map);
 }
 
+static u64 sock_hash_mem_usage(const struct bpf_map *map)
+{
+        struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
+        u64 usage = sizeof(*htab);
+
+        usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
+        usage += atomic_read(&htab->count) * (u64)htab->elem_size;
+        return usage;
+}
+
 static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
         .seq_ops = &sock_hash_seq_ops,
         .init_seq_private = sock_hash_init_seq_private,
@@ -1416,6 +1435,7 @@ const struct bpf_map_ops sock_hash_ops = {
         .map_lookup_elem_sys_only = sock_hash_lookup_sys,
         .map_release_uref = sock_hash_release_progs,
         .map_check_btf = map_check_no_btf,
+        .map_mem_usage = sock_hash_mem_usage,
         .map_btf_id = &sock_hash_map_btf_ids[0],
         .iter_seq_info = &sock_hash_iter_seq_info,
 };
...
@@ -24,6 +24,7 @@ static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
                 return ERR_PTR(-ENOMEM);
 
         bpf_map_inc(&map->map);
+        atomic_inc(&map->count);
 
         node->map = map;
         node->map_entry = map_entry;
@@ -32,8 +33,11 @@ static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
 
 static void xsk_map_node_free(struct xsk_map_node *node)
 {
+        struct xsk_map *map = node->map;
+
         bpf_map_put(&node->map->map);
         kfree(node);
+        atomic_dec(&map->count);
 }
 
 static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
@@ -85,6 +89,14 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
         return &m->map;
 }
 
+static u64 xsk_map_mem_usage(const struct bpf_map *map)
+{
+        struct xsk_map *m = container_of(map, struct xsk_map, map);
+
+        return struct_size(m, xsk_map, map->max_entries) +
+                   (u64)atomic_read(&m->count) * sizeof(struct xsk_map_node);
+}
+
 static void xsk_map_free(struct bpf_map *map)
 {
         struct xsk_map *m = container_of(map, struct xsk_map, map);
@@ -267,6 +279,7 @@ const struct bpf_map_ops xsk_map_ops = {
         .map_update_elem = xsk_map_update_elem,
         .map_delete_elem = xsk_map_delete_elem,
         .map_check_btf = map_check_no_btf,
+        .map_mem_usage = xsk_map_mem_usage,
         .map_btf_id = &xsk_map_btf_ids[0],
         .map_redirect = xsk_map_redirect,
 };
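The xskmap change pairs each node allocation and free with an atomic counter so xsk_map_mem_usage() can charge the dynamically allocated nodes. A generic userspace sketch of the same pattern, with C11 atomics standing in for the kernel's atomic_t (invented names, illustration only):

#include <stdatomic.h>
#include <stdlib.h>

static atomic_long live_nodes;

struct demo_node { void *payload; };

static struct demo_node *demo_node_alloc(void)
{
	struct demo_node *n = calloc(1, sizeof(*n));

	if (n)
		atomic_fetch_add(&live_nodes, 1); /* like atomic_inc(&map->count) */
	return n;
}

static void demo_node_free(struct demo_node *n)
{
	free(n);
	atomic_fetch_sub(&live_nodes, 1); /* like atomic_dec(&map->count) */
}

int main(void)
{
	/* usage would be: fixed part + live_nodes * sizeof(struct demo_node) */
	demo_node_free(demo_node_alloc());
	return 0;
}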
This diff is collapsed.
@@ -168,9 +168,8 @@ static int try_parse_end_of_cd(struct zip_archive *archive, __u32 offset)
 
 static int find_cd(struct zip_archive *archive)
 {
+        int64_t limit, offset;
         int rc = -EINVAL;
-        int64_t limit;
-        __u32 offset;
 
         if (archive->size <= sizeof(struct end_of_cd_record))
                 return -EINVAL;
...
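The find_cd() type change above guards against a classic wrap-around: a __u32 scan offset decremented past zero wraps to UINT32_MAX instead of going negative, so a backwards search may never terminate. A minimal demonstration of the difference (not libbpf code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t u = 0;
	int64_t s = 0;

	u--;	/* wraps to 4294967295 */
	s--;	/* goes to -1, so an "offset >= limit" loop can terminate */
	printf("u32: %u, s64: %lld\n", u, (long long)s);
	return 0;
}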
@@ -70,7 +70,7 @@ void test_test_ima(void)
         u64 bin_true_sample;
         char cmd[256];
 
-        int err, duration = 0;
+        int err, duration = 0, fresh_digest_idx = 0;
         struct ima *skel = NULL;
 
         skel = ima__open_and_load();
@@ -129,7 +129,15 @@ void test_test_ima(void)
         /*
          * Test #3
          * - Goal: confirm that bpf_ima_inode_hash() returns a non-fresh digest
-         * - Expected result: 2 samples (/bin/true: non-fresh, fresh)
+         * - Expected result:
+         *   1 sample (/bin/true: fresh) if commit 62622dab0a28 applied
+         *   2 samples (/bin/true: non-fresh, fresh) if commit 62622dab0a28 is
+         *     not applied
+         *
+         * If commit 62622dab0a28 ("ima: return IMA digest value only when
+         * IMA_COLLECTED flag is set") is applied, bpf_ima_inode_hash() refuses
+         * to give a non-fresh digest, hence the correct result is 1 instead of
+         * 2.
          */
 
         test_init(skel->bss);
@@ -144,13 +152,18 @@ void test_test_ima(void)
                 goto close_clean;
 
         err = ring_buffer__consume(ringbuf);
-        ASSERT_EQ(err, 2, "num_samples_or_err");
-        ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
-        ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
-        ASSERT_EQ(ima_hash_from_bpf[0], bin_true_sample, "sample_equal_or_err");
+        ASSERT_GE(err, 1, "num_samples_or_err");
+        if (err == 2) {
+                ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
+                ASSERT_EQ(ima_hash_from_bpf[0], bin_true_sample,
+                          "sample_equal_or_err");
+                fresh_digest_idx = 1;
+        }
+        ASSERT_NEQ(ima_hash_from_bpf[fresh_digest_idx], 0, "ima_hash");
+
         /* IMA refreshed the digest. */
-        ASSERT_NEQ(ima_hash_from_bpf[1], bin_true_sample,
-                   "sample_different_or_err");
+        ASSERT_NEQ(ima_hash_from_bpf[fresh_digest_idx], bin_true_sample,
+                   "sample_equal_or_err");
 
         /*
          * Test #4
...