Commit fcf1fa29 authored by Andrii Nakryiko's avatar Andrii Nakryiko

Merge branch 'libbpf: capability for resizing datasec maps'

JP Kobryn says:

====================
Due to the way the datasec maps like bss, data, rodata are memory
mapped, they cannot be resized with bpf_map__set_value_size() like
non-datasec maps can. This series offers a way to allow the resizing of
datasec maps, by having the mapped regions resized as needed and also
adjusting associated BTF info if possible.

The thought behind this is to allow for use cases where a given datasec
needs to scale to, for example, the number of CPUs present. A bpf program
can have a global array in a data section with an initial length and
before loading the bpf program, the array length could be extended to
match the CPU count. The selftests included in this series perform this
scaling to an arbitrary value to demonstrate how it can work.
====================
Signed-off-by: default avatarAndrii Nakryiko <andrii@kernel.org>
parents 3b22f98e 08b08956
......@@ -1500,16 +1500,36 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
return map;
}
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
static size_t bpf_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
{
long page_sz = sysconf(_SC_PAGE_SIZE);
const long page_sz = sysconf(_SC_PAGE_SIZE);
size_t map_sz;
map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
map_sz = (size_t)roundup(value_sz, 8) * max_entries;
map_sz = roundup(map_sz, page_sz);
return map_sz;
}
/* Replace the memory-mapped region backing @map with a fresh anonymous
 * mapping of @new_sz bytes, preserving as much of the old contents as fits.
 * Returns 0 on success (or when the size is unchanged), -EINVAL if the map
 * has no mapped region, or -errno if mmap() fails.
 */
static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
{
	size_t copy_sz = old_sz < new_sz ? old_sz : new_sz;
	void *new_mem;

	if (!map->mmaped)
		return -EINVAL;
	if (old_sz == new_sz)
		return 0;

	new_mem = mmap(NULL, new_sz, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (new_mem == MAP_FAILED)
		return -errno;

	/* carry over the existing data, then release the old region */
	memcpy(new_mem, map->mmaped, copy_sz);
	munmap(map->mmaped, old_sz);
	map->mmaped = new_mem;
	return 0;
}
static char *internal_map_name(struct bpf_object *obj, const char *real_name)
{
char map_name[BPF_OBJ_NAME_LEN], *p;
......@@ -1608,6 +1628,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
{
struct bpf_map_def *def;
struct bpf_map *map;
size_t mmap_sz;
int err;
map = bpf_object__add_map(obj);
......@@ -1642,7 +1663,8 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
map->name, map->sec_idx, map->sec_offset, def->map_flags);
map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (map->mmaped == MAP_FAILED) {
err = -errno;
......@@ -8294,7 +8316,10 @@ static void bpf_map__destroy(struct bpf_map *map)
map->init_slots_sz = 0;
if (map->mmaped) {
munmap(map->mmaped, bpf_map_mmap_sz(map));
size_t mmap_sz;
mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
munmap(map->mmaped, mmap_sz);
map->mmaped = NULL;
}
......@@ -9412,10 +9437,103 @@ __u32 bpf_map__value_size(const struct bpf_map *map)
return map->def.value_size;
}
/* Adjust the BTF of a datasec map (bss/data/rodata) to match a new value
 * size @size. This only works when the datasec's last variable is an array:
 * that array's type is replaced by a new array type whose length covers the
 * remaining (size - var offset) bytes. Returns 0 on success, -ENOENT if the
 * object has no BTF, -EINVAL if the layout cannot be adjusted, or a negative
 * error code from btf__add_array().
 */
static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
{
	struct btf *btf;
	struct btf_type *datasec_type, *var_type;
	struct btf_var_secinfo *var;
	const struct btf_type *array_type;
	const struct btf_array *array;
	/* btf__add_array() returns a negative error code on failure, so
	 * new_array_id must be a signed int; declaring it __u32 made the
	 * `new_array_id < 0` check below dead code and let errors through
	 */
	int vlen, element_sz, new_array_id;
	__u32 nr_elements;

	/* check btf existence */
	btf = bpf_object__btf(map->obj);
	if (!btf)
		return -ENOENT;

	/* verify map is datasec */
	datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map));
	if (!btf_is_datasec(datasec_type)) {
		pr_warn("map '%s': cannot be resized, map value type is not a datasec\n",
			bpf_map__name(map));
		return -EINVAL;
	}

	/* verify datasec has at least one var */
	vlen = btf_vlen(datasec_type);
	if (vlen == 0) {
		pr_warn("map '%s': cannot be resized, map value datasec is empty\n",
			bpf_map__name(map));
		return -EINVAL;
	}

	/* verify last var in the datasec is an array */
	var = &btf_var_secinfos(datasec_type)[vlen - 1];
	var_type = btf_type_by_id(btf, var->type);
	array_type = skip_mods_and_typedefs(btf, var_type->type, NULL);
	if (!btf_is_array(array_type)) {
		pr_warn("map '%s': cannot be resized, last var must be an array\n",
			bpf_map__name(map));
		return -EINVAL;
	}

	/* verify requested size aligns with the array's element size */
	array = btf_array(array_type);
	element_sz = btf__resolve_size(btf, array->type);
	if (element_sz <= 0 || (size - var->offset) % element_sz != 0) {
		pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n",
			bpf_map__name(map), element_sz, size);
		return -EINVAL;
	}

	/* create a new array based on the existing array, but with new length */
	nr_elements = (size - var->offset) / element_sz;
	new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements);
	if (new_array_id < 0)
		return new_array_id;

	/* adding a new btf type invalidates existing pointers to btf objects,
	 * so refresh pointers before proceeding
	 */
	datasec_type = btf_type_by_id(btf, map->btf_value_type_id);
	var = &btf_var_secinfos(datasec_type)[vlen - 1];
	var_type = btf_type_by_id(btf, var->type);

	/* finally update btf info */
	datasec_type->size = size;
	var->size = size - var->offset;
	var_type->type = new_array_id;

	return 0;
}
/* Set the value size of @map. Fails with -EBUSY once the map has already
 * been created in the kernel (fd is valid). For memory-mapped maps
 * (bss/data/rodata) the backing region is resized first and the associated
 * BTF is then adjusted on a best-effort basis: if the BTF cannot be fixed
 * up, key/value type info is cleared instead of failing the resize.
 */
int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);

	if (map->mmaped) {
		size_t old_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
		size_t new_sz = bpf_map_mmap_sz(size, map->def.max_entries);
		int ret;

		ret = bpf_map_mmap_resize(map, old_sz, new_sz);
		if (ret) {
			pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
				bpf_map__name(map), ret);
			return ret;
		}

		/* -ENOENT just means there is no BTF to adjust */
		ret = map_btf_datasec_resize(map, size);
		if (ret && ret != -ENOENT) {
			pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n",
				bpf_map__name(map), ret);
			map->btf_value_type_id = 0;
			map->btf_key_type_id = 0;
		}
	}

	map->def.value_size = size;
	return 0;
}
......@@ -9441,7 +9559,7 @@ int bpf_map__set_initial_value(struct bpf_map *map,
return 0;
}
const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
{
if (!map->mmaped)
return NULL;
......@@ -12693,7 +12811,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map *map = *s->maps[i].map;
size_t mmap_sz = bpf_map_mmap_sz(map);
size_t mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
int prot, map_fd = bpf_map__fd(map);
void **mmaped = s->maps[i].mmaped;
......@@ -12720,8 +12838,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
* as per normal clean up procedure, so we don't need to worry
* about it from skeleton's clean up perspective.
*/
*mmaped = mmap(map->mmaped, mmap_sz, prot,
MAP_SHARED | MAP_FIXED, map_fd, 0);
*mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
if (*mmaped == MAP_FAILED) {
err = -errno;
*mmaped = NULL;
......
......@@ -869,8 +869,22 @@ LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node);
/* get/set map key size */
LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size);
/* get/set map value size */
/* get map value size */
LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map);
/**
* @brief **bpf_map__set_value_size()** sets map value size.
* @param map the BPF map instance
* @return 0, on success; negative error, otherwise
*
* There is a special case for maps with associated memory-mapped regions, like
* the global data section maps (bss, data, rodata). When this function is used
* on such a map, the mapped region is resized. Afterward, an attempt is made to
* adjust the corresponding BTF info. This attempt is best-effort and can only
* succeed if the last variable of the data section map is an array. The array
* BTF type is replaced by a new BTF array type with a different length.
* Any previously existing pointers returned from bpf_map__initial_value() or
* corresponding data section skeleton pointer must be reinitialized.
*/
LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size);
/* get map key/value BTF type IDs */
LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
......@@ -884,7 +898,7 @@ LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size);
LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
LIBBPF_API void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
/**
* @brief **bpf_map__is_internal()** tells the caller whether or not the
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>
#include "test_global_map_resize.skel.h"
#include "test_progs.h"
/* Trigger the BPF program attached at SEC("tp/syscalls/sys_enter_getpid")
 * by issuing a raw getpid syscall (the libc wrapper may not enter the
 * kernel, e.g. due to pid caching — TODO confirm).
 */
static void run_prog_bss_array_sum(void)
{
	(void)syscall(__NR_getpid);
}
/* Trigger the BPF program attached at SEC("tp/syscalls/sys_enter_getuid")
 * by issuing a raw getuid syscall.
 */
static void run_prog_data_array_sum(void)
{
	(void)syscall(__NR_getuid);
}
/* Resize the .bss datasec map before load, verify old contents are
 * preserved, then run the BPF program to sum the enlarged array.
 */
static void global_map_resize_bss_subtest(void)
{
	int err;
	struct test_global_map_resize *skel;
	struct bpf_map *map;
	/* sizeof() does not evaluate its operand, so using the not-yet-open
	 * skel pointer here is fine; .bss holds sum followed by array
	 */
	const __u32 desired_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2;
	size_t array_len, actual_sz;

	skel = test_global_map_resize__open();
	if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
		goto teardown;

	/* set some initial value before resizing.
	 * it is expected this non-zero value will be preserved
	 * while resizing.
	 */
	skel->bss->array[0] = 1;

	/* resize map value and verify the new size */
	map = skel->maps.bss;
	err = bpf_map__set_value_size(map, desired_sz);
	if (!ASSERT_OK(err, "bpf_map__set_value_size"))
		goto teardown;
	if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
		goto teardown;

	/* set the expected number of elements based on the resized array;
	 * sum precedes the array inside the datasec, hence the subtraction
	 */
	array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->bss->array[0]);
	if (!ASSERT_GT(array_len, 1, "array_len"))
		goto teardown;

	/* resizing remaps the region, so the skeleton's bss pointer must be
	 * refreshed from bpf_map__initial_value() before further use
	 */
	skel->bss = bpf_map__initial_value(skel->maps.bss, &actual_sz);
	if (!ASSERT_OK_PTR(skel->bss, "bpf_map__initial_value (ptr)"))
		goto teardown;
	if (!ASSERT_EQ(actual_sz, desired_sz, "bpf_map__initial_value (size)"))
		goto teardown;

	/* fill the newly resized array with ones,
	 * skipping the first element which was previously set
	 */
	for (int i = 1; i < array_len; i++)
		skel->bss->array[i] = 1;

	/* set global const values before loading */
	skel->rodata->pid = getpid();
	skel->rodata->bss_array_len = array_len;
	skel->rodata->data_array_len = 1;

	err = test_global_map_resize__load(skel);
	if (!ASSERT_OK(err, "test_global_map_resize__load"))
		goto teardown;
	err = test_global_map_resize__attach(skel);
	if (!ASSERT_OK(err, "test_global_map_resize__attach"))
		goto teardown;

	/* run the bpf program which will sum the contents of the array.
	 * since the array was filled with ones, verify the sum equals array_len
	 */
	run_prog_bss_array_sum();
	if (!ASSERT_EQ(skel->bss->sum, array_len, "sum"))
		goto teardown;

teardown:
	test_global_map_resize__destroy(skel);
}
/* Resize the custom .data.custom datasec map before load, verify old
 * contents are preserved, then run the BPF program to sum the enlarged
 * array.
 */
static void global_map_resize_data_subtest(void)
{
	int err;
	struct test_global_map_resize *skel;
	struct bpf_map *map;
	const __u32 desired_sz = sysconf(_SC_PAGE_SIZE) * 2;
	size_t array_len, actual_sz;

	skel = test_global_map_resize__open();
	if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
		goto teardown;

	/* set some initial value before resizing.
	 * it is expected this non-zero value will be preserved
	 * while resizing.
	 */
	skel->data_custom->my_array[0] = 1;

	/* resize map value and verify the new size */
	map = skel->maps.data_custom;
	err = bpf_map__set_value_size(map, desired_sz);
	if (!ASSERT_OK(err, "bpf_map__set_value_size"))
		goto teardown;
	if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
		goto teardown;

	/* set the expected number of elements based on the resized array;
	 * my_array is the only variable in .data.custom and starts at offset
	 * 0, so the whole resized value is array storage (unlike the bss
	 * subtest, there is no preceding `sum` to subtract)
	 */
	array_len = desired_sz / sizeof(skel->data_custom->my_array[0]);
	if (!ASSERT_GT(array_len, 1, "array_len"))
		goto teardown;

	/* resizing remaps the region, so the skeleton's data_custom pointer
	 * must be refreshed from bpf_map__initial_value() before further use
	 */
	skel->data_custom = bpf_map__initial_value(skel->maps.data_custom, &actual_sz);
	if (!ASSERT_OK_PTR(skel->data_custom, "bpf_map__initial_value (ptr)"))
		goto teardown;
	if (!ASSERT_EQ(actual_sz, desired_sz, "bpf_map__initial_value (size)"))
		goto teardown;

	/* fill the newly resized array with ones,
	 * skipping the first element which was previously set
	 */
	for (int i = 1; i < array_len; i++)
		skel->data_custom->my_array[i] = 1;

	/* set global const values before loading */
	skel->rodata->pid = getpid();
	skel->rodata->bss_array_len = 1;
	skel->rodata->data_array_len = array_len;

	err = test_global_map_resize__load(skel);
	if (!ASSERT_OK(err, "test_global_map_resize__load"))
		goto teardown;
	err = test_global_map_resize__attach(skel);
	if (!ASSERT_OK(err, "test_global_map_resize__attach"))
		goto teardown;

	/* run the bpf program which will sum the contents of the array.
	 * since the array was filled with ones, verify the sum equals array_len
	 */
	run_prog_data_array_sum();
	if (!ASSERT_EQ(skel->bss->sum, array_len, "sum"))
		goto teardown;

teardown:
	test_global_map_resize__destroy(skel);
}
/* Exercise the BTF-adjustment failure paths of bpf_map__set_value_size():
 * in each case the mmap resize itself succeeds (returns 0), but BTF
 * key/value type info is expected to be cleared because the datasec's
 * layout cannot be described by a resized trailing array.
 */
static void global_map_resize_invalid_subtest(void)
{
	int err;
	struct test_global_map_resize *skel;
	struct bpf_map *map;
	__u32 element_sz, desired_sz;

	skel = test_global_map_resize__open();
	if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
		return;

	/* attempt to resize a global datasec map to size
	 * which does NOT align with array
	 */
	map = skel->maps.data_custom;
	if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.custom initial btf"))
		goto teardown;
	/* set desired size a fraction of element size beyond an aligned size */
	element_sz = sizeof(skel->data_custom->my_array[0]);
	desired_sz = element_sz + element_sz / 2;
	/* confirm desired size does NOT align with array */
	if (!ASSERT_NEQ(desired_sz % element_sz, 0, "my_array alignment"))
		goto teardown;
	err = bpf_map__set_value_size(map, desired_sz);
	/* confirm resize is OK but BTF info is cleared */
	if (!ASSERT_OK(err, ".data.custom bpf_map__set_value_size") ||
	    !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.custom clear btf key") ||
	    !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.custom clear btf val"))
		goto teardown;

	/* attempt to resize a global datasec map whose only var is NOT an array */
	map = skel->maps.data_non_array;
	if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array initial btf"))
		goto teardown;
	/* set desired size to arbitrary value */
	desired_sz = 1024;
	err = bpf_map__set_value_size(map, desired_sz);
	/* confirm resize is OK but BTF info is cleared */
	if (!ASSERT_OK(err, ".data.non_array bpf_map__set_value_size") ||
	    !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.non_array clear btf key") ||
	    !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array clear btf val"))
		goto teardown;

	/* attempt to resize a global datasec map
	 * whose last var is NOT an array
	 */
	map = skel->maps.data_array_not_last;
	if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last initial btf"))
		goto teardown;
	/* set desired size to a multiple of element size */
	element_sz = sizeof(skel->data_array_not_last->my_array_first[0]);
	desired_sz = element_sz * 8;
	/* confirm desired size aligns with array */
	if (!ASSERT_EQ(desired_sz % element_sz, 0, "my_array_first alignment"))
		goto teardown;
	err = bpf_map__set_value_size(map, desired_sz);
	/* confirm resize is OK but BTF info is cleared */
	if (!ASSERT_OK(err, ".data.array_not_last bpf_map__set_value_size") ||
	    !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.array_not_last clear btf key") ||
	    !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last clear btf val"))
		goto teardown;

teardown:
	test_global_map_resize__destroy(skel);
}
/* Entry point: run each datasec-resize scenario as its own subtest */
void test_global_map_resize(void)
{
	if (test__start_subtest("global_map_resize_bss"))
		global_map_resize_bss_subtest();

	if (test__start_subtest("global_map_resize_data"))
		global_map_resize_data_subtest();

	if (test__start_subtest("global_map_resize_invalid"))
		global_map_resize_invalid_subtest();
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
/* rodata section */
const volatile pid_t pid;
const volatile size_t bss_array_len;
const volatile size_t data_array_len;
/* bss section */
int sum = 0;
int array[1];
/* custom data section */
int my_array[1] SEC(".data.custom");
/* custom data section which should NOT be resizable,
* since it contains a single var which is not an array
*/
int my_int SEC(".data.non_array");
/* custom data section which should NOT be resizable,
* since its last var is not an array
*/
int my_array_first[1] SEC(".data.array_not_last");
int my_int_last SEC(".data.array_not_last");
SEC("tp/syscalls/sys_enter_getpid")
int bss_array_sum(void *ctx)
{
	/* only react to getpid() calls made by the test process itself */
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

	/* reset the accumulator; userspace reads it after each trigger */
	sum = 0;

	/* bss_array_len is set by userspace before load to match the
	 * resized array, so this bounded loop covers the whole array
	 */
	for (size_t i = 0; i < bss_array_len; ++i)
		sum += array[i];

	return 0;
}
SEC("tp/syscalls/sys_enter_getuid")
int data_array_sum(void *ctx)
{
	/* only react to getuid() calls made by the test process itself */
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

	/* reset the accumulator; userspace reads it after each trigger */
	sum = 0;

	/* data_array_len is set by userspace before load to match the
	 * resized array, so this bounded loop covers the whole array
	 */
	for (size_t i = 0; i < data_array_len; ++i)
		sum += my_array[i];

	return 0;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment