Commit ba05fd36 authored by Kumar Kartikeya Dwivedi, committed by Alexei Starovoitov

libbpf: Perform map fd cleanup for gen_loader in case of error

Alexei reported an fd leak issue in the gen loader (when invoked from
bpftool) [0]. When ksym support was added, map fd allocation moved from
the stack to the loader map; however, I missed closing these fds when
the cleanup label is jumped to on error. In the success case the
allocated fd is returned in the loader ctx, hence the problem went
unnoticed.

Make three changes. First, use MAX_USED_MAPS in MAX_FD_ARRAY_SZ instead
of MAX_USED_PROGS; the braino was harmless until now only because we
never tried to close map fds (otherwise doing so would have tried
closing 32 additional fds in the ksym BTF fd range). Second, close all
nr_maps fds in the cleanup label code, so that on error every temporary
map fd from bpf_gen__map_create is closed.
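
For context, a minimal sketch of the fd_array layout this sizing has to
cover (the slot helpers are illustrative, not code from gen_loader.c):
map fds occupy the first MAX_USED_MAPS slots of the blob, and the kfunc
BTF fd range starts right after them, as the slot computation in
add_kfunc_btf_fd in the diff below shows.

  #define MAX_USED_MAPS	64
  #define MAX_USED_PROGS	32
  #define MAX_KFUNC_DESCS	256
  #define MAX_FD_ARRAY_SZ	(MAX_USED_MAPS + MAX_KFUNC_DESCS)

  /* map fd for map_idx lives in slot [map_idx] ... */
  static int map_fd_slot(int map_idx)	{ return map_idx; }

  /* ... and kfunc BTF fd n lives in slot [MAX_USED_MAPS + n], so the old
   * MAX_USED_PROGS-based size left the blob 32 slots short of covering
   * the whole kfunc BTF fd range.
   */
  static int kfunc_btf_fd_slot(int n)	{ return MAX_USED_MAPS + n; }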

Third, make the cleanup label emit code only for the required number of
program and map fds. To trim the code for the remaining program fds, lay
out the prog_fd array at the end of the stack, so that the unused
entries can be skipped over directly. The stack size itself stays the
same, since shrinking it would require changes in a lot of places
(including adjustment of the stack_off macro); the nr_progs_sz variable
is only used to track the required number of iterations (and to compute
the cleanup code size to jump over), while stack offset calculation
remains unaffected.
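
To see the arithmetic, here is a short sketch (the struct matches the
hunk below; the nr_progs value is just an example): with prog_fd moved
to the end, the byte count the cleanup loop must walk for nr_progs
programs is simply the offset one past the last used prog_fd slot.

  #include <stddef.h>

  struct loader_stack {
  	__u32 btf_fd;
  	__u32 inner_map_fd;
  	__u32 prog_fd[MAX_USED_PROGS];
  };

  /* covers btf_fd, inner_map_fd and nr_progs prog fds; e.g. nr_progs == 3
   * gives 20 bytes, i.e. 5 iterations of 4 bytes each, matching the
   * "after = 5" figure below for test_ksyms_module.o
   */
  size_t nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);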

The difference for test_ksyms_module.o is as follows:
libbpf: //prog cleanup iterations: before = 34, after = 5
libbpf: //maps cleanup iterations: before = 64, after = 2

Also, move the allocation of the gen->fd_array offset into
bpf_gen__init. The offset can now be 0, and since we already continue
even when add_data returns 0 on failure, there is no need to
distinguish a valid 0 offset from the 0 returned on failure; we rely on
bpf_gen__finish to check for errors. The gen->fd_array check in the
add_*_fd functions can also be dropped, since bpf_gen__init takes care
of the allocation.
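
A simplified sketch of that convention (not the exact in-tree add_data,
which also rounds allocations up to 8 bytes): a 0 return can mean either
"first offset in the blob" or "allocation failed", and both are fine to
continue with, because the failure path records the error in gen->error
for bpf_gen__finish to report.

  static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
  {
  	void *prev;

  	/* realloc_data_buf() records any failure in gen->error, which
  	 * bpf_gen__finish() checks later ...
  	 */
  	if (realloc_data_buf(gen, size))
  		return 0;	/* ... so 0 may double as a valid first offset */
  	prev = gen->data_cur;
  	if (data)
  		memcpy(gen->data_cur, data, size);
  	else
  		memset(gen->data_cur, 0, size);
  	gen->data_cur += size;
  	return prev - gen->data_start;
  }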

  [0]: https://lore.kernel.org/bpf/CAADnVQJ6jSitKSNKyxOrUzwY2qDRX0sPkJ=VLGHuCLVJ=qOt9g@mail.gmail.com

Fixes: 18f4fccb ("libbpf: Update gen_loader to emit BTF_KIND_FUNC relocations")
Reported-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20211112232022.899074-1-memxor@gmail.com
parent 2453afe3
--- a/tools/lib/bpf/bpf_gen_internal.h
+++ b/tools/lib/bpf/bpf_gen_internal.h
@@ -45,8 +45,8 @@ struct bpf_gen {
 	int nr_fd_array;
 };
 
-void bpf_gen__init(struct bpf_gen *gen, int log_level);
-int bpf_gen__finish(struct bpf_gen *gen);
+void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps);
+int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps);
 void bpf_gen__free(struct bpf_gen *gen);
 void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
 void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx);
--- a/tools/lib/bpf/gen_loader.c
+++ b/tools/lib/bpf/gen_loader.c
@@ -18,7 +18,7 @@
 #define MAX_USED_MAPS 64
 #define MAX_USED_PROGS 32
 #define MAX_KFUNC_DESCS 256
-#define MAX_FD_ARRAY_SZ (MAX_USED_PROGS + MAX_KFUNC_DESCS)
+#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)
 
 /* The following structure describes the stack layout of the loader program.
  * In addition R6 contains the pointer to context.
@@ -33,8 +33,8 @@
  */
 struct loader_stack {
 	__u32 btf_fd;
-	__u32 prog_fd[MAX_USED_PROGS];
 	__u32 inner_map_fd;
+	__u32 prog_fd[MAX_USED_PROGS];
 };
 
 #define stack_off(field) \
@@ -42,6 +42,11 @@ struct loader_stack {
 
 #define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))
 
+static int blob_fd_array_off(struct bpf_gen *gen, int index)
+{
+	return gen->fd_array + index * sizeof(int);
+}
+
 static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
 {
 	size_t off = gen->insn_cur - gen->insn_start;
@@ -102,11 +107,15 @@ static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn in
 	emit(gen, insn2);
 }
 
-void bpf_gen__init(struct bpf_gen *gen, int log_level)
+static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
+static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);
+
+void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
 {
-	size_t stack_sz = sizeof(struct loader_stack);
+	size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
 	int i;
 
+	gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
 	gen->log_level = log_level;
 	/* save ctx pointer into R6 */
 	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));
@@ -118,19 +127,27 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level)
 	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
 	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
 
+	/* amount of stack actually used, only used to calculate iterations, not stack offset */
+	nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
 	/* jump over cleanup code */
 	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
-			      /* size of cleanup code below */
-			      (stack_sz / 4) * 3 + 2));
+			      /* size of cleanup code below (including map fd cleanup) */
+			      (nr_progs_sz / 4) * 3 + 2 +
+			      /* 6 insns for emit_sys_close_blob,
+			       * 6 insns for debug_regs in emit_sys_close_blob
+			       */
+			      nr_maps * (6 + (gen->log_level ? 6 : 0))));
 
 	/* remember the label where all error branches will jump to */
 	gen->cleanup_label = gen->insn_cur - gen->insn_start;
 	/* emit cleanup code: close all temp FDs */
-	for (i = 0; i < stack_sz; i += 4) {
+	for (i = 0; i < nr_progs_sz; i += 4) {
 		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
 		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
 		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
 	}
+	for (i = 0; i < nr_maps; i++)
+		emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
 	/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
 	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
 	emit(gen, BPF_EXIT_INSN());
@@ -160,8 +177,6 @@ static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
  */
 static int add_map_fd(struct bpf_gen *gen)
 {
-	if (!gen->fd_array)
-		gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
 	if (gen->nr_maps == MAX_USED_MAPS) {
 		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
 		gen->error = -E2BIG;
@@ -174,8 +189,6 @@ static int add_kfunc_btf_fd(struct bpf_gen *gen)
 {
 	int cur;
 
-	if (!gen->fd_array)
-		gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
 	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
 		cur = add_data(gen, NULL, sizeof(int));
 		return (cur - gen->fd_array) / sizeof(int);
@@ -183,11 +196,6 @@ static int add_kfunc_btf_fd(struct bpf_gen *gen)
 	return MAX_USED_MAPS + gen->nr_fd_array++;
 }
 
-static int blob_fd_array_off(struct bpf_gen *gen, int index)
-{
-	return gen->fd_array + index * sizeof(int);
-}
-
 static int insn_bytes_to_bpf_size(__u32 sz)
 {
 	switch (sz) {
@@ -359,10 +367,15 @@ static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
 	__emit_sys_close(gen);
 }
 
-int bpf_gen__finish(struct bpf_gen *gen)
+int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
 {
 	int i;
 
+	if (nr_progs != gen->nr_progs || nr_maps != gen->nr_maps) {
+		pr_warn("progs/maps mismatch\n");
+		gen->error = -EFAULT;
+		return gen->error;
+	}
 	emit_sys_close_stack(gen, stack_off(btf_fd));
 	for (i = 0; i < gen->nr_progs; i++)
 		move_stack2ctx(gen,
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -7258,7 +7258,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 	}
 
 	if (obj->gen_loader)
-		bpf_gen__init(obj->gen_loader, attr->log_level);
+		bpf_gen__init(obj->gen_loader, attr->log_level, obj->nr_programs, obj->nr_maps);
 
 	err = bpf_object__probe_loading(obj);
 	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
@@ -7277,7 +7277,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 		for (i = 0; i < obj->nr_maps; i++)
 			obj->maps[i].fd = -1;
 		if (!err)
-			err = bpf_gen__finish(obj->gen_loader);
+			err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
 	}
 
 	/* clean up fd_array */