Commit b9989b59 authored by Alexei Starovoitov

Merge branch 'Typeless/weak ksym for gen_loader + misc fixups'

Kumar Kartikeya Dwivedi says:

====================

Patches (1,2,3,6) add typeless and weak ksym support to gen_loader, as a
follow-up to the recent 'kfunc from modules' series.

The later patches (7,8) are misc fixes for selftests. Patch 4 is a libbpf fix
where we take care not to end up with fd == 0 from the low-level APIs, as
libbpf assumes in various places that valid fds are greater than 0. Patch 5
adds the missing O_CLOEXEC flag to fd-creating calls in libbpf.
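
As a quick illustration of the fd issue (a minimal sketch assuming only POSIX
fcntl()/close(); the real helper is ensure_good_fd() in the libbpf_internal.h
hunk below): whenever a syscall hands back fd 0, 1, or 2, it is duplicated
above the standard descriptors with close-on-exec set and the original is
closed, so callers may keep treating fd <= 0 as invalid:

	#include <errno.h>
	#include <fcntl.h>
	#include <unistd.h>

	static int sanitize_fd(int fd)
	{
		int old_fd = fd, saved_errno;

		if (fd >= 0 && fd < 3) {
			/* request the smallest free fd >= 3, with close-on-exec */
			fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
			saved_errno = errno;
			close(old_fd);
			if (fd < 0)
				errno = saved_errno;
		}
		return fd;
	}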

Changelog:
----------
v4 -> v5
v4: https://lore.kernel.org/bpf/20211020191526.2306852-1-memxor@gmail.com

 * Address feedback from Andrii
   * Drop use of ensure_good_fd in unneeded call sites
   * Add sys_bpf_fd
   * Add _lskel suffix to all light skeletons and change all current selftests
   * Drop early break in close loop for sk_lookup
   * Fix other nits

v3 -> v4
v3: https://lore.kernel.org/bpf/20211014205644.1837280-1-memxor@gmail.com

 * Remove gpl_only = true from bpf_kallsyms_lookup_name (Alexei)
 * Add bpf_dump_raw_ok check to ensure kptr_restrict isn't bypassed (Alexei)

v2 -> v3
v2: https://lore.kernel.org/bpf/20211013073348.1611155-1-memxor@gmail.com

 * Address feedback from Song
   * Move ksym logging to separate helper to avoid code duplication
   * Move src_reg mask stuff to separate helper
   * Fix various other nits, add acks
     * __builtin_expect is used instead of likely() since skel_internal.h is
       included in isolation (see the sketch after this changelog).

v1 -> v2
v1: https://lore.kernel.org/bpf/20211006002853.308945-1-memxor@gmail.com

 * Remove redundant OOM checks in emit_bpf_kallsyms_lookup_name
 * Use designated initializer for sk_lookup fd array (Jakub)
 * Do fd check for all fd returning low level APIs (Andrii, Alexei)
 * Make Fixes: tag quote commit message, use selftests/bpf prefix (Song, Andrii)
 * Split typeless and weak ksym support into separate patches, expand commit
   message (Song)
 * Fix duplication in selftests stemming from use of LSKELS_EXTRA (Song)
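
As noted in the v2 -> v3 changelog above, skel_internal.h is included on its
own, without kernel headers that define likely()/unlikely(), so branch hints
are open-coded. A hypothetical illustration of the equivalence (the
skel_unlikely name and skel_check_fd helper are illustrative, not part of the
patch):

	/* in the kernel, unlikely(x) expands to this same builtin */
	#define skel_unlikely(x) __builtin_expect(!!(x), 0)

	static inline int skel_check_fd(int fd)
	{
		if (skel_unlikely(fd < 0))
			return -1;
		return fd;
	}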
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 2895f48f efadf2ad
......@@ -2110,6 +2110,7 @@ extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
......
......@@ -4923,6 +4923,21 @@ union bpf_attr {
* Dynamically cast a *sk* pointer to a *unix_sock* pointer.
* Return
* *sk* if casting is valid, or **NULL** otherwise.
*
* long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
* Description
* Get the address of a kernel symbol, returned in *res*. *res* is
* set to 0 if the symbol is not found.
* Return
* On success, zero. On error, a negative value.
*
* **-EINVAL** if *flags* is not zero.
*
* **-EINVAL** if string *name* is not the same size as *name_sz*.
*
* **-ENOENT** if symbol is not found.
*
* **-EPERM** if caller does not have permission to obtain kernel address.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
......@@ -5104,6 +5119,7 @@ union bpf_attr {
FN(get_branch_snapshot), \
FN(trace_vprintk), \
FN(skc_to_unix_sock), \
FN(kallsyms_lookup_name), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
......
......@@ -4781,6 +4781,31 @@ static const struct bpf_func_proto bpf_sys_close_proto = {
.arg1_type = ARG_ANYTHING,
};
BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{
if (flags)
return -EINVAL;
if (name_sz <= 1 || name[name_sz - 1])
return -EINVAL;
if (!bpf_dump_raw_ok(current_cred()))
return -EPERM;
*res = kallsyms_lookup_name(name);
return *res ? 0 : -ENOENT;
}
const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
.func = bpf_kallsyms_lookup_name,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_MEM,
.arg2_type = ARG_CONST_SIZE,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_LONG,
};
static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
......@@ -4791,6 +4816,8 @@ syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_btf_find_by_name_kind_proto;
case BPF_FUNC_sys_close:
return &bpf_sys_close_proto;
case BPF_FUNC_kallsyms_lookup_name:
return &bpf_kallsyms_lookup_name_proto;
default:
return tracing_prog_func_proto(func_id, prog);
}
......
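For context, a hedged sketch of how a BPF_PROG_TYPE_SYSCALL program could call
the new helper once this uapi change lands (the section name, the symbol being
looked up, and the bpf_kallsyms_lookup_name() declaration being generated into
bpf_helpers.h/bpf_helper_defs.h are assumptions for illustration):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	SEC("syscall")
	int lookup(void *ctx)
	{
		const char name[] = "bpf_prog_active"; /* any kallsyms-visible symbol */
		__u64 addr = 0;
		long ret;

		/* name_sz counts the NUL terminator; flags must be 0 */
		ret = bpf_kallsyms_lookup_name(name, sizeof(name), 0, &addr);
		return ret ? 1 : 0;
	}

	char _license[] SEC("license") = "GPL";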
......@@ -4923,6 +4923,21 @@ union bpf_attr {
* Dynamically cast a *sk* pointer to a *unix_sock* pointer.
* Return
* *sk* if casting is valid, or **NULL** otherwise.
*
* long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
* Description
* Get the address of a kernel symbol, returned in *res*. *res* is
* set to 0 if the symbol is not found.
* Return
* On success, zero. On error, a negative value.
*
* **-EINVAL** if *flags* is not zero.
*
* **-EINVAL** if string *name* is not the same size as *name_sz*.
*
* **-ENOENT** if symbol is not found.
*
* **-EPERM** if caller does not have permission to obtain kernel address.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
......@@ -5104,6 +5119,7 @@ union bpf_attr {
FN(get_branch_snapshot), \
FN(trace_vprintk), \
FN(skc_to_unix_sock), \
FN(kallsyms_lookup_name), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
......
......@@ -65,13 +65,22 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
return syscall(__NR_bpf, cmd, attr, size);
}
static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
unsigned int size)
{
int fd;
fd = sys_bpf(cmd, attr, size);
return ensure_good_fd(fd);
}
static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
{
int retries = 5;
int fd;
do {
fd = sys_bpf(BPF_PROG_LOAD, attr, size);
fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
} while (fd < 0 && errno == EAGAIN && retries-- > 0);
return fd;
......@@ -104,7 +113,7 @@ int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr
attr.inner_map_fd = create_attr->inner_map_fd;
attr.map_extra = create_attr->map_extra;
fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
......@@ -206,7 +215,7 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
attr.numa_node = node;
}
fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
......@@ -634,7 +643,7 @@ int bpf_obj_get(const char *pathname)
memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
fd = sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
......@@ -745,7 +754,7 @@ int bpf_link_create(int prog_fd, int target_fd,
break;
}
proceed:
fd = sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
......@@ -788,7 +797,7 @@ int bpf_iter_create(int link_fd)
memset(&attr, 0, sizeof(attr));
attr.iter_create.link_fd = link_fd;
fd = sys_bpf(BPF_ITER_CREATE, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
......@@ -946,7 +955,7 @@ int bpf_prog_get_fd_by_id(__u32 id)
memset(&attr, 0, sizeof(attr));
attr.prog_id = id;
fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
......@@ -958,7 +967,7 @@ int bpf_map_get_fd_by_id(__u32 id)
memset(&attr, 0, sizeof(attr));
attr.map_id = id;
fd = sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
......@@ -970,7 +979,7 @@ int bpf_btf_get_fd_by_id(__u32 id)
memset(&attr, 0, sizeof(attr));
attr.btf_id = id;
fd = sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
......@@ -982,7 +991,7 @@ int bpf_link_get_fd_by_id(__u32 id)
memset(&attr, 0, sizeof(attr));
attr.link_id = id;
fd = sys_bpf(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
......@@ -1013,7 +1022,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
attr.raw_tracepoint.name = ptr_to_u64(name);
attr.raw_tracepoint.prog_fd = prog_fd;
fd = sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
......@@ -1033,7 +1042,7 @@ int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_s
attr.btf_log_buf = ptr_to_u64(log_buf);
}
fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, sizeof(attr));
if (fd < 0 && !do_log && log_buf && log_buf_size) {
do_log = true;
......@@ -1075,7 +1084,7 @@ int bpf_enable_stats(enum bpf_stats_type type)
memset(&attr, 0, sizeof(attr));
attr.enable_stats.type = type;
fd = sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
......
......@@ -8,13 +8,19 @@ struct ksym_relo_desc {
int kind;
int insn_idx;
bool is_weak;
bool is_typeless;
};
struct ksym_desc {
const char *name;
int ref;
int kind;
int off;
union {
/* used for kfunc */
int off;
/* used for typeless ksym */
bool typeless;
};
int insn;
};
......@@ -49,7 +55,7 @@ void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_a
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, int kind,
int insn_idx);
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
bool is_typeless, int kind, int insn_idx);
#endif
......@@ -897,7 +897,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
}
fd = open(path, O_RDONLY);
fd = open(path, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
err = -errno;
pr_warn("failed to open %s: %s\n", path, strerror(errno));
......
......@@ -13,6 +13,7 @@
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
#include <asm/byteorder.h>
#define MAX_USED_MAPS 64
#define MAX_USED_PROGS 32
......@@ -560,7 +561,7 @@ static void emit_find_attach_target(struct bpf_gen *gen)
}
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
int kind, int insn_idx)
bool is_typeless, int kind, int insn_idx)
{
struct ksym_relo_desc *relo;
......@@ -573,6 +574,7 @@ void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
relo += gen->relo_cnt;
relo->name = name;
relo->is_weak = is_weak;
relo->is_typeless = is_typeless;
relo->kind = kind;
relo->insn_idx = insn_idx;
gen->relo_cnt++;
......@@ -622,6 +624,29 @@ static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_des
debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
}
/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
* Returns result in BPF_REG_7
* Returns u64 symbol addr in BPF_REG_9
*/
static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
int name_off, len = strlen(relo->name) + 1, res_off;
name_off = add_data(gen, relo->name, len);
res_off = add_data(gen, NULL, 8); /* res is u64 */
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, name_off));
emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, res_off));
emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
}
/* Expects:
* BPF_REG_8 - pointer to instruction
*
......@@ -701,10 +726,27 @@ static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo
relo->name, kdesc->ref);
}
static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
int ref)
{
if (!gen->log_level)
return;
emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
offsetof(struct bpf_insn, imm)));
emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
offsetof(struct bpf_insn, imm)));
debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
relo->is_typeless, relo->is_weak, relo->name, ref);
emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
relo->is_typeless, relo->is_weak, relo->name, ref);
}
/* Expects:
* BPF_REG_8 - pointer to instruction
*/
static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
static void emit_relo_ksym_typeless(struct bpf_gen *gen,
struct ksym_relo_desc *relo, int insn)
{
struct ksym_desc *kdesc;
......@@ -719,25 +761,81 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
goto log;
}
/* remember insn offset, so we can copy ksym addr later */
kdesc->insn = insn;
/* skip typeless ksym_desc in fd closing loop in cleanup_relos */
kdesc->typeless = true;
emit_bpf_kallsyms_lookup_name(gen, relo);
emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
emit_check_err(gen);
/* store lower half of addr into insn[insn_idx].imm */
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
/* store upper half of addr into insn[insn_idx + 1].imm */
emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
log:
emit_ksym_relo_log(gen, relo, kdesc->ref);
}
static __u32 src_reg_mask(void)
{
#if defined(__LITTLE_ENDIAN_BITFIELD)
return 0x0f; /* src_reg,dst_reg,... */
#elif defined(__BIG_ENDIAN_BITFIELD)
return 0xf0; /* dst_reg,src_reg,... */
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
}
/* Expects:
* BPF_REG_8 - pointer to instruction
*/
static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
struct ksym_desc *kdesc;
__u32 reg_mask;
kdesc = get_ksym_desc(gen, relo);
if (!kdesc)
return;
/* try to copy from existing ldimm64 insn */
if (kdesc->ref > 1) {
move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
kdesc->insn + offsetof(struct bpf_insn, imm));
move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_8, offsetof(struct bpf_insn, imm)));
/* jump over src_reg adjustment if imm is not 0 */
emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 3));
goto clear_src_reg;
}
/* remember insn offset, so we can copy BTF ID and FD later */
kdesc->insn = insn;
emit_bpf_find_by_name_kind(gen, relo);
emit_check_err(gen);
if (!relo->is_weak)
emit_check_err(gen);
/* set default values as 0 */
emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
/* skip success case stores if ret < 0 */
emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 4));
/* store btf_id into insn[insn_idx].imm */
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
/* store btf_obj_fd into insn[insn_idx + 1].imm */
emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
log:
if (!gen->log_level)
return;
emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
offsetof(struct bpf_insn, imm)));
emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
offsetof(struct bpf_insn, imm)));
debug_regs(gen, BPF_REG_7, BPF_REG_9, " var (%s:count=%d): imm: %%d, fd: %%d",
relo->name, kdesc->ref);
emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
clear_src_reg:
/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
reg_mask = src_reg_mask();
emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));
emit_ksym_relo_log(gen, relo, kdesc->ref);
}
static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
......@@ -749,7 +847,10 @@ static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
switch (relo->kind) {
case BTF_KIND_VAR:
emit_relo_ksym_btf(gen, relo, insn);
if (relo->is_typeless)
emit_relo_ksym_typeless(gen, relo, insn);
else
emit_relo_ksym_btf(gen, relo, insn);
break;
case BTF_KIND_FUNC:
emit_relo_kfunc_btf(gen, relo, insn);
......@@ -774,12 +875,13 @@ static void cleanup_relos(struct bpf_gen *gen, int insns)
int i, insn;
for (i = 0; i < gen->nr_ksyms; i++) {
if (gen->ksyms[i].kind == BTF_KIND_VAR) {
/* only close fds for typed ksyms and kfuncs */
if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) {
/* close fd recorded in insn[insn_idx + 1].imm */
insn = gen->ksyms[i].insn;
insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
emit_sys_close_blob(gen, insn);
} else { /* BTF_KIND_FUNC */
} else if (gen->ksyms[i].kind == BTF_KIND_FUNC) {
emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off));
if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ)
gen->nr_fd_array--;
......
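To make the generated instruction stream above easier to follow, this is what
emit_relo_ksym_typeless() accomplishes, written as ordinary C (a sketch, not
part of the patch): the u64 address returned by bpf_kallsyms_lookup_name() is
split across the two imm fields of the ldimm64 instruction pair. The
clear_src_reg step in emit_relo_ksym_btf() similarly rewrites insn.src_reg in
place, with src_reg_mask() accounting for bitfield endianness in struct
bpf_insn.

	#include <linux/bpf.h>

	static void patch_ldimm64_addr(struct bpf_insn insn[2], __u64 addr)
	{
		insn[0].imm = (__u32)addr;         /* lower half -> insn[insn_idx].imm */
		insn[1].imm = (__u32)(addr >> 32); /* upper half -> insn[insn_idx + 1].imm */
	}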
......@@ -1232,7 +1232,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
*/
elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
} else {
obj->efile.fd = open(obj->path, O_RDONLY);
obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
if (obj->efile.fd < 0) {
char errmsg[STRERR_BUFSIZE], *cp;
......@@ -6575,17 +6575,14 @@ static int bpf_program__record_externs(struct bpf_program *prog)
case RELO_EXTERN_VAR:
if (ext->type != EXT_KSYM)
continue;
if (!ext->ksym.type_id) {
pr_warn("typeless ksym %s is not supported yet\n",
ext->name);
return -ENOTSUP;
}
bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak,
bpf_gen__record_extern(obj->gen_loader, ext->name,
ext->is_weak, !ext->ksym.type_id,
BTF_KIND_VAR, relo->insn_idx);
break;
case RELO_EXTERN_FUNC:
bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak,
BTF_KIND_FUNC, relo->insn_idx);
bpf_gen__record_extern(obj->gen_loader, ext->name,
ext->is_weak, false, BTF_KIND_FUNC,
relo->insn_idx);
break;
default:
continue;
......@@ -9618,7 +9615,7 @@ static int append_to_file(const char *file, const char *fmt, ...)
int fd, n, err = 0;
va_list ap;
fd = open(file, O_WRONLY | O_APPEND, 0);
fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
if (fd < 0)
return -errno;
......@@ -11263,7 +11260,7 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
int fd, err = 0, len;
char buf[128];
fd = open(fcpu, O_RDONLY);
fd = open(fcpu, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
err = -errno;
pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
......
......@@ -13,6 +13,8 @@
#include <limits.h>
#include <errno.h>
#include <linux/err.h>
#include <fcntl.h>
#include <unistd.h>
#include "libbpf_legacy.h"
#include "relo_core.h"
......@@ -491,4 +493,26 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn)
return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}
/* If fd is stdin, stdout, or stderr, dup it to an fd greater than 2.
 * Takes ownership of the fd passed in: the original is closed after being
 * duplicated via fcntl(fd, F_DUPFD_CLOEXEC, 3).
 */
static inline int ensure_good_fd(int fd)
{
int old_fd = fd, saved_errno;
if (fd < 0)
return fd;
if (fd < 3) {
fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
saved_errno = errno;
close(old_fd);
if (fd < 0) {
pr_warn("failed to dup FD %d to FD > 2: %d\n", old_fd, -saved_errno);
errno = saved_errno;
}
}
return fd;
}
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
......@@ -33,7 +33,7 @@ static int get_vendor_id(int ifindex)
snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
fd = open(path, O_RDONLY);
fd = open(path, O_RDONLY | O_CLOEXEC);
if (fd < 0)
return -1;
......
......@@ -301,7 +301,7 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
if (!linker->filename)
return -ENOMEM;
linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC, 0644);
linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
if (linker->fd < 0) {
err = -errno;
pr_warn("failed to create '%s': %d\n", file, err);
......@@ -556,7 +556,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
obj->filename = filename;
obj->fd = open(filename, O_RDONLY);
obj->fd = open(filename, O_RDONLY | O_CLOEXEC);
if (obj->fd < 0) {
err = -errno;
pr_warn("failed to open file '%s': %d\n", filename, err);
......
......@@ -300,7 +300,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
if (!umem)
return -ENOMEM;
umem->fd = socket(AF_XDP, SOCK_RAW, 0);
umem->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
if (umem->fd < 0) {
err = -errno;
goto out_umem_alloc;
......@@ -549,7 +549,7 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
struct ifreq ifr = {};
int fd, err, ret;
fd = socket(AF_LOCAL, SOCK_DGRAM, 0);
fd = socket(AF_LOCAL, SOCK_DGRAM | SOCK_CLOEXEC, 0);
if (fd < 0)
return -errno;
......@@ -1046,7 +1046,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
}
if (umem->refcount++ > 0) {
xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
xsk->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
if (xsk->fd < 0) {
err = -errno;
goto out_xsk_alloc;
......
......@@ -325,7 +325,7 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c
# Generate both light skeleton and libbpf skeleton for these
LSKELS_EXTRA := test_ksyms_module.c
LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c
SKEL_BLACKLIST += $$(LSKELS)
test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o
......@@ -404,7 +404,7 @@ $(TRUNNER_BPF_LSKELS): %.lskel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)$$(BPFTOOL) gen object $$(<:.o=.linked2.o) $$(<:.o=.linked1.o)
$(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o)
$(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@
$(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=_lskel)) > $$@
$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.o))
......
......@@ -4,13 +4,13 @@
#include "atomics.lskel.h"
static void test_add(struct atomics *skel)
static void test_add(struct atomics_lskel *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
int link_fd;
link_fd = atomics__add__attach(skel);
link_fd = atomics_lskel__add__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(add)"))
return;
......@@ -36,13 +36,13 @@ static void test_add(struct atomics *skel)
close(link_fd);
}
static void test_sub(struct atomics *skel)
static void test_sub(struct atomics_lskel *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
int link_fd;
link_fd = atomics__sub__attach(skel);
link_fd = atomics_lskel__sub__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(sub)"))
return;
......@@ -69,13 +69,13 @@ static void test_sub(struct atomics *skel)
close(link_fd);
}
static void test_and(struct atomics *skel)
static void test_and(struct atomics_lskel *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
int link_fd;
link_fd = atomics__and__attach(skel);
link_fd = atomics_lskel__and__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(and)"))
return;
......@@ -97,13 +97,13 @@ static void test_and(struct atomics *skel)
close(link_fd);
}
static void test_or(struct atomics *skel)
static void test_or(struct atomics_lskel *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
int link_fd;
link_fd = atomics__or__attach(skel);
link_fd = atomics_lskel__or__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(or)"))
return;
......@@ -126,13 +126,13 @@ static void test_or(struct atomics *skel)
close(link_fd);
}
static void test_xor(struct atomics *skel)
static void test_xor(struct atomics_lskel *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
int link_fd;
link_fd = atomics__xor__attach(skel);
link_fd = atomics_lskel__xor__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(xor)"))
return;
......@@ -154,13 +154,13 @@ static void test_xor(struct atomics *skel)
close(link_fd);
}
static void test_cmpxchg(struct atomics *skel)
static void test_cmpxchg(struct atomics_lskel *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
int link_fd;
link_fd = atomics__cmpxchg__attach(skel);
link_fd = atomics_lskel__cmpxchg__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)"))
return;
......@@ -183,13 +183,13 @@ static void test_cmpxchg(struct atomics *skel)
close(link_fd);
}
static void test_xchg(struct atomics *skel)
static void test_xchg(struct atomics_lskel *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
int link_fd;
link_fd = atomics__xchg__attach(skel);
link_fd = atomics_lskel__xchg__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(xchg)"))
return;
......@@ -212,10 +212,10 @@ static void test_xchg(struct atomics *skel)
void test_atomics(void)
{
struct atomics *skel;
struct atomics_lskel *skel;
__u32 duration = 0;
skel = atomics__open_and_load();
skel = atomics_lskel__open_and_load();
if (CHECK(!skel, "skel_load", "atomics skeleton failed\n"))
return;
......@@ -243,5 +243,5 @@ void test_atomics(void)
test_xchg(skel);
cleanup:
atomics__destroy(skel);
atomics_lskel__destroy(skel);
}
......@@ -6,23 +6,23 @@
void test_fentry_fexit(void)
{
struct fentry_test *fentry_skel = NULL;
struct fexit_test *fexit_skel = NULL;
struct fentry_test_lskel *fentry_skel = NULL;
struct fexit_test_lskel *fexit_skel = NULL;
__u64 *fentry_res, *fexit_res;
__u32 duration = 0, retval;
int err, prog_fd, i;
fentry_skel = fentry_test__open_and_load();
fentry_skel = fentry_test_lskel__open_and_load();
if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n"))
goto close_prog;
fexit_skel = fexit_test__open_and_load();
fexit_skel = fexit_test_lskel__open_and_load();
if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
goto close_prog;
err = fentry_test__attach(fentry_skel);
err = fentry_test_lskel__attach(fentry_skel);
if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err))
goto close_prog;
err = fexit_test__attach(fexit_skel);
err = fexit_test_lskel__attach(fexit_skel);
if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
goto close_prog;
......@@ -44,6 +44,6 @@ void test_fentry_fexit(void)
}
close_prog:
fentry_test__destroy(fentry_skel);
fexit_test__destroy(fexit_skel);
fentry_test_lskel__destroy(fentry_skel);
fexit_test_lskel__destroy(fexit_skel);
}
......@@ -3,19 +3,19 @@
#include <test_progs.h>
#include "fentry_test.lskel.h"
static int fentry_test(struct fentry_test *fentry_skel)
static int fentry_test(struct fentry_test_lskel *fentry_skel)
{
int err, prog_fd, i;
__u32 duration = 0, retval;
int link_fd;
__u64 *result;
err = fentry_test__attach(fentry_skel);
err = fentry_test_lskel__attach(fentry_skel);
if (!ASSERT_OK(err, "fentry_attach"))
return err;
/* Check that already linked program can't be attached again. */
link_fd = fentry_test__test1__attach(fentry_skel);
link_fd = fentry_test_lskel__test1__attach(fentry_skel);
if (!ASSERT_LT(link_fd, 0, "fentry_attach_link"))
return -1;
......@@ -31,7 +31,7 @@ static int fentry_test(struct fentry_test *fentry_skel)
return -1;
}
fentry_test__detach(fentry_skel);
fentry_test_lskel__detach(fentry_skel);
/* zero results for re-attach test */
memset(fentry_skel->bss, 0, sizeof(*fentry_skel->bss));
......@@ -40,10 +40,10 @@ static int fentry_test(struct fentry_test *fentry_skel)
void test_fentry_test(void)
{
struct fentry_test *fentry_skel = NULL;
struct fentry_test_lskel *fentry_skel = NULL;
int err;
fentry_skel = fentry_test__open_and_load();
fentry_skel = fentry_test_lskel__open_and_load();
if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load"))
goto cleanup;
......@@ -55,5 +55,5 @@ void test_fentry_test(void)
ASSERT_OK(err, "fentry_second_attach");
cleanup:
fentry_test__destroy(fentry_skel);
fentry_test_lskel__destroy(fentry_skel);
}
......@@ -10,7 +10,7 @@
static int do_sleep(void *skel)
{
struct fexit_sleep *fexit_skel = skel;
struct fexit_sleep_lskel *fexit_skel = skel;
struct timespec ts1 = { .tv_nsec = 1 };
struct timespec ts2 = { .tv_sec = 10 };
......@@ -25,16 +25,16 @@ static char child_stack[STACK_SIZE];
void test_fexit_sleep(void)
{
struct fexit_sleep *fexit_skel = NULL;
struct fexit_sleep_lskel *fexit_skel = NULL;
int wstatus, duration = 0;
pid_t cpid;
int err, fexit_cnt;
fexit_skel = fexit_sleep__open_and_load();
fexit_skel = fexit_sleep_lskel__open_and_load();
if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
goto cleanup;
err = fexit_sleep__attach(fexit_skel);
err = fexit_sleep_lskel__attach(fexit_skel);
if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
goto cleanup;
......@@ -60,7 +60,7 @@ void test_fexit_sleep(void)
*/
close(fexit_skel->progs.nanosleep_fentry.prog_fd);
close(fexit_skel->progs.nanosleep_fexit.prog_fd);
fexit_sleep__detach(fexit_skel);
fexit_sleep_lskel__detach(fexit_skel);
/* kill the thread to unwind sys_nanosleep stack through the trampoline */
kill(cpid, 9);
......@@ -78,5 +78,5 @@ void test_fexit_sleep(void)
goto cleanup;
cleanup:
fexit_sleep__destroy(fexit_skel);
fexit_sleep_lskel__destroy(fexit_skel);
}
......@@ -3,19 +3,19 @@
#include <test_progs.h>
#include "fexit_test.lskel.h"
static int fexit_test(struct fexit_test *fexit_skel)
static int fexit_test(struct fexit_test_lskel *fexit_skel)
{
int err, prog_fd, i;
__u32 duration = 0, retval;
int link_fd;
__u64 *result;
err = fexit_test__attach(fexit_skel);
err = fexit_test_lskel__attach(fexit_skel);
if (!ASSERT_OK(err, "fexit_attach"))
return err;
/* Check that already linked program can't be attached again. */
link_fd = fexit_test__test1__attach(fexit_skel);
link_fd = fexit_test_lskel__test1__attach(fexit_skel);
if (!ASSERT_LT(link_fd, 0, "fexit_attach_link"))
return -1;
......@@ -31,7 +31,7 @@ static int fexit_test(struct fexit_test *fexit_skel)
return -1;
}
fexit_test__detach(fexit_skel);
fexit_test_lskel__detach(fexit_skel);
/* zero results for re-attach test */
memset(fexit_skel->bss, 0, sizeof(*fexit_skel->bss));
......@@ -40,10 +40,10 @@ static int fexit_test(struct fexit_test *fexit_skel)
void test_fexit_test(void)
{
struct fexit_test *fexit_skel = NULL;
struct fexit_test_lskel *fexit_skel = NULL;
int err;
fexit_skel = fexit_test__open_and_load();
fexit_skel = fexit_test_lskel__open_and_load();
if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load"))
goto cleanup;
......@@ -55,5 +55,5 @@ void test_fexit_test(void)
ASSERT_OK(err, "fexit_second_attach");
cleanup:
fexit_test__destroy(fexit_skel);
fexit_test_lskel__destroy(fexit_skel);
}
......@@ -7,10 +7,10 @@
static void test_main(void)
{
struct kfunc_call_test *skel;
struct kfunc_call_test_lskel *skel;
int prog_fd, retval, err;
skel = kfunc_call_test__open_and_load();
skel = kfunc_call_test_lskel__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel"))
return;
......@@ -26,7 +26,7 @@ static void test_main(void)
ASSERT_OK(err, "bpf_prog_test_run(test2)");
ASSERT_EQ(retval, 3, "test2-retval");
kfunc_call_test__destroy(skel);
kfunc_call_test_lskel__destroy(skel);
}
static void test_subprog(void)
......
......@@ -7,6 +7,7 @@
#include "test_ksyms_btf.skel.h"
#include "test_ksyms_btf_null_check.skel.h"
#include "test_ksyms_weak.skel.h"
#include "test_ksyms_weak.lskel.h"
static int duration;
......@@ -89,11 +90,11 @@ static void test_weak_syms(void)
int err;
skel = test_ksyms_weak__open_and_load();
if (CHECK(!skel, "test_ksyms_weak__open_and_load", "failed\n"))
if (!ASSERT_OK_PTR(skel, "test_ksyms_weak__open_and_load"))
return;
err = test_ksyms_weak__attach(skel);
if (CHECK(err, "test_ksyms_weak__attach", "skeleton attach failed: %d\n", err))
if (!ASSERT_OK(err, "test_ksyms_weak__attach"))
goto cleanup;
/* trigger tracepoint */
......@@ -109,6 +110,33 @@ static void test_weak_syms(void)
test_ksyms_weak__destroy(skel);
}
static void test_weak_syms_lskel(void)
{
struct test_ksyms_weak_lskel *skel;
struct test_ksyms_weak_lskel__data *data;
int err;
skel = test_ksyms_weak_lskel__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_ksyms_weak_lskel__open_and_load"))
return;
err = test_ksyms_weak_lskel__attach(skel);
if (!ASSERT_OK(err, "test_ksyms_weak_lskel__attach"))
goto cleanup;
/* trigger tracepoint */
usleep(1);
data = skel->data;
ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym");
ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym");
ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym");
ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym");
cleanup:
test_ksyms_weak_lskel__destroy(skel);
}
void test_ksyms_btf(void)
{
int percpu_datasec;
......@@ -136,4 +164,7 @@ void test_ksyms_btf(void)
if (test__start_subtest("weak_ksyms"))
test_weak_syms();
if (test__start_subtest("weak_ksyms_lskel"))
test_weak_syms_lskel();
}
......@@ -4,10 +4,11 @@
#include <test_progs.h>
#include <network_helpers.h>
#include "test_ksyms_module.lskel.h"
#include "test_ksyms_module.skel.h"
void test_ksyms_module(void)
void test_ksyms_module_lskel(void)
{
struct test_ksyms_module *skel;
struct test_ksyms_module_lskel *skel;
int retval;
int err;
......@@ -16,8 +17,8 @@ void test_ksyms_module(void)
return;
}
skel = test_ksyms_module__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open_and_load"))
skel = test_ksyms_module_lskel__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_ksyms_module_lskel__open_and_load"))
return;
err = bpf_prog_test_run(skel->progs.load.prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
NULL, NULL, (__u32 *)&retval, NULL);
......@@ -25,6 +26,37 @@ void test_ksyms_module(void)
goto cleanup;
ASSERT_EQ(retval, 0, "retval");
ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym");
cleanup:
test_ksyms_module_lskel__destroy(skel);
}
void test_ksyms_module_libbpf(void)
{
struct test_ksyms_module *skel;
int retval, err;
if (!env.has_testmod) {
test__skip();
return;
}
skel = test_ksyms_module__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open"))
return;
err = bpf_prog_test_run(bpf_program__fd(skel->progs.load), 1, &pkt_v4,
sizeof(pkt_v4), NULL, NULL, (__u32 *)&retval, NULL);
if (!ASSERT_OK(err, "bpf_prog_test_run"))
goto cleanup;
ASSERT_EQ(retval, 0, "retval");
ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym");
cleanup:
test_ksyms_module__destroy(skel);
}
void test_ksyms_module(void)
{
if (test__start_subtest("lskel"))
test_ksyms_module_lskel();
if (test__start_subtest("libbpf"))
test_ksyms_module_libbpf();
}
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "test_ksyms_module.skel.h"
void test_ksyms_module_libbpf(void)
{
struct test_ksyms_module *skel;
int retval, err;
if (!env.has_testmod) {
test__skip();
return;
}
skel = test_ksyms_module__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open"))
return;
err = bpf_prog_test_run(bpf_program__fd(skel->progs.load), 1, &pkt_v4,
sizeof(pkt_v4), NULL, NULL, (__u32 *)&retval, NULL);
if (!ASSERT_OK(err, "bpf_prog_test_run"))
goto cleanup;
ASSERT_EQ(retval, 0, "retval");
ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym");
cleanup:
test_ksyms_module__destroy(skel);
}
......@@ -58,7 +58,7 @@ static int process_sample(void *ctx, void *data, size_t len)
}
}
static struct test_ringbuf *skel;
static struct test_ringbuf_lskel *skel;
static struct ring_buffer *ringbuf;
static void trigger_samples()
......@@ -90,13 +90,13 @@ void test_ringbuf(void)
int page_size = getpagesize();
void *mmap_ptr, *tmp_ptr;
skel = test_ringbuf__open();
skel = test_ringbuf_lskel__open();
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
skel->maps.ringbuf.max_entries = page_size;
err = test_ringbuf__load(skel);
err = test_ringbuf_lskel__load(skel);
if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
goto cleanup;
......@@ -154,7 +154,7 @@ void test_ringbuf(void)
if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
goto cleanup;
err = test_ringbuf__attach(skel);
err = test_ringbuf_lskel__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
goto cleanup;
......@@ -292,8 +292,8 @@ void test_ringbuf(void)
CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
1L, skel->bss->discarded);
test_ringbuf__detach(skel);
test_ringbuf_lskel__detach(skel);
cleanup:
ring_buffer__free(ringbuf);
test_ringbuf__destroy(skel);
test_ringbuf_lskel__destroy(skel);
}
......@@ -598,7 +598,7 @@ static void query_lookup_prog(struct test_sk_lookup *skel)
static void run_lookup_prog(const struct test *t)
{
int server_fds[MAX_SERVERS] = { -1 };
int server_fds[] = { [0 ... MAX_SERVERS - 1] = -1 };
int client_fd, reuse_conn_fd = -1;
struct bpf_link *lookup_link;
int i, err;
......@@ -1053,7 +1053,7 @@ static void run_sk_assign(struct test_sk_lookup *skel,
struct bpf_program *lookup_prog,
const char *remote_ip, const char *local_ip)
{
int server_fds[MAX_SERVERS] = { -1 };
int server_fds[] = { [0 ... MAX_SERVERS - 1] = -1 };
struct bpf_sk_lookup ctx;
__u64 server_cookie;
int i, err;
......
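The sk_lookup change above deserves a note: '{ -1 }' only initializes
server_fds[0]; the remaining elements are zero-initialized, and 0 is a valid
fd, so the cleanup loop could end up closing stdin. The GCC/Clang range
designator sets every element to -1. A standalone sketch (the MAX_SERVERS
value here is illustrative):

	#define MAX_SERVERS 3

	int bad_fds[MAX_SERVERS]  = { -1 };                           /* {-1, 0, 0}   */
	int good_fds[MAX_SERVERS] = { [0 ... MAX_SERVERS - 1] = -1 }; /* {-1, -1, -1} */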
......@@ -43,7 +43,7 @@ static int process_sample(void *ctx, void *data, size_t len)
void test_test_ima(void)
{
char measured_dir_template[] = "/tmp/ima_measuredXXXXXX";
struct ring_buffer *ringbuf;
struct ring_buffer *ringbuf = NULL;
const char *measured_dir;
char cmd[256];
......@@ -85,5 +85,6 @@ void test_test_ima(void)
err = system(cmd);
CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno);
close_prog:
ring_buffer__free(ringbuf);
ima__destroy(skel);
}
......@@ -10,27 +10,27 @@
void serial_test_trace_printk(void)
{
struct trace_printk_lskel__bss *bss;
int err = 0, iter = 0, found = 0;
struct trace_printk__bss *bss;
struct trace_printk *skel;
struct trace_printk_lskel *skel;
char *buf = NULL;
FILE *fp = NULL;
size_t buflen;
skel = trace_printk__open();
skel = trace_printk_lskel__open();
if (!ASSERT_OK_PTR(skel, "trace_printk__open"))
return;
ASSERT_EQ(skel->rodata->fmt[0], 'T', "skel->rodata->fmt[0]");
skel->rodata->fmt[0] = 't';
err = trace_printk__load(skel);
err = trace_printk_lskel__load(skel);
if (!ASSERT_OK(err, "trace_printk__load"))
goto cleanup;
bss = skel->bss;
err = trace_printk__attach(skel);
err = trace_printk_lskel__attach(skel);
if (!ASSERT_OK(err, "trace_printk__attach"))
goto cleanup;
......@@ -43,7 +43,7 @@ void serial_test_trace_printk(void)
/* wait for tracepoint to trigger */
usleep(1);
trace_printk__detach(skel);
trace_printk_lskel__detach(skel);
if (!ASSERT_GT(bss->trace_printk_ran, 0, "bss->trace_printk_ran"))
goto cleanup;
......@@ -65,7 +65,7 @@ void serial_test_trace_printk(void)
goto cleanup;
cleanup:
trace_printk__destroy(skel);
trace_printk_lskel__destroy(skel);
free(buf);
if (fp)
fclose(fp);
......
......@@ -10,20 +10,20 @@
void serial_test_trace_vprintk(void)
{
struct trace_vprintk_lskel__bss *bss;
int err = 0, iter = 0, found = 0;
struct trace_vprintk__bss *bss;
struct trace_vprintk *skel;
struct trace_vprintk_lskel *skel;
char *buf = NULL;
FILE *fp = NULL;
size_t buflen;
skel = trace_vprintk__open_and_load();
skel = trace_vprintk_lskel__open_and_load();
if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load"))
goto cleanup;
bss = skel->bss;
err = trace_vprintk__attach(skel);
err = trace_vprintk_lskel__attach(skel);
if (!ASSERT_OK(err, "trace_vprintk__attach"))
goto cleanup;
......@@ -36,7 +36,7 @@ void serial_test_trace_vprintk(void)
/* wait for tracepoint to trigger */
usleep(1);
trace_vprintk__detach(skel);
trace_vprintk_lskel__detach(skel);
if (!ASSERT_GT(bss->trace_vprintk_ran, 0, "bss->trace_vprintk_ran"))
goto cleanup;
......@@ -61,7 +61,7 @@ void serial_test_trace_vprintk(void)
goto cleanup;
cleanup:
trace_vprintk__destroy(skel);
trace_vprintk_lskel__destroy(skel);
free(buf);
if (fp)
fclose(fp);
......
......@@ -8,11 +8,11 @@
void test_verif_stats(void)
{
__u32 len = sizeof(struct bpf_prog_info);
struct trace_vprintk_lskel *skel;
struct bpf_prog_info info = {};
struct trace_vprintk *skel;
int err;
skel = trace_vprintk__open_and_load();
skel = trace_vprintk_lskel__open_and_load();
if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load"))
goto cleanup;
......@@ -24,5 +24,5 @@ void test_verif_stats(void)
goto cleanup;
cleanup:
trace_vprintk__destroy(skel);
trace_vprintk_lskel__destroy(skel);
}
......@@ -38,7 +38,7 @@ int pass_handler(const void *ctx)
/* tests existing symbols. */
rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
if (rq)
out__existing_typed = rq->cpu;
out__existing_typed = 0;
out__existing_typeless = (__u64)&bpf_prog_active;
/* tests non-existent symbols. */
......