Commit de763fbb authored by Andrii Nakryiko

Merge branch 'libbpf: Fixed various checkpatch issues'

Kang Minchul says:

====================
This patch series contains various checkpatch fixes
in btf.c, libbpf.c, and ringbuf.c.

I know these are trivial, but some of the issues are hard to ignore,
and I think these checkpatch issues are accumulating.

v1 -> v2: changed cover letter message.
====================
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents e662c775 b486d19a
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1724,7 +1724,8 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
         memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);
 
         /* and now restore original strings section size; types data size
-         * wasn't modified, so doesn't need restoring, see big comment above */
+         * wasn't modified, so doesn't need restoring, see big comment above
+         */
         btf->hdr->str_len = old_strs_len;
 
         hashmap__free(p.str_off_map);
@@ -2329,7 +2330,7 @@ int btf__add_restrict(struct btf *btf, int ref_type_id)
  */
 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
 {
-        if (!value|| !value[0])
+        if (!value || !value[0])
                 return libbpf_err(-EINVAL);
 
         return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
...
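For reference, btf__add_type_tag() in the hunk above is public libbpf API. A minimal usage sketch, assuming libbpf is linked; the type names and values are illustrative, not code from this commit:

#include <errno.h>
#include <linux/btf.h>
#include <bpf/btf.h>

/* Build a tiny BTF object in memory and tag an int type; per the
 * check shown above, a NULL or empty tag value yields -EINVAL. */
int add_type_tag_example(void)
{
        struct btf *btf = btf__new_empty();
        int int_id, tag_id;

        if (!btf)
                return -ENOMEM;

        int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
        tag_id = btf__add_type_tag(btf, "user", int_id);

        btf__free(btf);
        return tag_id < 0 ? tag_id : 0;
}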
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -347,7 +347,8 @@ enum sec_def_flags {
         SEC_ATTACHABLE = 2,
         SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
         /* attachment target is specified through BTF ID in either kernel or
-         * other BPF program's BTF object */
+         * other BPF program's BTF object
+         */
         SEC_ATTACH_BTF = 4,
         /* BPF program type allows sleeping/blocking in kernel */
         SEC_SLEEPABLE = 8,
@@ -488,7 +489,7 @@ struct bpf_map {
         char *name;
         /* real_name is defined for special internal maps (.rodata*,
          * .data*, .bss, .kconfig) and preserves their original ELF section
-         * name. This is important to be be able to find corresponding BTF
+         * name. This is important to be able to find corresponding BTF
          * DATASEC information.
          */
         char *real_name;
@@ -1863,10 +1864,18 @@ static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
                 return -ERANGE;
         }
 
         switch (ext->kcfg.sz) {
-        case 1: *(__u8 *)ext_val = value; break;
-        case 2: *(__u16 *)ext_val = value; break;
-        case 4: *(__u32 *)ext_val = value; break;
-        case 8: *(__u64 *)ext_val = value; break;
+        case 1:
+                *(__u8 *)ext_val = value;
+                break;
+        case 2:
+                *(__u16 *)ext_val = value;
+                break;
+        case 4:
+                *(__u32 *)ext_val = value;
+                break;
+        case 8:
+                *(__u64 *)ext_val = value;
+                break;
         default:
                 return -EINVAL;
         }
@@ -3518,7 +3527,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
         }
 
         /* sort BPF programs by section name and in-section instruction offset
-         * for faster search */
+         * for faster search
+         */
         if (obj->nr_programs)
                 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
@@ -4965,9 +4975,9 @@ bpf_object__reuse_map(struct bpf_map *map)
         err = bpf_map__reuse_fd(map, pin_fd);
         close(pin_fd);
-        if (err) {
+        if (err)
                 return err;
-        }
+
         map->pinned = true;
         pr_debug("reused pinned map at '%s'\n", map->pin_path);
@@ -6237,7 +6247,8 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
                  * prog; each main prog can have a different set of
                  * subprograms appended (potentially in different order as
                  * well), so position of any subprog can be different for
-                 * different main programs */
+                 * different main programs
+                 */
                 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
 
                 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
...
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -128,7 +128,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
         /* Map read-only producer page and data pages. We map twice as big
          * data size to allow simple reading of samples that wrap around the
          * end of a ring buffer. See kernel implementation for details.
-         * */
+         */
         tmp = mmap(NULL, rb->page_size + 2 * info.max_entries, PROT_READ,
                    MAP_SHARED, map_fd, rb->page_size);
         if (tmp == MAP_FAILED) {
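For context on the comment above: because the data pages are mapped twice back to back, a sample that wraps past the end of the ring can still be read with one linear access. A standalone sketch of the idea, simulating the aliased second mapping with a plain array rather than mmap:

#include <stdio.h>
#include <string.h>

#define RING_SZ 8

int main(void)
{
        char ring[2 * RING_SZ];
        const char *msg = "wrap!";
        size_t prod_pos = 6; /* sample starts 2 bytes before the end */
        size_t i;

        /* producer writes with wrap-around into the first copy */
        for (i = 0; i < strlen(msg); i++)
                ring[(prod_pos + i) % RING_SZ] = msg[i];
        /* mirror the first copy into the second, as the double
         * mmap does implicitly by aliasing the same pages */
        memcpy(ring + RING_SZ, ring, RING_SZ);

        /* consumer reads the wrapped sample in one contiguous access */
        printf("%.5s\n", ring + prod_pos); /* prints "wrap!" */
        return 0;
}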
@@ -220,7 +220,7 @@ static inline int roundup_len(__u32 len)
         return (len + 7) / 8 * 8;
 }
 
-static int64_t ringbuf_process_ring(struct ring* r)
+static int64_t ringbuf_process_ring(struct ring *r)
 {
         int *len_ptr, len, err;
         /* 64-bit to avoid overflow in case of extreme application behavior */
...