Commit 339bbff2 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2018-12-21

The following pull-request contains BPF updates for your *net-next* tree.

There is a merge conflict in test_verifier.c. Result looks as follows:

        [...]
        },
        {
                "calls: cross frame pruning",
                .insns = {
                [...]
                .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
                .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
                .result_unpriv = REJECT,
                .errstr = "!read_ok",
                .result = REJECT,
	},
        {
                "jset: functional",
                .insns = {
        [...]
        {
                "jset: unknown const compare not taken",
                .insns = {
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                     BPF_FUNC_get_prandom_u32),
                        BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
                        BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
                        BPF_EXIT_INSN(),
                },
                .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
                .errstr_unpriv = "!read_ok",
                .result_unpriv = REJECT,
                .errstr = "!read_ok",
                .result = REJECT,
        },
        [...]
        {
                "jset: range",
                .insns = {
                [...]
                },
                .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
                .result_unpriv = ACCEPT,
                .result = ACCEPT,
        },

The main changes are:

1) Various BTF related improvements in order to get line info
   working, meaning the verifier can now annotate the error log with
   the corresponding BPF C source lines, from Martin and Yonghong.

2) Implement support for raw BPF tracepoints in modules, from Matt.

3) Add several improvements to verifier state logic, namely a faster
   stacksafe check, optimizations for the stack state equivalence
   test and safety checks for liveness analysis, from Alexei.

4) Teach the verifier to make use of the BPF_JSET instruction, add
   several test cases to kselftests and remove the nfp specific JSET
   optimization now that the verifier is aware of it, from Jakub.

5) Improve BPF verifier's slot_type marking logic in order to
   allow more stack slot sharing, from Jiong.

6) Add a sk_msg->size member for context access and a set of fixes
   and improvements to make sock_map with kTLS usable with OpenSSL
   based applications, from John.

7) Several cleanups and documentation updates in bpftool as well as
   auto-mount of tracefs for "bpftool prog tracelog" command,
   from Quentin.

8) Include sub-program tags from now on in bpf_prog_info in order to
   give user space a reliable way to get all tags of the program,
   e.g. needed for kallsyms correlation, from Song.

9) Add BTF annotations for cgroup_local_storage BPF maps and
   implement bpf fs pretty print support, from Roman.

10) Fix bpftool in order to allow for cross-compilation, from Ivan.

11) Update of bpftool license to GPLv2-only + BSD-2-Clause in order
    to be compatible with libbfd and allow for Debian packaging,
    from Jakub.

12) Remove an obsolete prog->aux sanitization in dump and get rid of
    the kernel version check for prog load, from Daniel.

13) Fix a memory leak in libbpf's line info handling, from Prashant.

14) Fix cpumap's frame alignment for build_skb() so that skb_shared_info
    does not end up misaligned, from Jesper.

15) Fix test_progs kselftest to work with older compilers which are less
    aggressive in optimizing (and thus would throw a build error), from
    Stanislav.

16) Cleanup and simplify AF_XDP socket teardown, from Björn.

17) Fix sk lookup in BPF kselftest's test_sock_addr with regard to
    the netns_id argument, from Andrey.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e770454f 1cf4a0cc
......@@ -932,6 +932,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog->jited_len = image_size;
if (!prog->is_func || extra_pass) {
bpf_prog_fill_jited_linfo(prog, ctx.offset);
out_off:
kfree(ctx.offset);
kfree(jit_data);
......
......@@ -1575,6 +1575,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog->jited_len = image_size;
if (!prog->is_func || extra_pass) {
bpf_prog_fill_jited_linfo(prog, ctx.offset);
out_off:
kfree(ctx.offset);
kfree(jit_data);
......
......@@ -3052,26 +3052,19 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
u8 dst_gpr = insn->dst_reg * 2;
swreg tmp_reg;
if (!imm) {
meta->skip = true;
return 0;
}
if (imm & ~0U) {
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
emit_alu(nfp_prog, reg_none(),
reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
emit_br(nfp_prog, BR_BNE, insn->off, 0);
}
if (imm >> 32) {
tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
emit_alu(nfp_prog, imm_b(nfp_prog),
reg_a(dst_gpr), ALU_OP_AND, tmp_reg);
/* Upper word of the mask can only be 0 or ~0 from sign extension,
* so either ignore it or OR the whole thing in.
*/
if (imm >> 32)
emit_alu(nfp_prog, reg_none(),
reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
emit_br(nfp_prog, BR_BNE, insn->off, 0);
}
reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog));
emit_br(nfp_prog, BR_BNE, insn->off, 0);
return 0;
}
......
......@@ -23,6 +23,7 @@ struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
/* map is generic key/value storage optionally accesible by eBPF programs */
......@@ -52,6 +53,7 @@ struct bpf_map_ops {
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
struct seq_file *m);
int (*map_check_btf)(const struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);
};
......@@ -126,6 +128,7 @@ static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
}
int map_check_no_btf(const struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);
......
......@@ -38,6 +38,7 @@ enum bpf_reg_liveness {
REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
REG_LIVE_DONE = 4, /* liveness won't be updating this register anymore */
};
struct bpf_reg_state {
......@@ -224,6 +225,7 @@ struct bpf_verifier_env {
bool allow_ptr_leaks;
bool seen_direct_write;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
u32 subprog_cnt;
......
......@@ -7,6 +7,7 @@
#include <linux/types.h>
struct btf;
struct btf_member;
struct btf_type;
union bpf_attr;
......@@ -46,7 +47,9 @@ void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
struct seq_file *m);
int btf_get_fd_by_id(u32 id);
u32 btf_id(const struct btf *btf);
bool btf_name_offset_valid(const struct btf *btf, u32 offset);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
......
......@@ -432,6 +432,10 @@ struct module {
unsigned int num_tracepoints;
tracepoint_ptr_t *tracepoints_ptrs;
#endif
#ifdef CONFIG_BPF_EVENTS
unsigned int num_bpf_raw_events;
struct bpf_raw_event_map *bpf_raw_events;
#endif
#ifdef HAVE_JUMP_LABEL
struct jump_entry *jump_entries;
unsigned int num_jump_entries;
......
......@@ -36,6 +36,7 @@ struct sk_msg_sg {
struct scatterlist data[MAX_MSG_FRAGS + 1];
};
/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
struct sk_msg_sg sg;
void *data;
......@@ -416,6 +417,14 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
sk_psock_drop(sk, psock);
}
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
if (psock->parser.enabled)
psock->parser.saved_data_ready(sk);
else
sk->sk_data_ready(sk);
}
static inline void psock_set_prog(struct bpf_prog **pprog,
struct bpf_prog *prog)
{
......
......@@ -286,6 +286,7 @@ struct ucred {
#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */
#define MSG_MORE 0x8000 /* Sender will send more */
#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
#define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do no apply policy */
#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
......
......@@ -471,7 +471,8 @@ void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name);
struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
u64 *probe_offset, u64 *probe_addr);
......@@ -502,10 +503,13 @@ static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf
{
return -EOPNOTSUPP;
}
static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
return NULL;
}
static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
u32 *prog_id, u32 *fd_type,
const char **buf, u64 *probe_offset,
......
......@@ -460,6 +460,15 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx)
return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}
static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);
if (!ctx)
return false;
return !!tls_sw_ctx_tx(ctx);
}
static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
......
......@@ -133,6 +133,14 @@ enum bpf_map_type {
BPF_MAP_TYPE_STACK,
};
/* Note that tracing related programs such as
* BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
* are not subject to a stable API since kernel internal data
* structures can change from release to release and may
* therefore break existing tracing BPF programs. Tracing BPF
* programs correspond to /a/ specific kernel which is to be
* analyzed, and not /a/ specific kernel /and/ all future ones.
*/
enum bpf_prog_type {
BPF_PROG_TYPE_UNSPEC,
BPF_PROG_TYPE_SOCKET_FILTER,
......@@ -343,7 +351,7 @@ union bpf_attr {
__u32 log_level; /* verbosity level of verifier */
__u32 log_size; /* size of user buffer */
__aligned_u64 log_buf; /* user supplied buffer */
__u32 kern_version; /* checked when prog_type=kprobe */
__u32 kern_version; /* not used */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
__u32 prog_ifindex; /* ifindex of netdev to prep for */
......@@ -2657,6 +2665,7 @@ struct sk_msg_md {
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
__u32 size; /* Total size of sk_msg */
};
struct sk_reuseport_md {
......@@ -2717,6 +2726,8 @@ struct bpf_prog_info {
__u32 nr_jited_line_info;
__u32 line_info_rec_size;
__u32 jited_line_info_rec_size;
__u32 nr_prog_tags;
__aligned_u64 prog_tags;
} __attribute__((aligned(8)));
struct bpf_map_info {
......
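A minimal sk_msg program sketch (not part of the diff above) that reads the new sk_msg_md->size member; it assumes clang's BPF target and the selftests-style SEC() macro from bpf_helpers.h, and the program/section names are made up for illustration:

/* Sketch only: reads the new sk_msg_md->size field. SK_PASS/SK_DROP
 * come from the UAPI header; SEC() comes from the selftests' helpers.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("sk_msg")
int msg_size_filter(struct sk_msg_md *msg)
{
	/* Drop anything larger than 4 KiB, pass the rest. */
	if (msg->size > 4096)
		return SK_DROP;
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";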
......@@ -34,7 +34,9 @@ struct btf_type {
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
* bits 24-27: kind (e.g. int, ptr, array...etc)
* bits 28-31: unused
* bits 28-30: unused
* bit 31: kind_flag, currently used by
* struct, union and fwd
*/
__u32 info;
/* "size" is used by INT, ENUM, STRUCT and UNION.
......@@ -52,6 +54,7 @@ struct btf_type {
#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
#define BTF_INFO_KFLAG(info) ((info) >> 31)
#define BTF_KIND_UNKN 0 /* Unknown */
#define BTF_KIND_INT 1 /* Integer */
......@@ -110,9 +113,22 @@ struct btf_array {
struct btf_member {
__u32 name_off;
__u32 type;
__u32 offset; /* offset in bits */
/* If the type info kind_flag is set, the btf_member offset
* contains both member bitfield size and bit offset. The
* bitfield size is set for bitfield members. If the type
* info kind_flag is not set, the offset contains only bit
* offset.
*/
__u32 offset;
};
/* If the struct/union type info kind_flag is set, the
* following two macros are used to access bitfield_size
* and bit_offset from btf_member.offset.
*/
#define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24)
#define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff)
/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param".
* The exact number of btf_param is stored in the vlen (of the
* info in "struct btf_type").
......
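A small sketch (not part of the diff above) of how a consumer would unpack a btf_member offset once kind_flag is in play; the helper name and parameters are hypothetical:

/* Sketch only: unpack a struct btf_member offset as described above. */
#include <linux/btf.h>

static void unpack_member_offset(const struct btf_member *m, int kind_flag,
				 __u32 *bit_offset, __u32 *bitfield_size)
{
	if (kind_flag) {
		/* offset packs both the bitfield size and the bit offset */
		*bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
		*bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
	} else {
		/* offset is a plain bit offset, no bitfield size encoded */
		*bitfield_size = 0;
		*bit_offset = m->offset;
	}
}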
......@@ -382,6 +382,7 @@ static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
}
static int array_map_check_btf(const struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)
{
......
......@@ -183,7 +183,7 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
* is not at a fixed memory location, with mixed length
* packets, which is bad for cache-line hotness.
*/
frame_size = SKB_DATA_ALIGN(xdpf->len) + xdpf->headroom +
frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
pkt_data_start = xdpf->data - xdpf->headroom;
......
//SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bug.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <uapi/linux/btf.h>
DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
......@@ -308,6 +310,85 @@ static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
return -EINVAL;
}
static int cgroup_storage_check_btf(const struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)
{
struct btf_member *m;
u32 offset, size;
/* Key is expected to be of struct bpf_cgroup_storage_key type,
* which is:
* struct bpf_cgroup_storage_key {
* __u64 cgroup_inode_id;
* __u32 attach_type;
* };
*/
/*
* Key_type must be a structure with two fields.
*/
if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
BTF_INFO_VLEN(key_type->info) != 2)
return -EINVAL;
/*
* The first field must be a 64 bit integer at 0 offset.
*/
m = (struct btf_member *)(key_type + 1);
size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id);
if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
return -EINVAL;
/*
* The second field must be a 32 bit integer at 64 bit offset.
*/
m++;
offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type);
if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
return -EINVAL;
return 0;
}
static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key,
struct seq_file *m)
{
enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
struct bpf_cgroup_storage_key *key = _key;
struct bpf_cgroup_storage *storage;
int cpu;
rcu_read_lock();
storage = cgroup_storage_lookup(map_to_storage(map), key, false);
if (!storage) {
rcu_read_unlock();
return;
}
btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
stype = cgroup_storage_type(map);
if (stype == BPF_CGROUP_STORAGE_SHARED) {
seq_puts(m, ": ");
btf_type_seq_show(map->btf, map->btf_value_type_id,
&READ_ONCE(storage->buf)->data[0], m);
seq_puts(m, "\n");
} else {
seq_puts(m, ": {\n");
for_each_possible_cpu(cpu) {
seq_printf(m, "\tcpu%d: ", cpu);
btf_type_seq_show(map->btf, map->btf_value_type_id,
per_cpu_ptr(storage->percpu_buf, cpu),
m);
seq_puts(m, "\n");
}
seq_puts(m, "}\n");
}
rcu_read_unlock();
}
const struct bpf_map_ops cgroup_storage_map_ops = {
.map_alloc = cgroup_storage_map_alloc,
.map_free = cgroup_storage_map_free,
......@@ -315,7 +396,8 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
.map_lookup_elem = cgroup_storage_lookup_elem,
.map_update_elem = cgroup_storage_update_elem,
.map_delete_elem = cgroup_storage_delete_elem,
.map_check_btf = map_check_no_btf,
.map_check_btf = cgroup_storage_check_btf,
.map_seq_show_elem = cgroup_storage_seq_show_elem,
};
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
......
......@@ -728,6 +728,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
}
static int trie_check_btf(const struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)
{
......
......@@ -456,6 +456,7 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
}
int map_check_no_btf(const struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)
{
......@@ -478,7 +479,7 @@ static int map_check_btf(const struct bpf_map *map, const struct btf *btf,
return -EINVAL;
if (map->ops->map_check_btf)
ret = map->ops->map_check_btf(map, key_type, value_type);
ret = map->ops->map_check_btf(map, btf, key_type, value_type);
return ret;
}
......@@ -1472,11 +1473,6 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
return -E2BIG;
if (type == BPF_PROG_TYPE_KPROBE &&
attr->kern_version != LINUX_VERSION_CODE)
return -EINVAL;
if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
type != BPF_PROG_TYPE_CGROUP_SKB &&
!capable(CAP_SYS_ADMIN))
......@@ -1608,6 +1604,7 @@ static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
bpf_prog_put(raw_tp->prog);
}
bpf_put_raw_tracepoint(raw_tp->btp);
kfree(raw_tp);
return 0;
}
......@@ -1633,13 +1630,15 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
return -EFAULT;
tp_name[sizeof(tp_name) - 1] = 0;
btp = bpf_find_raw_tracepoint(tp_name);
btp = bpf_get_raw_tracepoint(tp_name);
if (!btp)
return -ENOENT;
raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
if (!raw_tp)
return -ENOMEM;
if (!raw_tp) {
err = -ENOMEM;
goto out_put_btp;
}
raw_tp->btp = btp;
prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd,
......@@ -1667,6 +1666,8 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
bpf_prog_put(prog);
out_free_tp:
kfree(raw_tp);
out_put_btp:
bpf_put_raw_tracepoint(btp);
return err;
}
......@@ -2031,13 +2032,6 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
insns[i + 1].imm = 0;
continue;
}
if (!bpf_dump_raw_ok() &&
imm == (unsigned long)prog->aux) {
insns[i].imm = 0;
insns[i + 1].imm = 0;
continue;
}
}
return insns;
......@@ -2271,33 +2265,25 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
ulen = info.nr_func_info;
info.nr_func_info = prog->aux->func_info_cnt;
if (info.nr_func_info && ulen) {
if (bpf_dump_raw_ok()) {
char __user *user_finfo;
char __user *user_finfo;
user_finfo = u64_to_user_ptr(info.func_info);
ulen = min_t(u32, info.nr_func_info, ulen);
if (copy_to_user(user_finfo, prog->aux->func_info,
info.func_info_rec_size * ulen))
return -EFAULT;
} else {
info.func_info = 0;
}
user_finfo = u64_to_user_ptr(info.func_info);
ulen = min_t(u32, info.nr_func_info, ulen);
if (copy_to_user(user_finfo, prog->aux->func_info,
info.func_info_rec_size * ulen))
return -EFAULT;
}
ulen = info.nr_line_info;
info.nr_line_info = prog->aux->nr_linfo;
if (info.nr_line_info && ulen) {
if (bpf_dump_raw_ok()) {
__u8 __user *user_linfo;
__u8 __user *user_linfo;
user_linfo = u64_to_user_ptr(info.line_info);
ulen = min_t(u32, info.nr_line_info, ulen);
if (copy_to_user(user_linfo, prog->aux->linfo,
info.line_info_rec_size * ulen))
return -EFAULT;
} else {
info.line_info = 0;
}
user_linfo = u64_to_user_ptr(info.line_info);
ulen = min_t(u32, info.nr_line_info, ulen);
if (copy_to_user(user_linfo, prog->aux->linfo,
info.line_info_rec_size * ulen))
return -EFAULT;
}
ulen = info.nr_jited_line_info;
......@@ -2322,6 +2308,28 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
}
}
ulen = info.nr_prog_tags;
info.nr_prog_tags = prog->aux->func_cnt ? : 1;
if (ulen) {
__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
u32 i;
user_prog_tags = u64_to_user_ptr(info.prog_tags);
ulen = min_t(u32, info.nr_prog_tags, ulen);
if (prog->aux->func_cnt) {
for (i = 0; i < ulen; i++) {
if (copy_to_user(user_prog_tags[i],
prog->aux->func[i]->tag,
BPF_TAG_SIZE))
return -EFAULT;
}
} else {
if (copy_to_user(user_prog_tags[0],
prog->tag, BPF_TAG_SIZE))
return -EFAULT;
}
}
done:
if (copy_to_user(uinfo, &info, info_len) ||
put_user(info_len, &uattr->info.info_len))
......
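A user space sketch (not part of the diff above) showing how the new nr_prog_tags/prog_tags fields could be queried via libbpf's bpf_obj_get_info_by_fd(); prog_fd and the fixed-size tag buffer are hypothetical:

/* Sketch only: read nr_prog_tags/prog_tags for an already loaded program. */
#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int print_prog_tags(int prog_fd)
{
	__u8 tags[16][BPF_TAG_SIZE];	/* room for up to 16 sub-programs */
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	__u32 i, j;

	info.nr_prog_tags = 16;
	info.prog_tags = (__u64)(unsigned long)tags;

	if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
		return -1;

	/* kernel reports the total count; we only provided room for 16 */
	for (i = 0; i < info.nr_prog_tags && i < 16; i++) {
		printf("tag %u: ", i);
		for (j = 0; j < BPF_TAG_SIZE; j++)
			printf("%02x", tags[i][j]);
		printf("\n");
	}
	return 0;
}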
......@@ -3093,6 +3093,11 @@ static int find_module_sections(struct module *mod, struct load_info *info)
sizeof(*mod->tracepoints_ptrs),
&mod->num_tracepoints);
#endif
#ifdef CONFIG_BPF_EVENTS
mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
sizeof(*mod->bpf_raw_events),
&mod->num_bpf_raw_events);
#endif
#ifdef HAVE_JUMP_LABEL
mod->jump_entries = section_objs(info, "__jump_table",
sizeof(*mod->jump_entries),
......
......@@ -17,6 +17,43 @@
#include "trace_probe.h"
#include "trace.h"
#ifdef CONFIG_MODULES
struct bpf_trace_module {
struct module *module;
struct list_head list;
};
static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
struct bpf_raw_event_map *btp, *ret = NULL;
struct bpf_trace_module *btm;
unsigned int i;
mutex_lock(&bpf_module_mutex);
list_for_each_entry(btm, &bpf_trace_modules, list) {
for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
btp = &btm->module->bpf_raw_events[i];
if (!strcmp(btp->tp->name, name)) {
if (try_module_get(btm->module))
ret = btp;
goto out;
}
}
}
out:
mutex_unlock(&bpf_module_mutex);
return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
return NULL;
}
#endif /* CONFIG_MODULES */
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
......@@ -1076,7 +1113,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
......@@ -1084,7 +1121,16 @@ struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
if (!strcmp(btp->tp->name, name))
return btp;
}
return NULL;
return bpf_get_raw_tracepoint_module(name);
}
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
struct module *mod = __module_address((unsigned long)btp);
if (mod)
module_put(mod);
}
static __always_inline
......@@ -1222,3 +1268,52 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
return err;
}
#ifdef CONFIG_MODULES
int bpf_event_notify(struct notifier_block *nb, unsigned long op, void *module)
{
struct bpf_trace_module *btm, *tmp;
struct module *mod = module;
if (mod->num_bpf_raw_events == 0 ||
(op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
return 0;
mutex_lock(&bpf_module_mutex);
switch (op) {
case MODULE_STATE_COMING:
btm = kzalloc(sizeof(*btm), GFP_KERNEL);
if (btm) {
btm->module = module;
list_add(&btm->list, &bpf_trace_modules);
}
break;
case MODULE_STATE_GOING:
list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
if (btm->module == module) {
list_del(&btm->list);
kfree(btm);
break;
}
}
break;
}
mutex_unlock(&bpf_module_mutex);
return 0;
}
static struct notifier_block bpf_module_nb = {
.notifier_call = bpf_event_notify,
};
int __init bpf_event_init(void)
{
register_module_notifier(&bpf_module_nb);
return 0;
}
fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */
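A user space sketch (not part of the diff above) of attaching to a raw tracepoint by name via BPF_RAW_TRACEPOINT_OPEN, which with this series also finds tracepoints exported by modules; prog_fd is a hypothetical fd of a loaded BPF_PROG_TYPE_RAW_TRACEPOINT program:

/* Sketch only: open a raw tracepoint by name via the bpf(2) syscall. */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int raw_tp_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = (__u64)(unsigned long)name;
	attr.raw_tracepoint.prog_fd = prog_fd;

	/* returns an fd; closing it detaches and drops the module reference */
	return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}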
......@@ -6313,6 +6313,9 @@ static bool sk_msg_is_valid_access(int off, int size,
if (type == BPF_WRITE)
return false;
if (off % size != 0)
return false;
switch (off) {
case offsetof(struct sk_msg_md, data):
info->reg_type = PTR_TO_PACKET;
......@@ -6324,16 +6327,20 @@ static bool sk_msg_is_valid_access(int off, int size,
if (size != sizeof(__u64))
return false;
break;
default:
case bpf_ctx_range(struct sk_msg_md, family):
case bpf_ctx_range(struct sk_msg_md, remote_ip4):
case bpf_ctx_range(struct sk_msg_md, local_ip4):
case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
case bpf_ctx_range(struct sk_msg_md, remote_port):
case bpf_ctx_range(struct sk_msg_md, local_port):
case bpf_ctx_range(struct sk_msg_md, size):
if (size != sizeof(__u32))
return false;
}
if (off < 0 || off >= sizeof(struct sk_msg_md))
return false;
if (off % size != 0)
break;
default:
return false;
}
return true;
}
......@@ -7418,6 +7425,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
int off;
#endif
/* convert ctx uses the fact sg element is first in struct */
BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0);
switch (si->off) {
case offsetof(struct sk_msg_md, data):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
......@@ -7530,6 +7540,12 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
offsetof(struct sock_common, skc_num));
break;
case offsetof(struct sk_msg_md, size):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
si->dst_reg, si->src_reg,
offsetof(struct sk_msg_sg, size));
break;
}
return insn - insn_buf;
......
......@@ -403,7 +403,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
msg->skb = skb;
sk_psock_queue_msg(psock, msg);
sk->sk_data_ready(sk);
sk_psock_data_ready(sk, psock);
return copied;
}
......@@ -572,6 +572,7 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
rcu_assign_sk_user_data(sk, NULL);
sk_psock_cork_free(psock);
sk_psock_zap_ingress(psock);
sk_psock_restore_proto(sk, psock);
write_lock_bh(&sk->sk_callback_lock);
......@@ -669,6 +670,22 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
bool ingress;
switch (verdict) {
case __SK_PASS:
sk_other = psock->sk;
if (sock_flag(sk_other, SOCK_DEAD) ||
!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
goto out_free;
}
if (atomic_read(&sk_other->sk_rmem_alloc) <=
sk_other->sk_rcvbuf) {
struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);
tcp->bpf.flags |= BPF_F_INGRESS;
skb_queue_tail(&psock->ingress_skb, skb);
schedule_work(&psock->work);
break;
}
goto out_free;
case __SK_REDIRECT:
sk_other = tcp_skb_bpf_redirect_fetch(skb);
if (unlikely(!sk_other))
......@@ -735,7 +752,7 @@ static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
}
/* Called with socket lock held. */
static void sk_psock_data_ready(struct sock *sk)
static void sk_psock_strp_data_ready(struct sock *sk)
{
struct sk_psock *psock;
......@@ -783,7 +800,7 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
return;
parser->saved_data_ready = sk->sk_data_ready;
sk->sk_data_ready = sk_psock_data_ready;
sk->sk_data_ready = sk_psock_strp_data_ready;
sk->sk_write_space = sk_psock_write_space;
parser->enabled = true;
}
......
......@@ -8,6 +8,7 @@
#include <linux/wait.h>
#include <net/inet_common.h>
#include <net/tls.h>
static bool tcp_bpf_stream_read(const struct sock *sk)
{
......@@ -198,7 +199,7 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
msg->sg.start = i;
msg->sg.size -= apply_bytes;
sk_psock_queue_msg(psock, tmp);
sk->sk_data_ready(sk);
sk_psock_data_ready(sk, psock);
} else {
sk_msg_free(sk, tmp);
kfree(tmp);
......@@ -218,6 +219,8 @@ static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
u32 off;
while (1) {
bool has_tx_ulp;
sge = sk_msg_elem(msg, msg->sg.start);
size = (apply && apply_bytes < sge->length) ?
apply_bytes : sge->length;
......@@ -226,7 +229,15 @@ static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
tcp_rate_check_app_limited(sk);
retry:
ret = do_tcp_sendpages(sk, page, off, size, flags);
has_tx_ulp = tls_sw_has_ctx_tx(sk);
if (has_tx_ulp) {
flags |= MSG_SENDPAGE_NOPOLICY;
ret = kernel_sendpage_locked(sk,
page, off, size, flags);
} else {
ret = do_tcp_sendpages(sk, page, off, size, flags);
}
if (ret <= 0)
return ret;
if (apply)
......
......@@ -55,6 +55,8 @@ enum {
static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
......@@ -700,6 +702,16 @@ static int tls_init(struct sock *sk)
mutex_unlock(&tcpv6_prot_mutex);
}
if (ip_ver == TLSV4 &&
unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
mutex_lock(&tcpv4_prot_mutex);
if (likely(sk->sk_prot != saved_tcpv4_prot)) {
build_protos(tls_prots[TLSV4], sk->sk_prot);
smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
}
mutex_unlock(&tcpv4_prot_mutex);
}
ctx->tx_conf = TLS_BASE;
ctx->rx_conf = TLS_BASE;
update_sk_prot(sk, ctx);
......@@ -731,8 +743,6 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
static int __init tls_register(void)
{
build_protos(tls_prots[TLSV4], &tcp_prot);
tls_sw_proto_ops = inet_stream_ops;
tls_sw_proto_ops.splice_read = tls_sw_splice_read;
......
......@@ -686,12 +686,13 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
struct sk_psock *psock;
struct sock *sk_redir;
struct tls_rec *rec;
bool enospc, policy;
int err = 0, send;
u32 delta = 0;
bool enospc;
policy = !(flags & MSG_SENDPAGE_NOPOLICY);
psock = sk_psock_get(sk);
if (!psock)
if (!psock || !policy)
return tls_push_record(sk, flags, record_type);
more_data:
enospc = sk_msg_full(msg);
......@@ -1017,8 +1018,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
return copied ? copied : ret;
}
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
int tls_sw_do_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
struct tls_context *tls_ctx = tls_get_ctx(sk);
......@@ -1033,15 +1034,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
int ret = 0;
bool eor;
if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
MSG_SENDPAGE_NOTLAST))
return -ENOTSUPP;
/* No MSG_EOR from splice, only look at MSG_MORE */
eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
lock_sock(sk);
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
/* Wait till there is any pending write on socket */
......@@ -1145,10 +1138,34 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
}
sendpage_end:
ret = sk_stream_error(sk, flags, ret);
release_sock(sk);
return copied ? copied : ret;
}
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
return -ENOTSUPP;
return tls_sw_do_sendpage(sk, page, offset, size, flags);
}
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
int ret;
if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
return -ENOTSUPP;
lock_sock(sk);
ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
release_sock(sk);
return ret;
}
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
int flags, long timeo, int *err)
{
......
......@@ -366,6 +366,7 @@ static int xsk_release(struct socket *sock)
xskq_destroy(xs->rx);
xskq_destroy(xs->tx);
xdp_put_umem(xs->umem);
sock_orphan(sk);
sock->sk = NULL;
......@@ -713,18 +714,6 @@ static const struct proto_ops xsk_proto_ops = {
.sendpage = sock_no_sendpage,
};
static void xsk_destruct(struct sock *sk)
{
struct xdp_sock *xs = xdp_sk(sk);
if (!sock_flag(sk, SOCK_DEAD))
return;
xdp_put_umem(xs->umem);
sk_refcnt_debug_dec(sk);
}
static int xsk_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
......@@ -751,9 +740,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
sk->sk_family = PF_XDP;
sk->sk_destruct = xsk_destruct;
sk_refcnt_debug_inc(sk);
sock_set_flag(sk, SOCK_RCU_FREE);
xs = xdp_sk(sk);
......
......@@ -128,6 +128,10 @@ OPTIONS
-f, --bpffs
Show file names of pinned maps.
-n, --nomount
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
EXAMPLES
========
**# bpftool map show**
......@@ -170,6 +174,61 @@ The following three commands are equivalent:
| **# bpftool map pin id 10 /sys/fs/bpf/map**
| **# bpftool map del pinned /sys/fs/bpf/map key 13 00 07 00**
Note that map update can also be used in order to change the program references
held by a program array map. This can be used, for example, to change the
programs used for tail-call jumps at runtime, without having to reload the
entry-point program. Below is an example for this use case: we load a program
defining a prog array map, and with a main function that contains a tail call
to other programs that can be used either to "process" packets or to "debug"
processing. Note that the prog array map MUST be pinned into the BPF virtual
file system for the map update to work successfully, as the kernel flushes prog
array maps when they have no more references from user space (and the update
would be lost as soon as bpftool exits).
|
| **# bpftool prog loadall tail_calls.o /sys/fs/bpf/foo type xdp**
| **# bpftool prog --bpffs**
::
545: xdp name main_func tag 674b4b5597193dc3 gpl
loaded_at 2018-12-12T15:02:58+0000 uid 0
xlated 240B jited 257B memlock 4096B map_ids 294
pinned /sys/fs/bpf/foo/xdp
546: xdp name bpf_func_process tag e369a529024751fc gpl
loaded_at 2018-12-12T15:02:58+0000 uid 0
xlated 200B jited 164B memlock 4096B
pinned /sys/fs/bpf/foo/process
547: xdp name bpf_func_debug tag 0b597868bc7f0976 gpl
loaded_at 2018-12-12T15:02:58+0000 uid 0
xlated 200B jited 164B memlock 4096B
pinned /sys/fs/bpf/foo/debug
**# bpftool map**
::
294: prog_array name jmp_table flags 0x0
key 4B value 4B max_entries 1 memlock 4096B
owner_prog_type xdp owner jited
|
| **# bpftool map pin id 294 /sys/fs/bpf/bar**
| **# bpftool map dump pinned /sys/fs/bpf/bar**
::
Found 0 elements
|
| **# bpftool map update pinned /sys/fs/bpf/bar key 0 0 0 0 value pinned /sys/fs/bpf/foo/debug**
| **# bpftool map dump pinned /sys/fs/bpf/bar**
::
key: 00 00 00 00 value: 22 02 00 00
Found 1 element
SEE ALSO
========
**bpf**\ (2),
......
......@@ -158,83 +158,98 @@ OPTIONS
When showing BPF programs, show file names of pinned
programs.
-m, --mapcompat
Allow loading maps with unknown map definitions.
-n, --nomount
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
EXAMPLES
========
**# bpftool prog show**
::
10: xdp name some_prog tag 005a3d2123620c8b gpl
loaded_at Sep 29/20:11 uid 0
xlated 528B jited 370B memlock 4096B map_ids 10
10: xdp name some_prog tag 005a3d2123620c8b gpl
loaded_at 2017-09-29T20:11:00+0000 uid 0
xlated 528B jited 370B memlock 4096B map_ids 10
**# bpftool --json --pretty prog show**
::
{
"programs": [{
"id": 10,
"type": "xdp",
"tag": "005a3d2123620c8b",
"gpl_compatible": true,
"loaded_at": "Sep 29/20:11",
"uid": 0,
"bytes_xlated": 528,
"jited": true,
"bytes_jited": 370,
"bytes_memlock": 4096,
"map_ids": [10
]
}
]
}
[{
"id": 10,
"type": "xdp",
"tag": "005a3d2123620c8b",
"gpl_compatible": true,
"loaded_at": 1506715860,
"uid": 0,
"bytes_xlated": 528,
"jited": true,
"bytes_jited": 370,
"bytes_memlock": 4096,
"map_ids": [10
]
}
]
|
| **# bpftool prog dump xlated id 10 file /tmp/t**
| **# ls -l /tmp/t**
| -rw------- 1 root root 560 Jul 22 01:42 /tmp/t
**# bpftool prog dum jited tag 005a3d2123620c8b**
::
-rw------- 1 root root 560 Jul 22 01:42 /tmp/t
**# bpftool prog dump jited tag 005a3d2123620c8b**
::
push %rbp
mov %rsp,%rbp
sub $0x228,%rsp
sub $0x28,%rbp
mov %rbx,0x0(%rbp)
0: push %rbp
1: mov %rsp,%rbp
2: sub $0x228,%rsp
3: sub $0x28,%rbp
4: mov %rbx,0x0(%rbp)
|
| **# mount -t bpf none /sys/fs/bpf/**
| **# bpftool prog pin id 10 /sys/fs/bpf/prog**
| **# bpftool prog load ./my_prog.o /sys/fs/bpf/prog2**
| **# ls -l /sys/fs/bpf/**
| -rw------- 1 root root 0 Jul 22 01:43 prog
| -rw------- 1 root root 0 Jul 22 01:44 prog2
**# bpftool prog dum jited pinned /sys/fs/bpf/prog opcodes**
::
-rw------- 1 root root 0 Jul 22 01:43 prog
-rw------- 1 root root 0 Jul 22 01:44 prog2
**# bpftool prog dump jited pinned /sys/fs/bpf/prog opcodes**
::
push %rbp
55
mov %rsp,%rbp
48 89 e5
sub $0x228,%rsp
48 81 ec 28 02 00 00
sub $0x28,%rbp
48 83 ed 28
mov %rbx,0x0(%rbp)
48 89 5d 00
0: push %rbp
55
1: mov %rsp,%rbp
48 89 e5
4: sub $0x228,%rsp
48 81 ec 28 02 00 00
b: sub $0x28,%rbp
48 83 ed 28
f: mov %rbx,0x0(%rbp)
48 89 5d 00
|
| **# bpftool prog load xdp1_kern.o /sys/fs/bpf/xdp1 type xdp map name rxcnt id 7**
| **# bpftool prog show pinned /sys/fs/bpf/xdp1**
| 9: xdp name xdp_prog1 tag 539ec6ce11b52f98 gpl
| loaded_at 2018-06-25T16:17:31-0700 uid 0
| xlated 488B jited 336B memlock 4096B map_ids 7
| **# rm /sys/fs/bpf/xdp1**
|
::
9: xdp name xdp_prog1 tag 539ec6ce11b52f98 gpl
loaded_at 2018-06-25T16:17:31-0700 uid 0
xlated 488B jited 336B memlock 4096B map_ids 7
**# rm /sys/fs/bpf/xdp1**
SEE ALSO
========
......
......@@ -60,6 +60,10 @@ OPTIONS
-m, --mapcompat
Allow loading maps with unknown map definitions.
-n, --nomount
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
SEE ALSO
========
......
......@@ -35,8 +35,6 @@ $(LIBBPF)-clean:
prefix ?= /usr/local
bash_compdir ?= /usr/share/bash-completion/completions
CC = gcc
CFLAGS += -O2
CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow -Wno-missing-field-initializers
CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \
......
# bpftool(8) bash completion -*- shell-script -*-
#
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
# Copyright (C) 2017-2018 Netronome Systems, Inc.
#
# This software is dual licensed under the GNU General License
# Version 2, June 1991 as shown in the file COPYING in the top-level
# directory of this source tree or the BSD 2-Clause License provided
# below. You have the option to license this software under the
# complete terms of either license.
#
# The BSD 2-Clause License:
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quentin Monnet <quentin.monnet@netronome.com>
# Takes a list of words in argument; each one of them is added to COMPREPLY if
......
// SPDX-License-Identifier: GPL-2.0
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */
#include <ctype.h>
......@@ -73,20 +73,17 @@ static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
return ret;
}
static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
{
int left_shift_bits, right_shift_bits;
int nr_bits = BTF_INT_BITS(int_type);
int total_bits_offset;
int bytes_to_copy;
int bits_to_copy;
__u64 print_num;
total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
data += BITS_ROUNDDOWN_BYTES(bit_offset);
bit_offset = BITS_PER_BYTE_MASKED(bit_offset);
bits_to_copy = bit_offset + nr_bits;
bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
......@@ -109,6 +106,22 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
jsonw_printf(jw, "%llu", print_num);
}
static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
{
int nr_bits = BTF_INT_BITS(int_type);
int total_bits_offset;
/* bits_offset is at most 7.
* BTF_INT_OFFSET() cannot exceed 64 bits.
*/
total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
btf_dumper_bitfield(nr_bits, total_bits_offset, data, jw,
is_plain_text);
}
static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
......@@ -180,6 +193,7 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
const struct btf_type *t;
struct btf_member *m;
const void *data_off;
int kind_flag;
int ret = 0;
int i, vlen;
......@@ -187,18 +201,32 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
if (!t)
return -EINVAL;
kind_flag = BTF_INFO_KFLAG(t->info);
vlen = BTF_INFO_VLEN(t->info);
jsonw_start_object(d->jw);
m = (struct btf_member *)(t + 1);
for (i = 0; i < vlen; i++) {
data_off = data + BITS_ROUNDDOWN_BYTES(m[i].offset);
__u32 bit_offset = m[i].offset;
__u32 bitfield_size = 0;
if (kind_flag) {
bitfield_size = BTF_MEMBER_BITFIELD_SIZE(bit_offset);
bit_offset = BTF_MEMBER_BIT_OFFSET(bit_offset);
}
jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
ret = btf_dumper_do_type(d, m[i].type,
BITS_PER_BYTE_MASKED(m[i].offset),
data_off);
if (ret)
break;
if (bitfield_size) {
btf_dumper_bitfield(bitfield_size, bit_offset,
data, d->jw, d->is_plain_text);
} else {
data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
ret = btf_dumper_do_type(d, m[i].type,
BITS_PER_BYTE_MASKED(bit_offset),
data_off);
if (ret)
break;
}
}
jsonw_end_object(d->jw);
......@@ -285,6 +313,7 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
switch (BTF_INFO_KIND(t->info)) {
case BTF_KIND_INT:
case BTF_KIND_TYPEDEF:
BTF_PRINT_ARG("%s ", btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_STRUCT:
......@@ -308,10 +337,11 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
BTF_PRINT_TYPE(t->type);
BTF_PRINT_ARG("* ");
break;
case BTF_KIND_UNKN:
case BTF_KIND_FWD:
case BTF_KIND_TYPEDEF:
return -1;
BTF_PRINT_ARG("%s %s ",
BTF_INFO_KFLAG(t->info) ? "union" : "struct",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_VOLATILE:
BTF_PRINT_ARG("volatile ");
BTF_PRINT_TYPE(t->type);
......@@ -335,6 +365,7 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
if (pos == -1)
return -1;
break;
case BTF_KIND_UNKN:
default:
return -1;
}
......
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright (C) 2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/list.h>
#include <stdlib.h>
......
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright (C) 2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef __BPF_TOOL_CFG_H
#define __BPF_TOOL_CFG_H
......
// SPDX-License-Identifier: GPL-2.0+
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2017 Facebook
// Author: Roman Gushchin <guro@fb.com>
......
/*
* Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <ctype.h>
#include <errno.h>
......@@ -58,7 +28,7 @@
#define BPF_FS_MAGIC 0xcafe4a11
#endif
void p_err(const char *fmt, ...)
void __printf(1, 2) p_err(const char *fmt, ...)
{
va_list ap;
......@@ -76,7 +46,7 @@ void p_err(const char *fmt, ...)
va_end(ap);
}
void p_info(const char *fmt, ...)
void __printf(1, 2) p_info(const char *fmt, ...)
{
va_list ap;
......@@ -106,7 +76,8 @@ void set_max_rlimit(void)
setrlimit(RLIMIT_MEMLOCK, &rinf);
}
static int mnt_bpffs(const char *target, char *buff, size_t bufflen)
static int
mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
{
bool bind_done = false;
......@@ -128,15 +99,29 @@ static int mnt_bpffs(const char *target, char *buff, size_t bufflen)
bind_done = true;
}
if (mount("bpf", target, "bpf", 0, "mode=0700")) {
snprintf(buff, bufflen, "mount -t bpf bpf %s failed: %s",
target, strerror(errno));
if (mount(type, target, type, 0, "mode=0700")) {
snprintf(buff, bufflen, "mount -t %s %s %s failed: %s",
type, type, target, strerror(errno));
return -1;
}
return 0;
}
int mount_tracefs(const char *target)
{
char err_str[ERR_MAX_LEN];
int err;
err = mnt_fs(target, "tracefs", err_str, ERR_MAX_LEN);
if (err) {
err_str[ERR_MAX_LEN - 1] = '\0';
p_err("can't mount tracefs: %s", err_str);
}
return err;
}
int open_obj_pinned(char *path, bool quiet)
{
int fd;
......@@ -192,7 +177,13 @@ int mount_bpffs_for_pin(const char *name)
/* nothing to do if already mounted */
goto out_free;
err = mnt_bpffs(dir, err_str, ERR_MAX_LEN);
if (block_mount) {
p_err("no BPF file system found, not mounting it due to --nomount option");
err = -1;
goto out_free;
}
err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
if (err) {
err_str[ERR_MAX_LEN - 1] = '\0';
p_err("can't mount BPF file system to pin the object (%s): %s",
......
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Based on:
*
......
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Simple streaming JSON writer
*
......@@ -19,6 +20,7 @@
#include <malloc.h>
#include <inttypes.h>
#include <stdint.h>
#include <linux/compiler.h>
#include "json_writer.h"
......@@ -156,7 +158,8 @@ void jsonw_name(json_writer_t *self, const char *name)
putc(' ', self->out);
}
void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
void __printf(2, 0)
jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
{
jsonw_eor(self);
putc('"', self->out);
......@@ -164,7 +167,7 @@ void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
putc('"', self->out);
}
void jsonw_printf(json_writer_t *self, const char *fmt, ...)
void __printf(2, 3) jsonw_printf(json_writer_t *self, const char *fmt, ...)
{
va_list ap;
......
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/*
* Simple streaming JSON writer
*
......
/*
* Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <ctype.h>
#include <errno.h>
......@@ -54,6 +24,7 @@ json_writer_t *json_wtr;
bool pretty_output;
bool json_output;
bool show_pinned;
bool block_mount;
int bpf_flags;
struct pinned_obj_table prog_table;
struct pinned_obj_table map_table;
......@@ -343,6 +314,7 @@ int main(int argc, char **argv)
{ "version", no_argument, NULL, 'V' },
{ "bpffs", no_argument, NULL, 'f' },
{ "mapcompat", no_argument, NULL, 'm' },
{ "nomount", no_argument, NULL, 'n' },
{ 0 }
};
int opt, ret;
......@@ -351,13 +323,14 @@ int main(int argc, char **argv)
pretty_output = false;
json_output = false;
show_pinned = false;
block_mount = false;
bin_name = argv[0];
hash_init(prog_table.table);
hash_init(map_table.table);
opterr = 0;
while ((opt = getopt_long(argc, argv, "Vhpjfm",
while ((opt = getopt_long(argc, argv, "Vhpjfmn",
options, NULL)) >= 0) {
switch (opt) {
case 'V':
......@@ -384,6 +357,9 @@ int main(int argc, char **argv)
case 'm':
bpf_flags = MAPS_RELAX_COMPAT;
break;
case 'n':
block_mount = true;
break;
default:
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)
......
/*
* Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef __BPF_TOOL_H
#define __BPF_TOOL_H
......@@ -74,7 +44,8 @@
#define HELP_SPEC_PROGRAM \
"PROG := { id PROG_ID | pinned FILE | tag PROG_TAG }"
#define HELP_SPEC_OPTIONS \
"OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} | {-m|--mapcompat}"
"OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} |\n" \
"\t {-m|--mapcompat} | {-n|--nomount} }"
#define HELP_SPEC_MAP \
"MAP := { id MAP_ID | pinned FILE }"
......@@ -115,6 +86,7 @@ extern const char *bin_name;
extern json_writer_t *json_wtr;
extern bool json_output;
extern bool show_pinned;
extern bool block_mount;
extern int bpf_flags;
extern struct pinned_obj_table prog_table;
extern struct pinned_obj_table map_table;
......@@ -128,6 +100,8 @@ void usage(void) __noreturn;
void set_max_rlimit(void);
int mount_tracefs(const char *target);
struct pinned_obj_table {
DECLARE_HASHTABLE(table, 16);
};
......@@ -177,8 +151,8 @@ int prog_parse_fd(int *argc, char ***argv);
int map_parse_fd(int *argc, char ***argv);
int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
#ifdef HAVE_LIBBFD_SUPPORT
struct bpf_prog_linfo;
#ifdef HAVE_LIBBFD_SUPPORT
void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
const char *arch, const char *disassembler_options,
const struct btf *btf,
......
/*
* Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <assert.h>
#include <errno.h>
......
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
/* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
......
// SPDX-License-Identifier: GPL-2.0+
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2018 Facebook
#define _GNU_SOURCE
......
// SPDX-License-Identifier: GPL-2.0+
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2018 Facebook
#include <stdlib.h>
......
// SPDX-License-Identifier: GPL-2.0+
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
// Copyright (C) 2018 Facebook
#ifndef _NETLINK_DUMPER_H_
......
// SPDX-License-Identifier: GPL-2.0+
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2018 Facebook
// Author: Yonghong Song <yhs@fb.com>
......
/*
* Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#define _GNU_SOURCE
#include <errno.h>
......@@ -62,7 +32,7 @@ static const char * const attach_type_strings[] = {
[__MAX_BPF_ATTACH_TYPE] = NULL,
};
enum bpf_attach_type parse_attach_type(const char *str)
static enum bpf_attach_type parse_attach_type(const char *str)
{
enum bpf_attach_type type;
......@@ -626,13 +596,6 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
if (func_info && !info.func_info) {
/* kernel.kptr_restrict is set. No func_info available. */
free(func_info);
func_info = NULL;
nr_finfo = 0;
}
if (linfo && info.nr_line_info != nr_linfo) {
p_err("incorrect nr_line_info %u vs. expected %u",
info.nr_line_info, nr_linfo);
......@@ -835,7 +798,7 @@ struct map_replace {
char *name;
};
int map_replace_compar(const void *p1, const void *p2)
static int map_replace_compar(const void *p1, const void *p2)
{
const struct map_replace *a = p1, *b = p2;
......
......@@ -54,7 +54,7 @@ find_tracefs_mnt_single(unsigned long magic, char *mnt, const char *mntpt)
return true;
}
static bool find_tracefs_pipe(char *mnt)
static bool get_tracefs_pipe(char *mnt)
{
static const char * const known_mnts[] = {
"/sys/kernel/debug/tracing",
......@@ -88,7 +88,20 @@ static bool find_tracefs_pipe(char *mnt)
fclose(fp);
/* The string from fscanf() might be truncated, check mnt is valid */
if (!found || validate_tracefs_mnt(mnt, TRACEFS_MAGIC))
if (found && validate_tracefs_mnt(mnt, TRACEFS_MAGIC))
goto exit_found;
if (block_mount)
return false;
p_info("could not find tracefs, attempting to mount it now");
/* Most of the time, tracefs is automatically mounted by debugfs at
* /sys/kernel/debug/tracing when we try to access it. If we could not
* find it, it is likely that debugfs is not mounted. Let's give one
* attempt at mounting just tracefs at /sys/kernel/tracing.
*/
strcpy(mnt, known_mnts[1]);
if (mount_tracefs(mnt))
return false;
exit_found:
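For reference, the fallback above leans on the mount_tracefs() helper declared in the bpftool header earlier in this diff; a minimal sketch of such a helper (assumed body, not the actual bpftool implementation) reduces to a single mount(2) call:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mount.h>

int mount_tracefs(const char *target)
{
	int err;

	/* Mount a fresh tracefs instance at the requested target directory. */
	err = mount("tracefs", target, "tracefs", 0, NULL);
	if (err)
		fprintf(stderr, "mount -t tracefs tracefs %s failed: %s\n",
			target, strerror(errno));
	return err;
}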
......@@ -115,17 +128,13 @@ int do_tracelog(int argc, char **argv)
.sa_handler = exit_tracelog
};
char trace_pipe[PATH_MAX];
bool found_trace_pipe;
size_t buff_len = 0;
if (json_output)
jsonw_start_array(json_wtr);
found_trace_pipe = find_tracefs_pipe(trace_pipe);
if (!found_trace_pipe) {
p_err("could not find trace pipe, tracefs not mounted?");
if (!get_tracefs_pipe(trace_pipe))
return -1;
}
trace_pipe_fd = fopen(trace_pipe, "r");
if (!trace_pipe_fd) {
......
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright (C) 2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* Copyright (C) 2018 Netronome Systems, Inc. */
#define _GNU_SOURCE
#include <stdarg.h>
......@@ -115,7 +81,7 @@ struct kernel_sym *kernel_syms_search(struct dump_data *dd,
sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
}
static void print_insn(void *private_data, const char *fmt, ...)
static void __printf(2, 3) print_insn(void *private_data, const char *fmt, ...)
{
va_list args;
......@@ -124,7 +90,7 @@ static void print_insn(void *private_data, const char *fmt, ...)
va_end(args);
}
static void
static void __printf(2, 3)
print_insn_for_graph(void *private_data, const char *fmt, ...)
{
char buf[64], *p;
......@@ -155,7 +121,8 @@ print_insn_for_graph(void *private_data, const char *fmt, ...)
printf("%s", buf);
}
static void print_insn_json(void *private_data, const char *fmt, ...)
static void __printf(2, 3)
print_insn_json(void *private_data, const char *fmt, ...)
{
unsigned int l = strlen(fmt);
char chomped_fmt[l];
......
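For context, the __printf(2, 3) annotations added above typically map to the compiler's format-checking attribute, so mismatches between the format string (argument 2) and the variadic arguments (starting at argument 3) are caught at build time. A sketch of the assumed macro, as commonly defined in the kernel's tools headers:

/* Assumed definition, shown for illustration only. */
#define __printf(a, b) __attribute__((format(printf, a, b)))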
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright (C) 2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef __BPF_TOOL_XLATED_DUMPER_H
#define __BPF_TOOL_XLATED_DUMPER_H
......
......@@ -133,6 +133,14 @@ enum bpf_map_type {
BPF_MAP_TYPE_STACK,
};
/* Note that tracing related programs such as
* BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
* are not subject to a stable API since kernel internal data
* structures can change from release to release and may
* therefore break existing tracing BPF programs. Tracing BPF
* programs correspond to /a/ specific kernel which is to be
* analyzed, and not /a/ specific kernel /and/ all future ones.
*/
enum bpf_prog_type {
BPF_PROG_TYPE_UNSPEC,
BPF_PROG_TYPE_SOCKET_FILTER,
......@@ -343,7 +351,7 @@ union bpf_attr {
__u32 log_level; /* verbosity level of verifier */
__u32 log_size; /* size of user buffer */
__aligned_u64 log_buf; /* user supplied buffer */
__u32 kern_version; /* checked when prog_type=kprobe */
__u32 kern_version; /* not used */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
__u32 prog_ifindex; /* ifindex of netdev to prep for */
......@@ -2657,6 +2665,7 @@ struct sk_msg_md {
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
__u32 size; /* Total size of sk_msg */
};
struct sk_reuseport_md {
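The new size member above is exposed for context access from sk_msg programs; a minimal illustrative program (names and the 64-byte threshold are made up for this sketch, and SEC() is assumed to come from the selftests' bpf_helpers.h):

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("sk_msg")
int msg_min_size(struct sk_msg_md *msg)
{
	/* Drop messages shorter than 64 bytes, just to exercise msg->size. */
	if (msg->size < 64)
		return SK_DROP;
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";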
......@@ -2717,6 +2726,8 @@ struct bpf_prog_info {
__u32 nr_jited_line_info;
__u32 line_info_rec_size;
__u32 jited_line_info_rec_size;
__u32 nr_prog_tags;
__aligned_u64 prog_tags;
} __attribute__((aligned(8)));
struct bpf_map_info {
......
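User space can read the new sub-program tags through the usual bpf_obj_get_info_by_fd() pattern, presumably following the same convention as the existing nr_map_ids/map_ids pair (caller supplies capacity and buffer, kernel fills them in). A rough sketch, with an illustrative capacity of 16 and prog_fd assumed to be a valid program file descriptor:

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>	/* libbpf: bpf_obj_get_info_by_fd() */

static int show_prog_tags(int prog_fd)
{
	__u8 tags[16][BPF_TAG_SIZE] = {};
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 i;
	int j;

	info.nr_prog_tags = 16;
	info.prog_tags = (__u64)(unsigned long)tags;
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len))
		return -1;
	for (i = 0; i < info.nr_prog_tags; i++) {
		printf("tag %u: ", i);
		for (j = 0; j < BPF_TAG_SIZE; j++)
			printf("%02x", tags[i][j]);
		printf("\n");
	}
	return 0;
}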
......@@ -34,7 +34,9 @@ struct btf_type {
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
* bits 24-27: kind (e.g. int, ptr, array...etc)
* bits 28-31: unused
* bits 28-30: unused
* bit 31: kind_flag, currently used by
* struct, union and fwd
*/
__u32 info;
/* "size" is used by INT, ENUM, STRUCT and UNION.
......@@ -52,6 +54,7 @@ struct btf_type {
#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
#define BTF_INFO_KFLAG(info) ((info) >> 31)
#define BTF_KIND_UNKN 0 /* Unknown */
#define BTF_KIND_INT 1 /* Integer */
......@@ -110,9 +113,22 @@ struct btf_array {
struct btf_member {
__u32 name_off;
__u32 type;
__u32 offset; /* offset in bits */
/* If the type info kind_flag is set, the btf_member offset
* contains both member bitfield size and bit offset. The
* bitfield size is set for bitfield members. If the type
* info kind_flag is not set, the offset contains only bit
* offset.
*/
__u32 offset;
};
/* If the struct/union type info kind_flag is set, the
* following two macros are used to access bitfield_size
* and bit_offset from btf_member.offset.
*/
#define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24)
#define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff)
/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param".
* The exact number of btf_param is stored in the vlen (of the
* info in "struct btf_type").
......
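A caller-side sketch of the new encoding (illustrative only, not part of this diff): when the parent struct or union has kind_flag set, the member offset packs both the bitfield size and the bit offset, which the macros above split apart.

static void show_member_offset(const struct btf_type *t,
			       const struct btf_member *m)
{
	if (BTF_INFO_KFLAG(t->info))
		/* kind_flag set: offset carries bitfield size + bit offset. */
		printf("bit_offset=%u bitfield_size=%u\n",
		       BTF_MEMBER_BIT_OFFSET(m->offset),
		       BTF_MEMBER_BITFIELD_SIZE(m->offset));
	else
		/* kind_flag clear: offset is the plain bit offset. */
		printf("bit_offset=%u\n", m->offset);
}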
......@@ -107,11 +107,7 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
nr_linfo = info->nr_line_info;
/*
* Test !info->line_info because the kernel may NULL
* the ptr if kernel.kptr_restrict is set.
*/
if (!nr_linfo || !info->line_info)
if (!nr_linfo)
return NULL;
/*
......
......@@ -266,6 +266,7 @@ void bpf_program__unload(struct bpf_program *prog)
zclose(prog->btf_fd);
zfree(&prog->func_info);
zfree(&prog->line_info);
}
static void bpf_program__exit(struct bpf_program *prog)
......
......@@ -27,3 +27,4 @@ test_flow_dissector
flow_dissector_load
test_netcnt
test_section_names
test_tcpnotify_user
......@@ -35,9 +35,11 @@ int connect_v4_prog(struct bpf_sock_addr *ctx)
if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
return 0;
else if (ctx->type == SOCK_STREAM)
sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4), 0, 0);
sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
BPF_F_CURRENT_NETNS, 0);
else
sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv4), 0, 0);
sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv4),
BPF_F_CURRENT_NETNS, 0);
if (!sk)
return 0;
......
......@@ -47,9 +47,11 @@ int connect_v6_prog(struct bpf_sock_addr *ctx)
if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
return 0;
else if (ctx->type == SOCK_STREAM)
sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv6), 0, 0);
sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv6),
BPF_F_CURRENT_NETNS, 0);
else
sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv6), 0, 0);
sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv6),
BPF_F_CURRENT_NETNS, 0);
if (!sk)
return 0;
......
......@@ -16,12 +16,18 @@ struct bpf_map_def SEC("maps") percpu_netcnt = {
.value_size = sizeof(struct percpu_net_cnt),
};
BPF_ANNOTATE_KV_PAIR(percpu_netcnt, struct bpf_cgroup_storage_key,
struct percpu_net_cnt);
struct bpf_map_def SEC("maps") netcnt = {
.type = BPF_MAP_TYPE_CGROUP_STORAGE,
.key_size = sizeof(struct bpf_cgroup_storage_key),
.value_size = sizeof(struct net_cnt),
};
BPF_ANNOTATE_KV_PAIR(netcnt, struct bpf_cgroup_storage_key,
struct net_cnt);
SEC("cgroup/skb")
int bpf_nextcnt(struct __sk_buff *skb)
{
......
......@@ -51,10 +51,10 @@ static struct {
struct iphdr iph;
struct tcphdr tcp;
} __packed pkt_v4 = {
.eth.h_proto = bpf_htons(ETH_P_IP),
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = 6,
.iph.tot_len = bpf_htons(MAGIC_BYTES),
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
};
......@@ -64,9 +64,9 @@ static struct {
struct ipv6hdr iph;
struct tcphdr tcp;
} __packed pkt_v6 = {
.eth.h_proto = bpf_htons(ETH_P_IPV6),
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
.iph.nexthdr = 6,
.iph.payload_len = bpf_htons(MAGIC_BYTES),
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
};
......
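The switch to __bpf_constant_htons() above matters because pkt_v4 and pkt_v6 are static initializers, which need a compile-time constant expression; the bpf_htons() wrapper may not fold to one with every compiler. A simplified sketch of what the constant variant boils down to on a little-endian host (assumed, not the actual bpf_endian.h definition):

/* Constant-expression 16-bit byte swap, safe in static initializers. */
#define __bpf_constant_htons(x) ((__u16)(			\
	(((__u16)(x) & (__u16)0x00ffU) << 8) |			\
	(((__u16)(x) & (__u16)0xff00U) >> 8)))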