Commit dca73a65 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Alexei Starovoitov says:

====================
pull-request: bpf-next 2019-06-19

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) new SO_DETACH_REUSEPORT_BPF setsockopt, from Martin.

2) BTF based map definition, from Andrii.

3) support bpf_map_lookup_elem for xskmap, from Jonathan.

4) bounded loops and scalar precision logic in the verifier, from Alexei.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 497ad9f5 94079b64
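
Before the diffs, a hedged userspace sketch of the new detach option: the fd is assumed to belong to a socket in a SO_REUSEPORT group with a program attached via SO_ATTACH_REUSEPORT_{C,E}BPF, and a dummy int is passed because sock_setsockopt() rejects optlen smaller than sizeof(int):

	#include <sys/socket.h>
	#include <stdio.h>

	static int detach_reuseport_bpf(int fd)
	{
		int dummy = 0;

		/* Drops the cBPF/eBPF program attached to the reuseport
		 * group; fails with ENOENT if no program is attached.
		 */
		if (setsockopt(fd, SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
			       &dummy, sizeof(dummy))) {
			perror("SO_DETACH_REUSEPORT_BPF");
			return -1;
		}
		return 0;
	}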
@@ -122,6 +122,8 @@
 #define SO_RCVTIMEO_NEW		66
 #define SO_SNDTIMEO_NEW		67
+
+#define SO_DETACH_REUSEPORT_BPF	68
 #if !defined(__KERNEL__)
 #if __BITS_PER_LONG == 64
......
@@ -133,6 +133,8 @@
 #define SO_RCVTIMEO_NEW		66
 #define SO_SNDTIMEO_NEW		67
+
+#define SO_DETACH_REUSEPORT_BPF	68
 #if !defined(__KERNEL__)
 #if __BITS_PER_LONG == 64
......
@@ -114,6 +114,8 @@
 #define SO_RCVTIMEO_NEW		0x4040
 #define SO_SNDTIMEO_NEW		0x4041
+
+#define SO_DETACH_REUSEPORT_BPF	0x4042
 #if !defined(__KERNEL__)
 #if __BITS_PER_LONG == 64
......
@@ -115,6 +115,8 @@
 #define SO_RCVTIMEO_NEW		0x0044
 #define SO_SNDTIMEO_NEW		0x0045
+
+#define SO_DETACH_REUSEPORT_BPF	0x0047
 #if !defined(__KERNEL__)
......
@@ -277,6 +277,7 @@ enum bpf_reg_type {
 	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
 	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
 	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
+	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
 };

 /* The information passed from prog-specific *_is_valid_access
@@ -1098,6 +1099,15 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
 				    struct bpf_insn *insn_buf,
 				    struct bpf_prog *prog,
 				    u32 *target_size);
+bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+				  struct bpf_insn_access_aux *info);
+u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
+				    const struct bpf_insn *si,
+				    struct bpf_insn *insn_buf,
+				    struct bpf_prog *prog,
+				    u32 *target_size);
 #else
 static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
 						enum bpf_access_type type,
@@ -1114,6 +1124,21 @@ static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
 {
 	return 0;
 }
+
+static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
+						enum bpf_access_type type,
+						struct bpf_insn_access_aux *info)
+{
+	return false;
+}
+
+static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
+						  const struct bpf_insn *si,
+						  struct bpf_insn *insn_buf,
+						  struct bpf_prog *prog,
+						  u32 *target_size)
+{
+	return 0;
+}
 #endif /* CONFIG_INET */

 #endif /* _LINUX_BPF_H */
@@ -136,6 +136,8 @@ struct bpf_reg_state {
 	 */
 	s32 subreg_def;
 	enum bpf_reg_liveness live;
+	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
+	bool precise;
 };

 enum bpf_stack_slot_type {
@@ -187,14 +189,77 @@ struct bpf_func_state {
 	struct bpf_stack_state *stack;
 };

+struct bpf_idx_pair {
+	u32 prev_idx;
+	u32 idx;
+};
+
 #define MAX_CALL_FRAMES 8
 struct bpf_verifier_state {
 	/* call stack tracking */
 	struct bpf_func_state *frame[MAX_CALL_FRAMES];
+	struct bpf_verifier_state *parent;
+	/*
+	 * 'branches' field is the number of branches left to explore:
+	 * 0 - all possible paths from this state reached bpf_exit or
+	 *     were safely pruned
+	 * 1 - at least one path is being explored.
+	 *     This state hasn't reached bpf_exit
+	 * 2 - at least two paths are being explored.
+	 *     This state is an immediate parent of two children.
+	 *     One is fallthrough branch with branches==1 and another
+	 *     state is pushed into stack (to be explored later) also with
+	 *     branches==1. The parent of this state has branches==1.
+	 *     The verifier state tree connected via 'parent' pointer looks like:
+	 *     1
+	 *     1
+	 *     2 -> 1 (first 'if' pushed into stack)
+	 *     1
+	 *     2 -> 1 (second 'if' pushed into stack)
+	 *     1
+	 *     1
+	 *     1 bpf_exit.
+	 *
+	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
+	 * and the verifier state tree will look:
+	 *     1
+	 *     1
+	 *     2 -> 1 (first 'if' pushed into stack)
+	 *     1
+	 *     1 -> 1 (second 'if' pushed into stack)
+	 *     0
+	 *     0
+	 *     0 bpf_exit.
+	 * After pop_stack() the do_check() will resume at second 'if'.
+	 *
+	 * If is_state_visited() sees a state with branches > 0 it means
+	 * there is a loop. If such state is exactly equal to the current state
+	 * it's an infinite loop. Note states_equal() checks for states
+	 * equivalency, so two states being 'states_equal' does not mean
+	 * infinite loop. The exact comparison is provided by
+	 * states_maybe_looping() function. It's a stronger pre-check and
+	 * much faster than states_equal().
+	 *
+	 * This algorithm may not find all possible infinite loops or
+	 * loop iteration count may be too high.
+	 * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
+	 */
+	u32 branches;
 	u32 insn_idx;
 	u32 curframe;
 	u32 active_spin_lock;
 	bool speculative;
+
+	/* first and last insn idx of this verifier state */
+	u32 first_insn_idx;
+	u32 last_insn_idx;
+	/* jmp history recorded from first to last.
+	 * backtracking is using it to go from last to first.
+	 * For most states jmp_history_cnt is [0-3].
+	 * For loops can go up to ~40.
+	 */
+	struct bpf_idx_pair *jmp_history;
+	u32 jmp_history_cnt;
 };

 #define bpf_get_spilled_reg(slot, frame)				\
@@ -309,7 +374,9 @@ struct bpf_verifier_env {
 	} cfg;
 	u32 subprog_cnt;
 	/* number of instructions analyzed by the verifier */
-	u32 insn_processed;
+	u32 prev_insn_processed, insn_processed;
+	/* number of jmps, calls, exits analyzed so far */
+	u32 prev_jmps_processed, jmps_processed;
 	/* total verification time */
 	u64 verification_time;
 	/* maximum number of verifier states kept in 'branching' instructions */
......
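
The 'branches' accounting documented above boils down to a small propagation step; a hedged sketch (the real update_branch_counts() in kernel/bpf/verifier.c also handles per-frame details):

	/* Retire one branch in this state when a path reaches bpf_exit or
	 * is pruned; walk up the 'parent' chain while subtrees become
	 * fully explored (branches hits 0).
	 */
	static void update_branch_counts_sketch(struct bpf_verifier_state *st)
	{
		while (st) {
			if (--st->branches)
				break;	/* a sibling path is still being explored */
			st = st->parent;
		}
	}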
@@ -35,6 +35,8 @@ extern struct sock *reuseport_select_sock(struct sock *sk,
 					  struct sk_buff *skb,
 					  int hdr_len);
 extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
+extern int reuseport_detach_prog(struct sock *sk);
 int reuseport_get_id(struct sock_reuseport *reuse);

 #endif  /* _SOCK_REUSEPORT_H */
@@ -58,11 +58,11 @@ struct xdp_sock {
 	struct xdp_umem *umem;
 	struct list_head flush_node;
 	u16 queue_id;
-	struct xsk_queue *tx ____cacheline_aligned_in_smp;
-	struct list_head list;
 	bool zc;
 	/* Protects multiple processes in the control path */
 	struct mutex mutex;
+	struct xsk_queue *tx ____cacheline_aligned_in_smp;
+	struct list_head list;
 	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
 	 * in the SKB destructor callback.
 	 */
......
@@ -117,6 +117,8 @@
 #define SO_RCVTIMEO_NEW		66
 #define SO_SNDTIMEO_NEW		67
+
+#define SO_DETACH_REUSEPORT_BPF	68
 #if !defined(__KERNEL__)
 #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
......
@@ -3085,6 +3085,10 @@ struct bpf_sock_tuple {
 	};
 };

+struct bpf_xdp_sock {
+	__u32 queue_id;
+};
+
 #define XDP_PACKET_HEADROOM 256

 /* User return codes for XDP prog type.
@@ -3245,6 +3249,7 @@ struct bpf_sock_addr {
 	__u32 msg_src_ip6[4];	/* Allows 1,2,4-byte read and 4-byte write.
				 * Stored in network byte order.
				 */
+	__bpf_md_ptr(struct bpf_sock *, sk);
 };

 /* User bpf_sock_ops struct to access socket values and specify request ops
@@ -3296,6 +3301,7 @@ struct bpf_sock_ops {
 	__u32 sk_txhash;
 	__u64 bytes_received;
 	__u64 bytes_acked;
+	__bpf_md_ptr(struct bpf_sock *, sk);
 };

 /* Definitions for bpf_sock_ops_cb_flags */
......
 # SPDX-License-Identifier: GPL-2.0
 obj-y := core.o
+CFLAGS_core.o += $(call cc-disable-warning, override-init)

 obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o
 obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
......
@@ -80,8 +80,8 @@ static u64 dev_map_bitmap_size(const union bpf_attr *attr)
 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_dtab *dtab;
-	int err = -EINVAL;
 	u64 cost;
+	int err;

 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
......
This diff is collapsed.
@@ -17,8 +17,8 @@ struct xsk_map {
 static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 {
-	int cpu, err = -EINVAL;
 	struct xsk_map *m;
+	int cpu, err;
 	u64 cost;

 	if (!capable(CAP_NET_ADMIN))
@@ -151,6 +151,12 @@ void __xsk_map_flush(struct bpf_map *map)
 }

 static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
+{
+	WARN_ON_ONCE(!rcu_read_lock_held());
+	return __xsk_map_lookup_elem(map, *(u32 *)key);
+}
+
+static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
@@ -218,6 +224,7 @@ const struct bpf_map_ops xsk_map_ops = {
 	.map_free = xsk_map_free,
 	.map_get_next_key = xsk_map_get_next_key,
 	.map_lookup_elem = xsk_map_lookup_elem,
+	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
 	.map_update_elem = xsk_map_update_elem,
 	.map_delete_elem = xsk_map_delete_elem,
 	.map_check_btf = map_check_no_btf,
......
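
With .map_lookup_elem wired up, an XDP program can consult the XSKMAP directly, and the returned value is a read-only struct bpf_xdp_sock. A hedged sketch (map size and section macros are illustrative; declarations assumed from the selftests' bpf_helpers.h):

	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	struct bpf_map_def SEC("maps") xsks_map = {
		.type = BPF_MAP_TYPE_XSKMAP,
		.key_size = sizeof(int),
		.value_size = sizeof(int),
		.max_entries = 64,	/* one slot per RX queue, assumed */
	};

	SEC("xdp_sock")
	int xdp_sock_prog(struct xdp_md *ctx)
	{
		int index = ctx->rx_queue_index;
		struct bpf_xdp_sock *xsk;

		/* A set entry means an AF_XDP socket is bound to this queue */
		xsk = bpf_map_lookup_elem(&xsks_map, &index);
		if (xsk && xsk->queue_id == index)
			return bpf_redirect_map(&xsks_map, index, 0);

		return XDP_PASS;
	}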
@@ -5695,6 +5695,46 @@ BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
 	return INET_ECN_set_ce(skb);
 }

+bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+				  struct bpf_insn_access_aux *info)
+{
+	if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id))
+		return false;
+
+	if (off % size != 0)
+		return false;
+
+	switch (off) {
+	default:
+		return size == sizeof(__u32);
+	}
+}
+
+u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
+				    const struct bpf_insn *si,
+				    struct bpf_insn *insn_buf,
+				    struct bpf_prog *prog, u32 *target_size)
+{
+	struct bpf_insn *insn = insn_buf;
+
+#define BPF_XDP_SOCK_GET(FIELD)						\
+	do {								\
+		BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_sock, FIELD) >	\
+			     FIELD_SIZEOF(struct bpf_xdp_sock, FIELD));	\
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\
+				      si->dst_reg, si->src_reg,		\
+				      offsetof(struct xdp_sock, FIELD)); \
+	} while (0)
+
+	switch (si->off) {
+	case offsetof(struct bpf_xdp_sock, queue_id):
+		BPF_XDP_SOCK_GET(queue_id);
+		break;
+	}
+
+	return insn - insn_buf;
+}
+
 static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
 	.func		= bpf_skb_ecn_set_ce,
 	.gpl_only	= false,
@@ -5897,6 +5937,10 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_skc_lookup_tcp:
 		return &bpf_sock_addr_skc_lookup_tcp_proto;
 #endif /* CONFIG_INET */
+	case BPF_FUNC_sk_storage_get:
+		return &bpf_sk_storage_get_proto;
+	case BPF_FUNC_sk_storage_delete:
+		return &bpf_sk_storage_delete_proto;
 	default:
 		return bpf_base_func_proto(func_id);
 	}
@@ -5934,6 +5978,10 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_sk_storage_get_proto;
 	case BPF_FUNC_sk_storage_delete:
 		return &bpf_sk_storage_delete_proto;
+#ifdef CONFIG_SOCK_CGROUP_DATA
+	case BPF_FUNC_skb_cgroup_id:
+		return &bpf_skb_cgroup_id_proto;
+#endif
 #ifdef CONFIG_INET
 	case BPF_FUNC_tcp_sock:
 		return &bpf_tcp_sock_proto;
@@ -6114,6 +6162,14 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_local_storage_proto;
 	case BPF_FUNC_perf_event_output:
 		return &bpf_sockopt_event_output_proto;
+	case BPF_FUNC_sk_storage_get:
+		return &bpf_sk_storage_get_proto;
+	case BPF_FUNC_sk_storage_delete:
+		return &bpf_sk_storage_delete_proto;
+#ifdef CONFIG_INET
+	case BPF_FUNC_tcp_sock:
+		return &bpf_tcp_sock_proto;
+#endif /* CONFIG_INET */
 	default:
 		return bpf_base_func_proto(func_id);
 	}
@@ -6801,6 +6857,13 @@ static bool sock_addr_is_valid_access(int off, int size,
 		if (size != size_default)
 			return false;
 		break;
+	case offsetof(struct bpf_sock_addr, sk):
+		if (type != BPF_READ)
+			return false;
+		if (size != sizeof(__u64))
+			return false;
+		info->reg_type = PTR_TO_SOCKET;
+		break;
 	default:
 		if (type == BPF_READ) {
 			if (size != size_default)
@@ -6844,6 +6907,11 @@ static bool sock_ops_is_valid_access(int off, int size,
 			if (size != sizeof(__u64))
 				return false;
 			break;
+		case offsetof(struct bpf_sock_ops, sk):
+			if (size != sizeof(__u64))
+				return false;
+			info->reg_type = PTR_TO_SOCKET_OR_NULL;
+			break;
 		default:
 			if (size != size_default)
 				return false;
@@ -7751,6 +7819,11 @@ static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
 			struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
 			s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
 		break;
+	case offsetof(struct bpf_sock_addr, sk):
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_addr_kern, sk));
+		break;
 	}

 	return insn - insn_buf;
@@ -8010,6 +8083,19 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 		SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
 					  struct sock, type);
 		break;
+	case offsetof(struct bpf_sock_ops, sk):
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+						struct bpf_sock_ops_kern,
+						is_fullsock),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_ops_kern,
+					       is_fullsock));
+		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+						struct bpf_sock_ops_kern, sk),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_ops_kern, sk));
+		break;
 	}
 	return insn - insn_buf;
 }
......
@@ -1039,6 +1039,10 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 		}
 		break;

+	case SO_DETACH_REUSEPORT_BPF:
+		ret = reuseport_detach_prog(sk);
+		break;
+
 	case SO_DETACH_FILTER:
 		ret = sk_detach_filter(sk);
 		break;
......
@@ -332,3 +332,27 @@ int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
 	return 0;
 }
 EXPORT_SYMBOL(reuseport_attach_prog);
+
+int reuseport_detach_prog(struct sock *sk)
+{
+	struct sock_reuseport *reuse;
+	struct bpf_prog *old_prog;
+
+	if (!rcu_access_pointer(sk->sk_reuseport_cb))
+		return sk->sk_reuseport ? -ENOENT : -EINVAL;
+
+	old_prog = NULL;
+	spin_lock_bh(&reuseport_lock);
+	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
+					  lockdep_is_held(&reuseport_lock));
+	rcu_swap_protected(reuse->prog, old_prog,
+			   lockdep_is_held(&reuseport_lock));
+	spin_unlock_bh(&reuseport_lock);
+
+	if (!old_prog)
+		return -ENOENT;
+
+	sk_reuseport_prog_free(old_prog);
+	return 0;
+}
+EXPORT_SYMBOL(reuseport_detach_prog);
@@ -170,21 +170,12 @@ always += ibumad_kern.o
 always += hbm_out_kern.o

 KBUILD_HOSTCFLAGS += -I$(objtree)/usr/include
-KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/
+KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/bpf/
 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include
 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/perf

 HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
-HOSTCFLAGS_trace_helpers.o += -I$(srctree)/tools/lib/bpf/
-HOSTCFLAGS_trace_output_user.o += -I$(srctree)/tools/lib/bpf/
-HOSTCFLAGS_offwaketime_user.o += -I$(srctree)/tools/lib/bpf/
-HOSTCFLAGS_spintest_user.o += -I$(srctree)/tools/lib/bpf/
-HOSTCFLAGS_trace_event_user.o += -I$(srctree)/tools/lib/bpf/
-HOSTCFLAGS_sampleip_user.o += -I$(srctree)/tools/lib/bpf/
-HOSTCFLAGS_task_fd_query_user.o += -I$(srctree)/tools/lib/bpf/
-HOSTCFLAGS_xdp_sample_pkts_user.o += -I$(srctree)/tools/lib/bpf/

 KBUILD_HOSTLDLIBS += $(LIBBPF) -lelf
 HOSTLDLIBS_tracex4 += -lrt
@@ -206,6 +197,17 @@ HOSTCC = $(CROSS_COMPILE)gcc
 CLANG_ARCH_ARGS = -target $(ARCH)
 endif

+# Don't evaluate probes and warnings if we need to run make recursively
+ifneq ($(src),)
+HDR_PROBE := $(shell echo "\#include <linux/types.h>\n struct list_head { int a; }; int main() { return 0; }" | \
+	$(HOSTCC) $(KBUILD_HOSTCFLAGS) -x c - -o /dev/null 2>/dev/null && \
+	echo okay)
+
+ifeq ($(HDR_PROBE),)
+$(warning WARNING: Detected possible issues with include path.)
+$(warning WARNING: Please install kernel headers locally (make headers_install).)
+endif
+
 BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
 BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
 BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
@@ -223,6 +225,7 @@ ifneq ($(and $(BTF_LLC_PROBE),$(BTF_PAHOLE_PROBE),$(BTF_OBJCOPY_PROBE)),)
 	DWARF2BTF = y
 endif
 endif
+endif

 # Trick to allow make to be run from this directory
 all:
......
@@ -14,7 +14,7 @@
 #include <bpf/bpf.h>

-#include "bpf/libbpf.h"
+#include "libbpf.h"
 #include "bpf_insn.h"
 #include "sock_example.h"
......
@@ -50,8 +50,8 @@
 #include "cgroup_helpers.h"
 #include "hbm.h"
 #include "bpf_util.h"
-#include "bpf/bpf.h"
-#include "bpf/libbpf.h"
+#include "bpf.h"
+#include "libbpf.h"

 bool outFlag = true;
 int minRate = 1000;		/* cgroup rate limit in Mbps */
@@ -411,7 +411,7 @@ static void Usage(void)
 		" -l	      also limit flows using loopback\n"
 		" -n <#>      to create cgroup \"/hbm#\" and attach prog\n"
 		"	      Default is /hbm1\n"
-		" --no_cn     disable CN notifcations\n"
+		" --no_cn     disable CN notifications\n"
 		" -r <rate>   Rate in Mbps\n"
 		" -s	      Update HBM stats\n"
 		" -t <time>   Exit after specified seconds (default is 0)\n"
......
@@ -25,7 +25,7 @@
 #include "bpf_load.h"
 #include "bpf_util.h"
-#include "bpf/libbpf.h"
+#include "libbpf.h"

 static void dump_counts(int fd)
 {
......
@@ -3,7 +3,7 @@
 #include <assert.h>
 #include <linux/bpf.h>
 #include <bpf/bpf.h>
-#include "bpf/libbpf.h"
+#include "libbpf.h"
 #include "sock_example.h"
 #include <unistd.h>
 #include <arpa/inet.h>
......
@@ -3,7 +3,7 @@
 #include <assert.h>
 #include <linux/bpf.h>
 #include <bpf/bpf.h>
-#include "bpf/libbpf.h"
+#include "libbpf.h"
 #include "sock_example.h"
 #include <unistd.h>
 #include <arpa/inet.h>
......
@@ -15,8 +15,8 @@
 #include <net/if.h>

 #include "bpf_util.h"
-#include "bpf/bpf.h"
-#include "bpf/libbpf.h"
+#include "bpf.h"
+#include "libbpf.h"

 static int ifindex;
 static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
......
@@ -18,8 +18,8 @@
 #include <netinet/ether.h>
 #include <unistd.h>
 #include <time.h>
-#include "bpf/bpf.h"
-#include "bpf/libbpf.h"
+#include "bpf.h"
+#include "libbpf.h"

 #define STATS_INTERVAL_S 2U
......
@@ -24,7 +24,7 @@
 #include <fcntl.h>
 #include <libgen.h>

-#include "bpf/libbpf.h"
+#include "libbpf.h"
 #include <bpf/bpf.h>
......
@@ -25,7 +25,7 @@ static const char *__doc__ =
 #define MAX_PROG 6

 #include <bpf/bpf.h>
-#include "bpf/libbpf.h"
+#include "libbpf.h"
 #include "bpf_util.h"
......
@@ -16,7 +16,7 @@
 #include "bpf_util.h"
 #include <bpf/bpf.h>
-#include "bpf/libbpf.h"
+#include "libbpf.h"

 static int ifindex_in;
 static int ifindex_out;
......
@@ -16,7 +16,7 @@
 #include "bpf_util.h"
 #include <bpf/bpf.h>
-#include "bpf/libbpf.h"
+#include "libbpf.h"

 static int ifindex_in;
 static int ifindex_out;
......
@@ -21,7 +21,7 @@
 #include <sys/ioctl.h>
 #include <sys/syscall.h>
 #include "bpf_util.h"
-#include "bpf/libbpf.h"
+#include "libbpf.h"
 #include <sys/resource.h>
 #include <libgen.h>
......
@@ -22,8 +22,8 @@ static const char *__doc__ = " XDP RX-queue info extract example\n\n"
 #include <arpa/inet.h>
 #include <linux/if_link.h>

-#include "bpf/bpf.h"
-#include "bpf/libbpf.h"
+#include "bpf.h"
+#include "libbpf.h"
 #include "bpf_util.h"

 static int ifindex = -1;
......
@@ -14,7 +14,7 @@
 #include <netinet/ether.h>
 #include <unistd.h>
 #include <time.h>
-#include "bpf/libbpf.h"
+#include "libbpf.h"
 #include <bpf/bpf.h>
 #include "bpf_util.h"
 #include "xdp_tx_iptunnel_common.h"
......
@@ -27,8 +27,8 @@
 #include <time.h>
 #include <unistd.h>

-#include "bpf/libbpf.h"
-#include "bpf/xsk.h"
+#include "libbpf.h"
+#include "xsk.h"
 #include <bpf/bpf.h>

 #ifndef SOL_XDP
......
@@ -21,6 +21,7 @@
 #include <sys/vfs.h>

 #include <bpf.h>
+#include <libbpf.h> /* libbpf_num_possible_cpus */

 #include "main.h"
@@ -439,57 +440,13 @@ unsigned int get_page_size(void)

 unsigned int get_possible_cpus(void)
 {
-	static unsigned int result;
-	char buf[128];
-	long int n;
-	char *ptr;
-	int fd;
-
-	if (result)
-		return result;
-
-	fd = open("/sys/devices/system/cpu/possible", O_RDONLY);
-	if (fd < 0) {
-		p_err("can't open sysfs possible cpus");
-		exit(-1);
-	}
-
-	n = read(fd, buf, sizeof(buf));
-	if (n < 2) {
-		p_err("can't read sysfs possible cpus");
-		exit(-1);
-	}
-	close(fd);
-	if (n == sizeof(buf)) {
-		p_err("read sysfs possible cpus overflow");
-		exit(-1);
-	}
-
-	ptr = buf;
-	n = 0;
-	while (*ptr && *ptr != '\n') {
-		unsigned int a, b;
-
-		if (sscanf(ptr, "%u-%u", &a, &b) == 2) {
-			n += b - a + 1;
-			ptr = strchr(ptr, '-') + 1;
-		} else if (sscanf(ptr, "%u", &a) == 1) {
-			n++;
-		} else {
-			assert(0);
-		}
-
-		while (isdigit(*ptr))
-			ptr++;
-		if (*ptr == ',')
-			ptr++;
-	}
-
-	result = n;
-
-	return result;
+	int cpus = libbpf_num_possible_cpus();
+
+	if (cpus < 0) {
+		p_err("Can't get # of possible cpus: %s", strerror(-cpus));
+		exit(-1);
+	}
+	return cpus;
 }

 static char *
......
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_GENERIC_SOCKET_H
#define __ASM_GENERIC_SOCKET_H
#include <linux/posix_types.h>
#include <asm/sockios.h>
/* For setsockopt(2) */
#define SOL_SOCKET 1
#define SO_DEBUG 1
#define SO_REUSEADDR 2
#define SO_TYPE 3
#define SO_ERROR 4
#define SO_DONTROUTE 5
#define SO_BROADCAST 6
#define SO_SNDBUF 7
#define SO_RCVBUF 8
#define SO_SNDBUFFORCE 32
#define SO_RCVBUFFORCE 33
#define SO_KEEPALIVE 9
#define SO_OOBINLINE 10
#define SO_NO_CHECK 11
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
#define SO_REUSEPORT 15
#ifndef SO_PASSCRED /* powerpc only differs in these */
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
#define SO_SNDLOWAT 19
#define SO_RCVTIMEO_OLD 20
#define SO_SNDTIMEO_OLD 21
#endif
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 22
#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
#define SO_SECURITY_ENCRYPTION_NETWORK 24
#define SO_BINDTODEVICE 25
/* Socket filtering */
#define SO_ATTACH_FILTER 26
#define SO_DETACH_FILTER 27
#define SO_GET_FILTER SO_ATTACH_FILTER
#define SO_PEERNAME 28
#define SO_ACCEPTCONN 30
#define SO_PEERSEC 31
#define SO_PASSSEC 34
#define SO_MARK 36
#define SO_PROTOCOL 38
#define SO_DOMAIN 39
#define SO_RXQ_OVFL 40
#define SO_WIFI_STATUS 41
#define SCM_WIFI_STATUS SO_WIFI_STATUS
#define SO_PEEK_OFF 42
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
#define SO_LOCK_FILTER 44
#define SO_SELECT_ERR_QUEUE 45
#define SO_BUSY_POLL 46
#define SO_MAX_PACING_RATE 47
#define SO_BPF_EXTENSIONS 48
#define SO_INCOMING_CPU 49
#define SO_ATTACH_BPF 50
#define SO_DETACH_BPF SO_DETACH_FILTER
#define SO_ATTACH_REUSEPORT_CBPF 51
#define SO_ATTACH_REUSEPORT_EBPF 52
#define SO_CNX_ADVICE 53
#define SCM_TIMESTAMPING_OPT_STATS 54
#define SO_MEMINFO 55
#define SO_INCOMING_NAPI_ID 56
#define SO_COOKIE 57
#define SCM_TIMESTAMPING_PKTINFO 58
#define SO_PEERGROUPS 59
#define SO_ZEROCOPY 60
#define SO_TXTIME 61
#define SCM_TXTIME SO_TXTIME
#define SO_BINDTOIFINDEX 62
#define SO_TIMESTAMP_OLD 29
#define SO_TIMESTAMPNS_OLD 35
#define SO_TIMESTAMPING_OLD 37
#define SO_TIMESTAMP_NEW 63
#define SO_TIMESTAMPNS_NEW 64
#define SO_TIMESTAMPING_NEW 65
#define SO_RCVTIMEO_NEW 66
#define SO_SNDTIMEO_NEW 67
#define SO_DETACH_REUSEPORT_BPF 68
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
/* on 64-bit and x32, avoid the ?: operator */
#define SO_TIMESTAMP SO_TIMESTAMP_OLD
#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD
#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD
#define SO_RCVTIMEO SO_RCVTIMEO_OLD
#define SO_SNDTIMEO SO_SNDTIMEO_OLD
#else
#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW)
#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW)
#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW)
#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW)
#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW)
#endif
#define SCM_TIMESTAMP SO_TIMESTAMP
#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
#define SCM_TIMESTAMPING SO_TIMESTAMPING
#endif
#endif /* __ASM_GENERIC_SOCKET_H */
@@ -3085,6 +3085,10 @@ struct bpf_sock_tuple {
 	};
 };

+struct bpf_xdp_sock {
+	__u32 queue_id;
+};
+
 #define XDP_PACKET_HEADROOM 256

 /* User return codes for XDP prog type.
@@ -3245,6 +3249,7 @@ struct bpf_sock_addr {
 	__u32 msg_src_ip6[4];	/* Allows 1,2,4-byte read and 4-byte write.
				 * Stored in network byte order.
				 */
+	__bpf_md_ptr(struct bpf_sock *, sk);
 };

 /* User bpf_sock_ops struct to access socket values and specify request ops
@@ -3296,6 +3301,7 @@ struct bpf_sock_ops {
 	__u32 sk_txhash;
 	__u64 bytes_received;
 	__u64 bytes_acked;
+	__bpf_md_ptr(struct bpf_sock *, sk);
 };

 /* Definitions for bpf_sock_ops_cb_flags */
......
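
The new sk pointers above are loaded as PTR_TO_SOCKET (sock_addr) and PTR_TO_SOCKET_OR_NULL (sock_ops), so sock_ops programs must NULL-check before use. A hedged sketch combining this with the newly allowed bpf_tcp_sock() in sock_ops (SEC() and helper declarations assumed from the selftests' bpf_helpers.h):

	SEC("sockops")
	int sockops_prog(struct bpf_sock_ops *skops)
	{
		struct bpf_sock *sk = skops->sk;	/* PTR_TO_SOCKET_OR_NULL */
		struct bpf_tcp_sock *tp;
		__u32 snd_cwnd = 0;

		if (!sk)		/* not a fullsock in every callback */
			return 1;

		tp = bpf_tcp_sock(sk);	/* now callable from sock_ops progs */
		if (tp)
			snd_cwnd = tp->snd_cwnd;	/* feed real logic here */

		return 1;
	}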
@@ -26,10 +26,11 @@
 #include <memory.h>
 #include <unistd.h>
 #include <asm/unistd.h>
+#include <errno.h>
 #include <linux/bpf.h>
 #include "bpf.h"
 #include "libbpf.h"
-#include <errno.h>
+#include "libbpf_internal.h"

 /*
  * When building perf, unistd.h is overridden. __NR_bpf is
@@ -53,10 +54,6 @@
 # endif
 #endif

-#ifndef min
-#define min(x, y) ((x) < (y) ? (x) : (y))
-#endif
-
 static inline __u64 ptr_to_u64(const void *ptr)
 {
 	return (__u64) (unsigned long) ptr;
......
@@ -6,10 +6,7 @@
 #include <linux/err.h>
 #include <linux/bpf.h>
 #include "libbpf.h"
-
-#ifndef min
-#define min(x, y) ((x) < (y) ? (x) : (y))
-#endif
+#include "libbpf_internal.h"

 struct bpf_prog_linfo {
 	void *raw_linfo;
......
@@ -16,9 +16,6 @@
 #include "libbpf_internal.h"
 #include "hashmap.h"

-#define max(a, b) ((a) > (b) ? (a) : (b))
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
 #define BTF_MAX_NR_TYPES 0x7fffffff
 #define BTF_MAX_STR_OFFSET 0x7fffffff
......
@@ -17,6 +17,7 @@ extern "C" {

 #define BTF_ELF_SEC ".BTF"
 #define BTF_EXT_ELF_SEC ".BTF.ext"
+#define MAPS_ELF_SEC ".maps"

 struct btf;
 struct btf_ext;
......
@@ -18,9 +18,6 @@
 #include "libbpf.h"
 #include "libbpf_internal.h"

-#define min(x, y) ((x) < (y) ? (x) : (y))
-#define max(x, y) ((x) < (y) ? (y) : (x))
-
 static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
 static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
......
This diff is collapsed.
@@ -98,15 +98,16 @@ struct bpf_object_load_attr {
 LIBBPF_API int bpf_object__load(struct bpf_object *obj);
 LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
 LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
-LIBBPF_API const char *bpf_object__name(struct bpf_object *obj);
-LIBBPF_API unsigned int bpf_object__kversion(struct bpf_object *obj);
+LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
+LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);

 struct btf;
-LIBBPF_API struct btf *bpf_object__btf(struct bpf_object *obj);
+LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
 LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);

 LIBBPF_API struct bpf_program *
-bpf_object__find_program_by_title(struct bpf_object *obj, const char *title);
+bpf_object__find_program_by_title(const struct bpf_object *obj,
+				  const char *title);

 LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev);
 #define bpf_object__for_each_safe(pos, tmp)			\
@@ -118,7 +119,7 @@ LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev);
 typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *);
 LIBBPF_API int bpf_object__set_priv(struct bpf_object *obj, void *priv,
 				    bpf_object_clear_priv_t clear_priv);
-LIBBPF_API void *bpf_object__priv(struct bpf_object *prog);
+LIBBPF_API void *bpf_object__priv(const struct bpf_object *prog);

 LIBBPF_API int
 libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
@@ -129,7 +130,7 @@ LIBBPF_API int libbpf_attach_type_by_name(const char *name,
 /* Accessors of bpf_program */
 struct bpf_program;
 LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog,
-						 struct bpf_object *obj);
+						 const struct bpf_object *obj);

 #define bpf_object__for_each_program(pos, obj)		\
	for ((pos) = bpf_program__next(NULL, (obj));	\
@@ -137,24 +138,23 @@ LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog,
	     (pos) = bpf_program__next((pos), (obj)))

 LIBBPF_API struct bpf_program *bpf_program__prev(struct bpf_program *prog,
-						 struct bpf_object *obj);
+						 const struct bpf_object *obj);

-typedef void (*bpf_program_clear_priv_t)(struct bpf_program *,
-					 void *);
+typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *);

 LIBBPF_API int bpf_program__set_priv(struct bpf_program *prog, void *priv,
				     bpf_program_clear_priv_t clear_priv);

-LIBBPF_API void *bpf_program__priv(struct bpf_program *prog);
+LIBBPF_API void *bpf_program__priv(const struct bpf_program *prog);
 LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
					 __u32 ifindex);

-LIBBPF_API const char *bpf_program__title(struct bpf_program *prog,
+LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog,
					  bool needs_copy);

 LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license,
				 __u32 kern_version);
-LIBBPF_API int bpf_program__fd(struct bpf_program *prog);
+LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
 LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
					 const char *path,
					 int instance);
@@ -227,7 +227,7 @@ typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
 LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
				     bpf_program_prep_t prep);

-LIBBPF_API int bpf_program__nth_fd(struct bpf_program *prog, int n);
+LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n);

 /*
  * Adjust type of BPF program. Default is kprobe.
@@ -246,14 +246,14 @@ LIBBPF_API void
 bpf_program__set_expected_attach_type(struct bpf_program *prog,
				      enum bpf_attach_type type);

-LIBBPF_API bool bpf_program__is_socket_filter(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_tracepoint(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_raw_tracepoint(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_kprobe(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_sched_cls(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_sched_act(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_xdp(struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_perf_event(struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);

 /*
  * No need for __attribute__((packed)), all members of 'bpf_map_def'
@@ -275,10 +275,10 @@ struct bpf_map_def {
  */
 struct bpf_map;
 LIBBPF_API struct bpf_map *
-bpf_object__find_map_by_name(struct bpf_object *obj, const char *name);
+bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);

 LIBBPF_API int
-bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name);
+bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);

 /*
  * Get bpf_map through the offset of corresponding struct bpf_map_def
@@ -288,7 +288,7 @@ LIBBPF_API struct bpf_map *
 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);

 LIBBPF_API struct bpf_map *
-bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
+bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
 #define bpf_object__for_each_map(pos, obj)		\
	for ((pos) = bpf_map__next(NULL, (obj));	\
	     (pos) != NULL;				\
@@ -296,22 +296,22 @@ bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
 #define bpf_map__for_each bpf_object__for_each_map

 LIBBPF_API struct bpf_map *
-bpf_map__prev(struct bpf_map *map, struct bpf_object *obj);
+bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);

-LIBBPF_API int bpf_map__fd(struct bpf_map *map);
-LIBBPF_API const struct bpf_map_def *bpf_map__def(struct bpf_map *map);
-LIBBPF_API const char *bpf_map__name(struct bpf_map *map);
+LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
+LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map);
+LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
 LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
 LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);

 typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
 LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
				 bpf_map_clear_priv_t clear_priv);
-LIBBPF_API void *bpf_map__priv(struct bpf_map *map);
+LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
 LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
 LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
-LIBBPF_API bool bpf_map__is_offload_neutral(struct bpf_map *map);
-LIBBPF_API bool bpf_map__is_internal(struct bpf_map *map);
+LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
+LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
 LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
 LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
 LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
@@ -454,6 +454,22 @@ bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
 LIBBPF_API void
 bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);

+/*
+ * A helper function to get the number of possible CPUs before looking up
+ * per-CPU maps. Negative errno is returned on failure.
+ *
+ * Example usage:
+ *
+ *     int ncpus = libbpf_num_possible_cpus();
+ *     if (ncpus < 0) {
+ *          // error handling
+ *     }
+ *     long values[ncpus];
+ *     bpf_map_lookup_elem(per_cpu_map_fd, key, values);
+ */
+LIBBPF_API int libbpf_num_possible_cpus(void);
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
......
@@ -172,4 +172,5 @@ LIBBPF_0.0.4 {
		btf_dump__new;
		btf__parse_elf;
		bpf_object__load_xattr;
+		libbpf_num_possible_cpus;
 } LIBBPF_0.0.3;
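
A hedged usage sketch of the newly exported symbol, reading a per-CPU map from userspace (map_fd is assumed to reference a BPF_MAP_TYPE_PERCPU_ARRAY with 8-byte values):

	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	static long long sum_percpu(int map_fd, __u32 key)
	{
		int i, ncpus = libbpf_num_possible_cpus();
		long long sum = 0;

		if (ncpus < 0)
			return ncpus;		/* negative errno from libbpf */

		long long values[ncpus];	/* one slot per possible CPU */
		if (bpf_map_lookup_elem(map_fd, &key, values))
			return -1;
		for (i = 0; i < ncpus; i++)
			sum += values[i];
		return sum;
	}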
@@ -23,6 +23,13 @@
 #define BTF_PARAM_ENC(name, type) (name), (type)
 #define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)

+#ifndef min
+# define min(x, y) ((x) < (y) ? (x) : (y))
+#endif
+#ifndef max
+# define max(x, y) ((x) < (y) ? (y) : (x))
+#endif
+
 extern void libbpf_print(enum libbpf_print_level level,
			 const char *format, ...)
	__attribute__((format(printf, 2, 3)));
......
@@ -60,10 +60,8 @@ struct xsk_socket {
 	struct xsk_umem *umem;
 	struct xsk_socket_config config;
 	int fd;
-	int xsks_map;
 	int ifindex;
 	int prog_fd;
-	int qidconf_map_fd;
 	int xsks_map_fd;
 	__u32 queue_id;
 	char ifname[IFNAMSIZ];
@@ -265,15 +263,11 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
 	/* This is the C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
-	 *     int *qidconf, index = ctx->rx_queue_index;
+	 *     int index = ctx->rx_queue_index;
	 *
	 *     // A set entry here means that the corresponding queue_id
	 *     // has an active AF_XDP socket bound to it.
-	 *     qidconf = bpf_map_lookup_elem(&qidconf_map, &index);
-	 *     if (!qidconf)
-	 *         return XDP_ABORTED;
-	 *
-	 *     if (*qidconf)
+	 *     if (bpf_map_lookup_elem(&xsks_map, &index))
	 *         return bpf_redirect_map(&xsks_map, index, 0);
	 *
	 *     return XDP_PASS;
@@ -286,15 +280,10 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
-		BPF_LD_MAP_FD(BPF_REG_1, xsk->qidconf_map_fd),
+		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
-		BPF_MOV32_IMM(BPF_REG_0, 0),
-		/* if r1 == 0 goto +8 */
-		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
		BPF_MOV32_IMM(BPF_REG_0, 2),
-		/* r1 = *(u32 *)(r1 + 0) */
-		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0),
		/* if r1 == 0 goto +5 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
		/* r2 = *(u32 *)(r10 - 4) */
@@ -366,18 +355,11 @@ static int xsk_create_bpf_maps(struct xsk_socket *xsk)
 	if (max_queues < 0)
		return max_queues;

-	fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "qidconf_map",
+	fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
				 sizeof(int), sizeof(int), max_queues, 0);
	if (fd < 0)
		return fd;
-	xsk->qidconf_map_fd = fd;
-
-	fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
-				 sizeof(int), sizeof(int), max_queues, 0);
-	if (fd < 0) {
-		close(xsk->qidconf_map_fd);
-		return fd;
-	}
	xsk->xsks_map_fd = fd;

	return 0;
@@ -385,10 +367,8 @@ static int xsk_create_bpf_maps(struct xsk_socket *xsk)

 static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
 {
-	close(xsk->qidconf_map_fd);
+	bpf_map_delete_elem(xsk->xsks_map_fd, &xsk->queue_id);
	close(xsk->xsks_map_fd);
-	xsk->qidconf_map_fd = -1;
-	xsk->xsks_map_fd = -1;
 }

 static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
@@ -417,10 +397,9 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
	if (err)
		goto out_map_ids;

-	for (i = 0; i < prog_info.nr_map_ids; i++) {
-		if (xsk->qidconf_map_fd != -1 && xsk->xsks_map_fd != -1)
-			break;
+	xsk->xsks_map_fd = -1;

+	for (i = 0; i < prog_info.nr_map_ids; i++) {
		fd = bpf_map_get_fd_by_id(map_ids[i]);
		if (fd < 0)
			continue;
@@ -431,11 +410,6 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
			continue;
		}

-		if (!strcmp(map_info.name, "qidconf_map")) {
-			xsk->qidconf_map_fd = fd;
-			continue;
-		}
-
		if (!strcmp(map_info.name, "xsks_map")) {
			xsk->xsks_map_fd = fd;
			continue;
@@ -445,40 +419,18 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
	}

	err = 0;
-	if (xsk->qidconf_map_fd < 0 || xsk->xsks_map_fd < 0) {
+	if (xsk->xsks_map_fd == -1)
		err = -ENOENT;
-		xsk_delete_bpf_maps(xsk);
-	}

 out_map_ids:
	free(map_ids);
	return err;
 }

-static void xsk_clear_bpf_maps(struct xsk_socket *xsk)
-{
-	int qid = false;
-
-	bpf_map_update_elem(xsk->qidconf_map_fd, &xsk->queue_id, &qid, 0);
-	bpf_map_delete_elem(xsk->xsks_map_fd, &xsk->queue_id);
-}
-
 static int xsk_set_bpf_maps(struct xsk_socket *xsk)
 {
-	int qid = true, fd = xsk->fd, err;
-
-	err = bpf_map_update_elem(xsk->qidconf_map_fd, &xsk->queue_id, &qid, 0);
-	if (err)
-		goto out;
-
-	err = bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id, &fd, 0);
-	if (err)
-		goto out;
-
-	return 0;
-out:
-	xsk_clear_bpf_maps(xsk);
-	return err;
+	return bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id,
				   &xsk->fd, 0);
 }

 static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
@@ -497,26 +449,27 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
			return err;

		err = xsk_load_xdp_prog(xsk);
-		if (err)
-			goto out_maps;
+		if (err) {
+			xsk_delete_bpf_maps(xsk);
+			return err;
+		}
	} else {
		xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
		err = xsk_lookup_bpf_maps(xsk);
-		if (err)
-			goto out_load;
+		if (err) {
+			close(xsk->prog_fd);
+			return err;
+		}
	}

	err = xsk_set_bpf_maps(xsk);
-	if (err)
-		goto out_load;
-
-	return 0;
-
-out_load:
-	close(xsk->prog_fd);
-out_maps:
-	xsk_delete_bpf_maps(xsk);
-	return err;
+	if (err) {
+		xsk_delete_bpf_maps(xsk);
+		close(xsk->prog_fd);
+		return err;
+	}
+
+	return 0;
 }

 int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
@@ -643,9 +596,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
		goto out_mmap_tx;
	}

-	xsk->qidconf_map_fd = -1;
-	xsk->xsks_map_fd = -1;
+	xsk->prog_fd = -1;

	if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
		err = xsk_setup_xdp_prog(xsk);
		if (err)
@@ -708,8 +659,10 @@ void xsk_socket__delete(struct xsk_socket *xsk)
	if (!xsk)
		return;

-	xsk_clear_bpf_maps(xsk);
-	xsk_delete_bpf_maps(xsk);
+	if (xsk->prog_fd != -1) {
+		xsk_delete_bpf_maps(xsk);
+		close(xsk->prog_fd);
+	}

	optlen = sizeof(off);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
......
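
The reworked setup/teardown stays behind the existing public API; a hedged sketch of the typical caller (umem and ring setup assumed, interface name illustrative):

	struct xsk_socket *xsk = NULL;
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.libbpf_flags = 0,	/* let libbpf load the xsks_map-only prog */
	};

	int err = xsk_socket__create(&xsk, "eth0", /*queue_id=*/0, umem,
				     &rx, &tx, &cfg);
	/* ... datapath ... */
	xsk_socket__delete(xsk);	/* also clears this queue's xsks_map entry */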
@@ -280,4 +280,5 @@ $(OUTPUT)/verifier/tests.h: $(VERIFIER_TESTS_DIR) $(VERIFIER_TEST_FILES)
		 ) > $(VERIFIER_TESTS_H))

 EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) \
-	$(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H)
+	$(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H) \
+	feature
@@ -2,6 +2,7 @@
 #ifndef __BPF_ENDIAN__
 #define __BPF_ENDIAN__
 
+#include <linux/stddef.h>
 #include <linux/swab.h>
 
 /* LLVM's BPF target selects the endianness of the CPU
...
@@ -31,7 +31,7 @@ static int (*bpf_map_pop_elem)(void *map, void *value) =
 	(void *) BPF_FUNC_map_pop_elem;
 static int (*bpf_map_peek_elem)(void *map, void *value) =
 	(void *) BPF_FUNC_map_peek_elem;
-static int (*bpf_probe_read)(void *dst, int size, void *unsafe_ptr) =
+static int (*bpf_probe_read)(void *dst, int size, const void *unsafe_ptr) =
 	(void *) BPF_FUNC_probe_read;
 static unsigned long long (*bpf_ktime_get_ns)(void) =
 	(void *) BPF_FUNC_ktime_get_ns;
@@ -62,7 +62,7 @@ static int (*bpf_perf_event_output)(void *ctx, void *map,
 	(void *) BPF_FUNC_perf_event_output;
 static int (*bpf_get_stackid)(void *ctx, void *map, int flags) =
 	(void *) BPF_FUNC_get_stackid;
-static int (*bpf_probe_write_user)(void *dst, void *src, int size) =
+static int (*bpf_probe_write_user)(void *dst, const void *src, int size) =
 	(void *) BPF_FUNC_probe_write_user;
 static int (*bpf_current_task_under_cgroup)(void *map, int index) =
 	(void *) BPF_FUNC_current_task_under_cgroup;
...
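The const-qualification is a source-level fix only; it lets programs hand pointers to const data straight to these helpers without casting the const away. A hedged sketch in the style of the loop tests below (the raw_tp section name and the x86-64 register access are illustrative, not from this diff):

#include <linux/ptrace.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"

char _license[] SEC("license") = "GPL";

SEC("raw_tracepoint/kfree_skb")
int probe_read_const(volatile struct pt_regs *ctx)
{
	/* with the const-qualified prototype, no cast away from const */
	const void *src = (const void *)ctx->rax;
	char buf[16] = {};

	return bpf_probe_read(buf, sizeof(buf), src);
}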
@@ -6,44 +6,17 @@
 #include <stdlib.h>
 #include <string.h>
 #include <errno.h>
+#include <libbpf.h> /* libbpf_num_possible_cpus */
 
 static inline unsigned int bpf_num_possible_cpus(void)
 {
-	static const char *fcpu = "/sys/devices/system/cpu/possible";
-	unsigned int start, end, possible_cpus = 0;
-	char buff[128];
-	FILE *fp;
-	int len, n, i, j = 0;
+	int possible_cpus = libbpf_num_possible_cpus();
 
-	fp = fopen(fcpu, "r");
-	if (!fp) {
-		printf("Failed to open %s: '%s'!\n", fcpu, strerror(errno));
-		exit(1);
-	}
-
-	if (!fgets(buff, sizeof(buff), fp)) {
-		printf("Failed to read %s!\n", fcpu);
-		exit(1);
-	}
-
-	len = strlen(buff);
-	for (i = 0; i <= len; i++) {
-		if (buff[i] == ',' || buff[i] == '\0') {
-			buff[i] = '\0';
-			n = sscanf(&buff[j], "%u-%u", &start, &end);
-			if (n <= 0) {
-				printf("Failed to retrieve # possible CPUs!\n");
-				exit(1);
-			} else if (n == 1) {
-				end = start;
-			}
-			possible_cpus += end - start + 1;
-			j = i + 1;
-		}
-	}
-	fclose(fp);
+	if (possible_cpus < 0) {
+		printf("Failed to get # of possible cpus: '%s'!\n",
+		       strerror(-possible_cpus));
+		exit(1);
+	}
 
 	return possible_cpus;
 }
...
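The count of possible CPUs matters mainly for per-CPU maps, whose syscall-side values are arrays with one slot per possible CPU. A minimal sketch of that pattern using the same libbpf helper (the map fd and key are the caller's):

#include <errno.h>
#include <stdlib.h>
#include <linux/types.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Sketch: sum one per-CPU array slot across all possible CPUs. */
static int sum_percpu_slot(int map_fd, __u32 key, __u64 *sum)
{
	int i, err, n = libbpf_num_possible_cpus();
	__u64 *vals;

	if (n < 0)
		return n;	/* negative errno from libbpf */

	vals = calloc(n, sizeof(*vals));
	if (!vals)
		return -ENOMEM;

	err = bpf_map_lookup_elem(map_fd, &key, vals);
	if (!err)
		for (*sum = 0, i = 0; i < n; i++)
			*sum += vals[i];

	free(vals);
	return err;
}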
@@ -47,7 +47,7 @@ int enable_all_controllers(char *cgroup_path)
 	char buf[PATH_MAX];
 	char *c, *c2;
 	int fd, cfd;
-	size_t len;
+	ssize_t len;
 
 	snprintf(path, sizeof(path), "%s/cgroup.controllers", cgroup_path);
 	fd = open(path, O_RDONLY);
...
@@ -5,7 +5,7 @@ static int libbpf_debug_print(enum libbpf_print_level level,
 			      const char *format, va_list args)
 {
 	if (level != LIBBPF_DEBUG)
-		return 0;
+		return vfprintf(stderr, format, args);
 
 	if (!strstr(format, "verifier log"))
 		return 0;
@@ -32,24 +32,69 @@ static int check_load(const char *file, enum bpf_prog_type type)
 
 void test_bpf_verif_scale(void)
 {
-	const char *scale[] = {
-		"./test_verif_scale1.o", "./test_verif_scale2.o", "./test_verif_scale3.o"
+	const char *sched_cls[] = {
+		"./test_verif_scale1.o", "./test_verif_scale2.o", "./test_verif_scale3.o",
 	};
-	const char *pyperf[] = {
-		"./pyperf50.o", "./pyperf100.o", "./pyperf180.o"
+	const char *raw_tp[] = {
+		/* full unroll by llvm */
+		"./pyperf50.o", "./pyperf100.o", "./pyperf180.o",
+
+		/* partial unroll. llvm will unroll loop ~150 times.
+		 * C loop count -> 600.
+		 * Asm loop count -> 4.
+		 * 16k insns in loop body.
+		 * Total of 5 such loops. Total program size ~82k insns.
+		 */
+		"./pyperf600.o",
+
+		/* no unroll at all.
+		 * C loop count -> 600.
+		 * ASM loop count -> 600.
+		 * ~110 insns in loop body.
+		 * Total of 5 such loops. Total program size ~1500 insns.
+		 */
+		"./pyperf600_nounroll.o",
+
+		"./loop1.o", "./loop2.o",
+
+		/* partial unroll. 19k insn in a loop.
+		 * Total program size 20.8k insn.
+		 * ~350k processed_insns
+		 */
+		"./strobemeta.o",
+
+		/* no unroll, tiny loops */
+		"./strobemeta_nounroll1.o",
+		"./strobemeta_nounroll2.o",
+	};
+	const char *cg_sysctl[] = {
+		"./test_sysctl_loop1.o", "./test_sysctl_loop2.o",
 	};
 	int err, i;
 
 	if (verifier_stats)
 		libbpf_set_print(libbpf_debug_print);
 
-	for (i = 0; i < ARRAY_SIZE(scale); i++) {
-		err = check_load(scale[i], BPF_PROG_TYPE_SCHED_CLS);
-		printf("test_scale:%s:%s\n", scale[i], err ? "FAIL" : "OK");
+	err = check_load("./loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT);
+	printf("test_scale:loop3:%s\n", err ? (error_cnt--, "OK") : "FAIL");
+
+	for (i = 0; i < ARRAY_SIZE(sched_cls); i++) {
+		err = check_load(sched_cls[i], BPF_PROG_TYPE_SCHED_CLS);
+		printf("test_scale:%s:%s\n", sched_cls[i], err ? "FAIL" : "OK");
 	}
-	for (i = 0; i < ARRAY_SIZE(pyperf); i++) {
-		err = check_load(pyperf[i], BPF_PROG_TYPE_RAW_TRACEPOINT);
-		printf("test_scale:%s:%s\n", pyperf[i], err ? "FAIL" : "OK");
+
+	for (i = 0; i < ARRAY_SIZE(raw_tp); i++) {
+		err = check_load(raw_tp[i], BPF_PROG_TYPE_RAW_TRACEPOINT);
+		printf("test_scale:%s:%s\n", raw_tp[i], err ? "FAIL" : "OK");
 	}
+
+	for (i = 0; i < ARRAY_SIZE(cg_sysctl); i++) {
+		err = check_load(cg_sysctl[i], BPF_PROG_TYPE_CGROUP_SYSCTL);
+		printf("test_scale:%s:%s\n", cg_sysctl[i], err ? "FAIL" : "OK");
	}
+
+	err = check_load("./test_xdp_loop.o", BPF_PROG_TYPE_XDP);
+	printf("test_scale:test_xdp_loop:%s\n", err ? "FAIL" : "OK");
+
+	err = check_load("./test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL);
+	printf("test_scale:test_seg6_loop:%s\n", err ? "FAIL" : "OK");
 }
@@ -57,17 +57,25 @@ struct frag_hdr {
 	__be32 identification;
 };
 
-struct bpf_map_def SEC("maps") jmp_table = {
+struct {
+	__u32 type;
+	__u32 max_entries;
+	__u32 key_size;
+	__u32 value_size;
+} jmp_table SEC(".maps") = {
 	.type = BPF_MAP_TYPE_PROG_ARRAY,
+	.max_entries = 8,
 	.key_size = sizeof(__u32),
 	.value_size = sizeof(__u32),
-	.max_entries = 8
 };
 
-struct bpf_map_def SEC("maps") last_dissection = {
+struct {
+	__u32 type;
+	__u32 max_entries;
+	__u32 *key;
+	struct bpf_flow_keys *value;
+} last_dissection SEC(".maps") = {
 	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(__u32),
-	.value_size = sizeof(struct bpf_flow_keys),
 	.max_entries = 1,
 };
...
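On the loading side nothing changes: libbpf derives key/value sizes (and the BTF key/value association that BPF_ANNOTATE_KV_PAIR used to provide) from the typed members of the .maps declaration. A sketch with the object and map names from the diff above, error handling abbreviated:

#include <errno.h>
#include <bpf/libbpf.h>

static int load_flow_obj(int *last_dissection_fd)
{
	struct bpf_object *obj = bpf_object__open("bpf_flow.o");
	struct bpf_map *map;
	int err;

	if (libbpf_get_error(obj))
		return -EINVAL;

	err = bpf_object__load(obj);
	if (err)
		goto out;

	/* key/value sizes were inferred from the BTF-typed declaration */
	map = bpf_object__find_map_by_name(obj, "last_dissection");
	if (!map) {
		err = -ENOENT;
		goto out;
	}
	*last_dissection_fd = bpf_map__fd(map);
	return 0;
out:
	bpf_object__close(obj);
	return err;
}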
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
char _license[] SEC("license") = "GPL";
SEC("raw_tracepoint/kfree_skb")
int nested_loops(volatile struct pt_regs* ctx)
{
int i, j, sum = 0, m;
for (j = 0; j < 300; j++)
for (i = 0; i < j; i++) {
if (j & 1)
m = ctx->rax;
else
m = j;
sum += i * m;
}
return sum;
}
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
char _license[] SEC("license") = "GPL";
SEC("raw_tracepoint/consume_skb")
int while_true(volatile struct pt_regs* ctx)
{
int i = 0;
while (true) {
if (ctx->rax & 1)
i += 3;
else
i += 7;
if (i > 40)
break;
}
return i;
}
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
char _license[] SEC("license") = "GPL";
SEC("raw_tracepoint/consume_skb")
int while_true(volatile struct pt_regs* ctx)
{
__u64 i = 0, sum = 0;
do {
i++;
sum += ctx->rax;
} while (i < 0x100000000ULL);
return sum;
}
@@ -10,24 +10,22 @@
 #define REFRESH_TIME_NS	100000000
 #define NS_PER_SEC	1000000000
 
-struct bpf_map_def SEC("maps") percpu_netcnt = {
+struct {
+	__u32 type;
+	struct bpf_cgroup_storage_key *key;
+	struct percpu_net_cnt *value;
+} percpu_netcnt SEC(".maps") = {
 	.type = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
-	.key_size = sizeof(struct bpf_cgroup_storage_key),
-	.value_size = sizeof(struct percpu_net_cnt),
 };
 
-BPF_ANNOTATE_KV_PAIR(percpu_netcnt, struct bpf_cgroup_storage_key,
-		     struct percpu_net_cnt);
-
-struct bpf_map_def SEC("maps") netcnt = {
+struct {
+	__u32 type;
+	struct bpf_cgroup_storage_key *key;
+	struct net_cnt *value;
+} netcnt SEC(".maps") = {
 	.type = BPF_MAP_TYPE_CGROUP_STORAGE,
-	.key_size = sizeof(struct bpf_cgroup_storage_key),
-	.value_size = sizeof(struct net_cnt),
 };
 
-BPF_ANNOTATE_KV_PAIR(netcnt, struct bpf_cgroup_storage_key,
-		     struct net_cnt);
-
 SEC("cgroup/skb")
 int bpf_nextcnt(struct __sk_buff *skb)
 {
...
@@ -220,7 +220,11 @@ static inline __attribute__((__always_inline__)) int __on_event(struct pt_regs *
 	int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym);
 	if (symbol_counter == NULL)
 		return 0;
-#pragma unroll
+#ifdef NO_UNROLL
+#pragma clang loop unroll(disable)
+#else
+#pragma clang loop unroll(full)
+#endif
 	/* Unwind python stack */
 	for (int i = 0; i < STACK_MAX_LEN; ++i) {
 		if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) {
...
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
/* clang will not unroll the loop 600 times.
* Instead it will unroll it to the amount it deemed
* appropriate, but the loop will still execute 600 times.
* Total program size is around 90k insns
*/
#include "pyperf.h"
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
#define NO_UNROLL
/* clang will not unroll at all.
* Total program size is around 2k insns
*/
#include "pyperf.h"
@@ -7,25 +7,36 @@
 #include "bpf_helpers.h"
 #include "bpf_endian.h"
 
-struct bpf_map_def SEC("maps") socket_cookies = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(__u64),
-	.value_size = sizeof(__u32),
-	.max_entries = 1 << 8,
+struct socket_cookie {
+	__u64 cookie_key;
+	__u32 cookie_value;
+};
+
+struct {
+	__u32 type;
+	__u32 map_flags;
+	int *key;
+	struct socket_cookie *value;
+} socket_cookies SEC(".maps") = {
+	.type = BPF_MAP_TYPE_SK_STORAGE,
+	.map_flags = BPF_F_NO_PREALLOC,
 };
 
 SEC("cgroup/connect6")
 int set_cookie(struct bpf_sock_addr *ctx)
 {
-	__u32 cookie_value = 0xFF;
-	__u64 cookie_key;
+	struct socket_cookie *p;
 
 	if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
 		return 1;
 
-	cookie_key = bpf_get_socket_cookie(ctx);
-	if (bpf_map_update_elem(&socket_cookies, &cookie_key, &cookie_value, 0))
-		return 0;
+	p = bpf_sk_storage_get(&socket_cookies, ctx->sk, 0,
+			       BPF_SK_STORAGE_GET_F_CREATE);
+	if (!p)
+		return 1;
+
+	p->cookie_value = 0xFF;
+	p->cookie_key = bpf_get_socket_cookie(ctx);
 
 	return 1;
 }
@@ -33,9 +44,8 @@ int set_cookie(struct bpf_sock_addr *ctx)
 SEC("sockops")
 int update_cookie(struct bpf_sock_ops *ctx)
 {
-	__u32 new_cookie_value;
-	__u32 *cookie_value;
-	__u64 cookie_key;
+	struct bpf_sock *sk;
+	struct socket_cookie *p;
 
 	if (ctx->family != AF_INET6)
 		return 1;
@@ -43,14 +53,17 @@ int update_cookie(struct bpf_sock_ops *ctx)
 	if (ctx->op != BPF_SOCK_OPS_TCP_CONNECT_CB)
 		return 1;
 
-	cookie_key = bpf_get_socket_cookie(ctx);
+	if (!ctx->sk)
+		return 1;
+
+	p = bpf_sk_storage_get(&socket_cookies, ctx->sk, 0, 0);
+	if (!p)
+		return 1;
 
-	cookie_value = bpf_map_lookup_elem(&socket_cookies, &cookie_key);
-	if (!cookie_value)
+	if (p->cookie_key != bpf_get_socket_cookie(ctx))
 		return 1;
 
-	new_cookie_value = (ctx->local_port << 8) | *cookie_value;
-	bpf_map_update_elem(&socket_cookies, &cookie_key, &new_cookie_value, 0);
+	p->cookie_value = (ctx->local_port << 8) | p->cookie_value;
 
 	return 1;
 }
...
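Since sk storage hangs off the socket itself, the userspace half of this test no longer needs the cookie as a lookup key: for a BPF_MAP_TYPE_SK_STORAGE map, the syscall-side key is a socket fd. A sketch (struct duplicated from the program above):

#include <linux/types.h>
#include <bpf/bpf.h>

struct socket_cookie {
	__u64 cookie_key;
	__u32 cookie_value;
};

/* Sketch: read back the storage that the programs above attached to our
 * own connected socket, keyed by its fd. */
static int read_cookie(int map_fd, int sock_fd, struct socket_cookie *val)
{
	return bpf_map_lookup_elem(map_fd, &sock_fd, val);
}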
 #include <linux/bpf.h>
 #include "bpf_helpers.h"
-#include "bpf_util.h"
 #include "bpf_endian.h"
 
 int _version SEC("version") = 1;
...
 #include <linux/bpf.h>
 #include "bpf_helpers.h"
-#include "bpf_util.h"
 #include "bpf_endian.h"
 
 int _version SEC("version") = 1;
...
 #include <linux/bpf.h>
 #include "bpf_helpers.h"
-#include "bpf_util.h"
 #include "bpf_endian.h"
 
 int _version SEC("version") = 1;
...
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
// Copyright (c) 2019 Facebook
#define STROBE_MAX_INTS 2
#define STROBE_MAX_STRS 25
#define STROBE_MAX_MAPS 100
#define STROBE_MAX_MAP_ENTRIES 20
/* full unroll by llvm */
#undef NO_UNROLL
#include "strobemeta.h"
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
// Copyright (c) 2019 Facebook
#define STROBE_MAX_INTS 2
#define STROBE_MAX_STRS 25
#define STROBE_MAX_MAPS 13
#define STROBE_MAX_MAP_ENTRIES 20
#define NO_UNROLL
#include "strobemeta.h"
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
// Copyright (c) 2019 Facebook
#define STROBE_MAX_INTS 2
#define STROBE_MAX_STRS 25
#define STROBE_MAX_MAPS 30
#define STROBE_MAX_MAP_ENTRIES 20
#define NO_UNROLL
#include "strobemeta.h"
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Facebook */
#include <linux/bpf.h>
#include "bpf_helpers.h"
int _version SEC("version") = 1;
struct ipv_counts {
unsigned int v4;
unsigned int v6;
};
/* just to validate we can handle maps in multiple sections */
struct bpf_map_def SEC("maps") btf_map_legacy = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(long long),
.max_entries = 4,
};
BPF_ANNOTATE_KV_PAIR(btf_map_legacy, int, struct ipv_counts);
struct {
int *key;
struct ipv_counts *value;
unsigned int type;
unsigned int max_entries;
} btf_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.max_entries = 4,
};
struct dummy_tracepoint_args {
unsigned long long pad;
struct sock *sock;
};
__attribute__((noinline))
static int test_long_fname_2(struct dummy_tracepoint_args *arg)
{
struct ipv_counts *counts;
int key = 0;
if (!arg->sock)
return 0;
counts = bpf_map_lookup_elem(&btf_map, &key);
if (!counts)
return 0;
counts->v6++;
/* just verify we can reference both maps */
counts = bpf_map_lookup_elem(&btf_map_legacy, &key);
if (!counts)
return 0;
return 0;
}
__attribute__((noinline))
static int test_long_fname_1(struct dummy_tracepoint_args *arg)
{
return test_long_fname_2(arg);
}
SEC("dummy_tracepoint")
int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
{
return test_long_fname_1(arg);
}
char _license[] SEC("license") = "GPL";
@@ -15,17 +15,25 @@ struct stack_trace_t {
 	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
 };
 
-struct bpf_map_def SEC("maps") perfmap = {
+struct {
+	__u32 type;
+	__u32 max_entries;
+	__u32 key_size;
+	__u32 value_size;
+} perfmap SEC(".maps") = {
 	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+	.max_entries = 2,
 	.key_size = sizeof(int),
 	.value_size = sizeof(__u32),
-	.max_entries = 2,
 };
 
-struct bpf_map_def SEC("maps") stackdata_map = {
+struct {
+	__u32 type;
+	__u32 max_entries;
+	__u32 *key;
+	struct stack_trace_t *value;
+} stackdata_map SEC(".maps") = {
 	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
-	.key_size = sizeof(__u32),
-	.value_size = sizeof(struct stack_trace_t),
 	.max_entries = 1,
 };
 
@@ -47,10 +55,13 @@ struct bpf_map_def SEC("maps") stackdata_map = {
  * issue and avoid complicated C programming massaging.
  * This is an acceptable workaround since there is one entry here.
  */
-struct bpf_map_def SEC("maps") rawdata_map = {
+struct {
+	__u32 type;
+	__u32 max_entries;
+	__u32 *key;
+	__u64 (*value)[2 * MAX_STACK_RAWTP];
+} rawdata_map SEC(".maps") = {
 	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
-	.key_size = sizeof(__u32),
-	.value_size = MAX_STACK_RAWTP * sizeof(__u64) * 2,
 	.max_entries = 1,
 };
...
@@ -11,29 +11,31 @@ struct hmap_elem {
 	int var[VAR_NUM];
 };
 
-struct bpf_map_def SEC("maps") hash_map = {
+struct {
+	__u32 type;
+	__u32 max_entries;
+	__u32 *key;
+	struct hmap_elem *value;
+} hash_map SEC(".maps") = {
 	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(int),
-	.value_size = sizeof(struct hmap_elem),
 	.max_entries = 1,
 };
 
-BPF_ANNOTATE_KV_PAIR(hash_map, int, struct hmap_elem);
-
 struct array_elem {
 	struct bpf_spin_lock lock;
 	int var[VAR_NUM];
 };
 
-struct bpf_map_def SEC("maps") array_map = {
+struct {
+	__u32 type;
+	__u32 max_entries;
+	int *key;
+	struct array_elem *value;
+} array_map SEC(".maps") = {
 	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(int),
-	.value_size = sizeof(struct array_elem),
 	.max_entries = 1,
 };
 
-BPF_ANNOTATE_KV_PAIR(array_map, int, struct array_elem);
-
 SEC("map_lock_demo")
 int bpf_map_lock_test(struct __sk_buff *skb)
 {
...
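The body of bpf_map_lock_test() is elided above; the pattern it exercises is roughly the sketch below (illustrative, not the test's exact body): values that embed a struct bpf_spin_lock may only be mutated between bpf_spin_lock() and bpf_spin_unlock(), a pairing the verifier enforces.

/* Sketch using the map and struct declarations from the diff above. */
SEC("map_lock_sketch")
int map_lock_sketch(struct __sk_buff *skb)
{
	struct array_elem *v;
	int key = 0, i;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	bpf_spin_lock(&v->lock);
	for (i = 0; i < VAR_NUM; i++)
		v->var[i] = skb->len;
	bpf_spin_unlock(&v->lock);

	return 0;
}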