Commit f4727f43
Authored Jul 17, 2017 by Brenden Blanco; committed by GitHub on Jul 17, 2017
Merge pull request #1258 from shodoco/tcpbpf
Update bpf.h and virtual_bpf.h to 4.13-rc1
Parents: 830c1f76, e473a20e
Showing 2 changed files with 286 additions and 10 deletions (+286, -10)
src/cc/compat/linux/bpf.h (+143, -5)
src/cc/compat/linux/virtual_bpf.h (+143, -5)
src/cc/compat/linux/bpf.h (view file @ f4727f43)
...
...
@@ -82,6 +82,11 @@ enum bpf_cmd {
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
BPF_PROG_GET_NEXT_ID,
BPF_MAP_GET_NEXT_ID,
BPF_PROG_GET_FD_BY_ID,
BPF_MAP_GET_FD_BY_ID,
BPF_OBJ_GET_INFO_BY_FD,
};
enum bpf_map_type {
...
...
@@ -115,12 +120,14 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LWT_IN,
BPF_PROG_TYPE_LWT_OUT,
BPF_PROG_TYPE_LWT_XMIT,
BPF_PROG_TYPE_SOCK_OPS,
};
enum bpf_attach_type {
BPF_CGROUP_INET_INGRESS,
BPF_CGROUP_INET_EGRESS,
BPF_CGROUP_INET_SOCK_CREATE,
BPF_CGROUP_SOCK_OPS,
__MAX_BPF_ATTACH_TYPE
};
...
...
@@ -132,6 +139,13 @@ enum bpf_attach_type {
*/
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will perform strict alignment checking as if the kernel
* has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
* and NET_IP_ALIGN defined to 2.
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
#define BPF_PSEUDO_MAP_FD 1
/* flags for BPF_MAP_UPDATE_ELEM command */
...
...
@@ -177,6 +191,7 @@ union bpf_attr {
__u32 log_size; /* size of user buffer */
__aligned_u64 log_buf; /* user supplied buffer */
__u32 kern_version; /* checked when prog_type=kprobe */
__u32 prog_flags;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
...
...
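The new prog_flags field in the BPF_PROG_LOAD part of union bpf_attr is how BPF_F_STRICT_ALIGNMENT (from the hunk above) reaches the verifier. A minimal user-space sketch, assuming root privileges and the raw bpf(2) syscall; the trivial "r0 = 0; exit" program is purely illustrative:

/* Sketch: load a trivial program with strict alignment checking enabled.
 * Field names come from this header; everything else is illustrative. */
#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        /* r0 = 0; exit -- the smallest valid program. */
        struct bpf_insn insns[] = {
                { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
                { .code = BPF_JMP | BPF_EXIT },
        };
        union bpf_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type  = BPF_PROG_TYPE_SOCKET_FILTER;
        attr.insns      = (__u64)(unsigned long)insns;
        attr.insn_cnt   = sizeof(insns) / sizeof(insns[0]);
        attr.license    = (__u64)(unsigned long)"GPL";
        attr.prog_flags = BPF_F_STRICT_ALIGNMENT;   /* new in this header version */

        fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
        printf("BPF_PROG_LOAD returned %d\n", fd);
        if (fd >= 0)
                close(fd);
        return 0;
}

With the flag set, the verifier checks alignment as if CONFIG_EFFICIENT_UNALIGNED_ACCESS were unset, per the comment in the hunk above.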
@@ -201,6 +216,21 @@ union bpf_attr {
__u32 repeat;
__u32 duration;
} test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */
union {
__u32 start_id;
__u32 prog_id;
__u32 map_id;
};
__u32 next_id;
};
struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
__u32 bpf_fd;
__u32 info_len;
__aligned_u64 info;
} info;
} __attribute__((aligned(8)));
/* BPF helper function descriptions:
...
...
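Taken together with the bpf_prog_info struct added further down in this header, the new BPF_*_GET_*_ID and BPF_OBJ_GET_INFO_BY_FD commands let user space enumerate and inspect loaded programs. A minimal sketch, assuming a 4.13-era <linux/bpf.h>, root privileges, and the raw bpf(2) syscall (no libbpf wrappers are assumed here):

/* Sketch: walk all loaded BPF program IDs and print their type and tag. */
#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr)
{
        return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

int main(void)
{
        __u32 id = 0;

        for (;;) {
                union bpf_attr attr;
                struct bpf_prog_info info;
                int fd;

                /* BPF_PROG_GET_NEXT_ID: start_id in, next_id out; error ends the walk. */
                memset(&attr, 0, sizeof(attr));
                attr.start_id = id;
                if (sys_bpf(BPF_PROG_GET_NEXT_ID, &attr) != 0)
                        break;
                id = attr.next_id;

                /* Turn the id into an fd, then query bpf_prog_info through it. */
                memset(&attr, 0, sizeof(attr));
                attr.prog_id = id;
                fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr);
                if (fd < 0)
                        continue;

                memset(&attr, 0, sizeof(attr));
                memset(&info, 0, sizeof(info));
                attr.info.bpf_fd   = fd;
                attr.info.info_len = sizeof(info);
                attr.info.info     = (__u64)(unsigned long)&info;
                if (sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr) == 0)
                        printf("prog id %u type %u tag %02x%02x%02x%02x%02x%02x%02x%02x\n",
                               info.id, info.type,
                               info.tag[0], info.tag[1], info.tag[2], info.tag[3],
                               info.tag[4], info.tag[5], info.tag[6], info.tag[7]);
                close(fd);
        }
        return 0;
}

The same start_id/next_id walk works for maps via BPF_MAP_GET_NEXT_ID, BPF_MAP_GET_FD_BY_ID and struct bpf_map_info.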
@@ -305,8 +335,11 @@ union bpf_attr {
* @flags: room for future extensions
* Return: 0 on success or negative error
*
* u64 bpf_perf_event_read(&map, index)
* Return: Number events read or error code
* u64 bpf_perf_event_read(map, flags)
* read perf event counter value
* @map: pointer to perf_event_array map
* @flags: index of event in the map or bitmask flags
* Return: value of perf event counter read or error code
*
* int bpf_redirect(ifindex, flags)
* redirect to another netdev
...
...
@@ -320,11 +353,11 @@ union bpf_attr {
* @skb: pointer to skb
* Return: realm if != 0
*
* int bpf_perf_event_output(ctx, map, index, data, size)
* int bpf_perf_event_output(ctx, map, flags, data, size)
* output perf raw sample
* @ctx: struct pt_regs*
* @map: pointer to perf_event_array map
* @index: index of event in the map
* @flags: index of event in the map or bitmask flags
* @data: data on stack to be output as raw data
* @size: size of data
* Return: 0 on success or negative error
...
...
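The third argument of bpf_perf_event_output() is now documented as a flags word: either an explicit index into the perf event array or BPF_F_CURRENT_CPU (plus BPF_F_CTXLEN_MASK bits for sk_buff contexts, defined further down). In bcc this is what events.perf_submit() uses under the hood; a short BCC-style sketch, with illustrative program and field names:

// Sketch of a BCC program relying on bpf_perf_event_output()'s flags argument.
#include <uapi/linux/ptrace.h>

BPF_PERF_OUTPUT(events);

struct event_t {
    u32 pid;
    u64 ts;
};

int trace_entry(struct pt_regs *ctx)
{
    struct event_t ev = {};

    ev.pid = bpf_get_current_pid_tgid() >> 32;
    ev.ts  = bpf_ktime_get_ns();

    /* perf_submit() expands (roughly) to bpf_perf_event_output(ctx, &events,
     * BPF_F_CURRENT_CPU, &ev, sizeof(ev)): the flags argument selects the
     * current CPU's entry in the perf event array. */
    events.perf_submit(ctx, &ev, sizeof(ev));
    return 0;
}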
@@ -482,6 +515,30 @@ union bpf_attr {
* Get the owner uid of the socket stored inside sk_buff.
* @skb: pointer to skb
* Return: uid of the socket owner on success or overflowuid if failed.
*
* u32 bpf_set_hash(skb, hash)
* Set full skb->hash.
* @skb: pointer to skb
* @hash: hash to set
*
* int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
* Calls setsockopt. Not all opts are available, only those with
* integer optvals plus TCP_CONGESTION.
* Supported levels: SOL_SOCKET and IPROTO_TCP
* @bpf_socket: pointer to bpf_socket
* @level: SOL_SOCKET or IPROTO_TCP
* @optname: option name
* @optval: pointer to option value
* @optlen: length of optval in byes
* Return: 0 or negative error
*
* int bpf_skb_adjust_room(skb, len_diff, mode, flags)
* Grow or shrink room in sk_buff.
* @skb: pointer to skb
* @len_diff: (signed) amount of room to grow/shrink
* @mode: operation mode (enum bpf_adj_room_mode)
* @flags: reserved for future use
* Return: 0 on success or negative error code
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
...
...
@@ -531,7 +588,10 @@ union bpf_attr {
FN(xdp_adjust_head), \
FN(probe_read_str), \
FN(get_socket_cookie), \
FN(get_socket_uid),
FN(get_socket_uid), \
FN(set_hash), \
FN(setsockopt), \
FN(skb_adjust_room),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
...
...
@@ -581,6 +641,11 @@ enum bpf_func_id {
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
};
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
...
...
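bpf_skb_adjust_room() and its BPF_ADJ_ROOM_NET mode (the enum in the hunk above) let a tc classifier program grow or shrink packet room at the network layer. A sketch, assuming clang -target bpf against 4.13-era headers; the helper declaration follows the samples/bpf style and the 20-byte figure is illustrative:

/* Sketch of a tc (sched_cls) program using the new bpf_skb_adjust_room() helper. */
#include <uapi/linux/bpf.h>
#include <uapi/linux/pkt_cls.h>

static int (*bpf_skb_adjust_room)(void *skb, __s32 len_diff, __u32 mode,
                                  __u64 flags) = (void *)BPF_FUNC_skb_adjust_room;

__attribute__((section("classifier"), used))
int grow_net_room(struct __sk_buff *skb)
{
        /* Ask for 20 extra bytes of room at the network layer, the kind of
         * adjustment needed when an IPv4 header is rewritten as IPv6. */
        if (bpf_skb_adjust_room(skb, 20, BPF_ADJ_ROOM_NET, 0))
                return TC_ACT_SHOT;   /* could not adjust; drop, for the sketch */
        return TC_ACT_OK;
}

The flags argument is reserved in this header version and must be 0.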
@@ -662,4 +727,77 @@ struct xdp_md {
__u32 data_end;
};
#define BPF_TAG_SIZE 8
struct bpf_prog_info {
__u32 type;
__u32 id;
__u8 tag[BPF_TAG_SIZE];
__u32 jited_prog_len;
__u32 xlated_prog_len;
__aligned_u64 jited_prog_insns;
__aligned_u64 xlated_prog_insns;
} __attribute__((aligned(8)));
struct bpf_map_info {
__u32 type;
__u32 id;
__u32 key_size;
__u32 value_size;
__u32 max_entries;
__u32 map_flags;
} __attribute__((aligned(8)));
/* User bpf_sock_ops struct to access socket values and specify request ops
 * and their replies.
 * Some of this fields are in network (bigendian) byte order and may need
 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
 * New fields can only be added at the end of this structure
 */
struct bpf_sock_ops {
__u32 op;
union {
__u32 reply;
__u32 replylong[4];
};
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
__u32 local_ip4; /* Stored in network byte order */
__u32 remote_ip6[4]; /* Stored in network byte order */
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
};
/* List of known BPF sock_ops operators.
 * New entries can only be added at the end
 */
enum {
BPF_SOCK_OPS_VOID,
BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or
 * -1 if default value should be used
 */
BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized
 * window (in packets) or -1 if default
 * value should be used
 */
BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an
 * active connection is initialized
 */
BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an
 * active connection is
 * established
 */
BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a
 * passive connection is
 * established
 */
BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control
 * needs ECN
 */
};
#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
#endif /* _UAPI__LINUX_BPF_H__ */
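The pieces above fit together as the tcpbpf feature: a BPF_PROG_TYPE_SOCK_OPS program attached to a cgroup with attach type BPF_CGROUP_SOCK_OPS is called with a struct bpf_sock_ops context at the ops listed above, and can call bpf_setsockopt(), including the BPF-only optnames TCP_BPF_IW and TCP_BPF_SNDCWND_CLAMP. A sketch, assuming clang -target bpf and 4.13-era headers; the helper declaration, section name and the value 40 are illustrative:

/* Sketch of a sock_ops program that raises the initial congestion window. */
#include <uapi/linux/bpf.h>

#ifndef SOL_TCP
#define SOL_TCP 6              /* == IPPROTO_TCP */
#endif

static int (*bpf_setsockopt)(void *ctx, int level, int optname,
                             void *optval, int optlen) =
        (void *)BPF_FUNC_setsockopt;

__attribute__((section("sockops"), used))
int set_initial_cwnd(struct bpf_sock_ops *skops)
{
        int iw = 40;           /* initial congestion window, in packets */
        int rv = 0;

        switch (skops->op) {
        case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
        case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
                /* TCP_BPF_IW is one of the BPF-only optnames defined above. */
                rv = bpf_setsockopt(skops, SOL_TCP, TCP_BPF_IW, &iw, sizeof(iw));
                break;
        default:
                break;
        }
        skops->reply = rv;     /* result is passed back through the reply field */
        return 1;
}

Such a program is attached with the existing BPF_PROG_ATTACH command against a cgroup v2 directory fd, so it only affects sockets created in that cgroup.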
src/cc/compat/linux/virtual_bpf.h (view file @ f4727f43)
...
...
@@ -83,6 +83,11 @@ enum bpf_cmd {
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
BPF_PROG_GET_NEXT_ID,
BPF_MAP_GET_NEXT_ID,
BPF_PROG_GET_FD_BY_ID,
BPF_MAP_GET_FD_BY_ID,
BPF_OBJ_GET_INFO_BY_FD,
};
enum bpf_map_type {
...
...
@@ -116,12 +121,14 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LWT_IN,
BPF_PROG_TYPE_LWT_OUT,
BPF_PROG_TYPE_LWT_XMIT,
BPF_PROG_TYPE_SOCK_OPS,
};
enum bpf_attach_type {
BPF_CGROUP_INET_INGRESS,
BPF_CGROUP_INET_EGRESS,
BPF_CGROUP_INET_SOCK_CREATE,
BPF_CGROUP_SOCK_OPS,
__MAX_BPF_ATTACH_TYPE
};
...
...
@@ -133,6 +140,13 @@ enum bpf_attach_type {
*/
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will perform strict alignment checking as if the kernel
* has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
* and NET_IP_ALIGN defined to 2.
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
#define BPF_PSEUDO_MAP_FD 1
/* flags for BPF_MAP_UPDATE_ELEM command */
...
...
@@ -178,6 +192,7 @@ union bpf_attr {
__u32 log_size; /* size of user buffer */
__aligned_u64 log_buf; /* user supplied buffer */
__u32 kern_version; /* checked when prog_type=kprobe */
__u32 prog_flags;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
...
...
@@ -202,6 +217,21 @@ union bpf_attr {
__u32 repeat;
__u32 duration;
} test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */
union {
__u32 start_id;
__u32 prog_id;
__u32 map_id;
};
__u32 next_id;
};
struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
__u32 bpf_fd;
__u32 info_len;
__aligned_u64 info;
} info;
} __attribute__((aligned(8)));
/* BPF helper function descriptions:
...
...
@@ -306,8 +336,11 @@ union bpf_attr {
* @flags: room for future extensions
* Return: 0 on success or negative error
*
* u64 bpf_perf_event_read(&map, index)
* Return: Number events read or error code
* u64 bpf_perf_event_read(map, flags)
* read perf event counter value
* @map: pointer to perf_event_array map
* @flags: index of event in the map or bitmask flags
* Return: value of perf event counter read or error code
*
* int bpf_redirect(ifindex, flags)
* redirect to another netdev
...
...
@@ -321,11 +354,11 @@ union bpf_attr {
* @skb: pointer to skb
* Return: realm if != 0
*
* int bpf_perf_event_output(ctx, map, index, data, size)
* int bpf_perf_event_output(ctx, map, flags, data, size)
* output perf raw sample
* @ctx: struct pt_regs*
* @map: pointer to perf_event_array map
* @index: index of event in the map
* @flags: index of event in the map or bitmask flags
* @data: data on stack to be output as raw data
* @size: size of data
* Return: 0 on success or negative error
...
...
@@ -483,6 +516,30 @@ union bpf_attr {
* Get the owner uid of the socket stored inside sk_buff.
* @skb: pointer to skb
* Return: uid of the socket owner on success or overflowuid if failed.
*
* u32 bpf_set_hash(skb, hash)
* Set full skb->hash.
* @skb: pointer to skb
* @hash: hash to set
*
* int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
* Calls setsockopt. Not all opts are available, only those with
* integer optvals plus TCP_CONGESTION.
* Supported levels: SOL_SOCKET and IPROTO_TCP
* @bpf_socket: pointer to bpf_socket
* @level: SOL_SOCKET or IPROTO_TCP
* @optname: option name
* @optval: pointer to option value
* @optlen: length of optval in byes
* Return: 0 or negative error
*
* int bpf_skb_adjust_room(skb, len_diff, mode, flags)
* Grow or shrink room in sk_buff.
* @skb: pointer to skb
* @len_diff: (signed) amount of room to grow/shrink
* @mode: operation mode (enum bpf_adj_room_mode)
* @flags: reserved for future use
* Return: 0 on success or negative error code
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
...
...
@@ -532,7 +589,10 @@ union bpf_attr {
FN(xdp_adjust_head), \
FN(probe_read_str), \
FN(get_socket_cookie), \
FN(get_socket_uid),
FN(get_socket_uid), \
FN(set_hash), \
FN(setsockopt), \
FN(skb_adjust_room),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
...
...
@@ -582,6 +642,11 @@ enum bpf_func_id {
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
};
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
...
...
@@ -663,5 +728,78 @@ struct xdp_md {
__u32 data_end;
};
#define BPF_TAG_SIZE 8
struct bpf_prog_info {
__u32 type;
__u32 id;
__u8 tag[BPF_TAG_SIZE];
__u32 jited_prog_len;
__u32 xlated_prog_len;
__aligned_u64 jited_prog_insns;
__aligned_u64 xlated_prog_insns;
} __attribute__((aligned(8)));
struct bpf_map_info {
__u32 type;
__u32 id;
__u32 key_size;
__u32 value_size;
__u32 max_entries;
__u32 map_flags;
} __attribute__((aligned(8)));
/* User bpf_sock_ops struct to access socket values and specify request ops
* and their replies.
* Some of this fields are in network (bigendian) byte order and may need
* to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
* New fields can only be added at the end of this structure
*/
struct bpf_sock_ops {
__u32 op;
union {
__u32 reply;
__u32 replylong[4];
};
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
__u32 local_ip4; /* Stored in network byte order */
__u32 remote_ip6[4]; /* Stored in network byte order */
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
};
/* List of known BPF sock_ops operators.
* New entries can only be added at the end
*/
enum {
BPF_SOCK_OPS_VOID,
BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or
* -1 if default value should be used
*/
BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized
* window (in packets) or -1 if default
* value should be used
*/
BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an
* active connection is initialized
*/
BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an
* active connection is
* established
*/
BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a
* passive connection is
* established
*/
BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control
* needs ECN
*/
};
#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
#endif /* _UAPI__LINUX_BPF_H__ */
)********"