Commit 0641cada authored by Daniel Borkmann

Merge branch 'bpf-uapi-enums'

Andrii Nakryiko says:

====================
Convert BPF-related UAPI constants, currently defined as #define macros, into
anonymous enums. This makes no difference for how such constants are used in
C code (they can still be used in all the compile-time contexts that
`#define`s can), but they get recorded as part of DWARF type info and
subsequently as part of the kernel's BTF type info. This allows those
constants to be emitted into the auto-generated vmlinux.h header file and
used from BPF programs, which is especially convenient for all kinds of BPF
helper flags and makes CO-RE BPF programs nicer to write.
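
To illustrate the shape of the change (a minimal sketch distilled from the
diff below): a helper-flag #define becomes an anonymous enum member with the
same value, so it survives into DWARF/BTF and from there into vmlinux.h:

    /* Before: plain macros, invisible to DWARF/BTF and thus absent
     * from the auto-generated vmlinux.h.
     */
    #define BPF_F_INDEX_MASK	0xffffffffULL
    #define BPF_F_CURRENT_CPU	BPF_F_INDEX_MASK

    /* After: anonymous enum members with the same values; the constants
     * are now part of the kernel's type info and get emitted into
     * vmlinux.h, where CO-RE BPF programs can use them directly.
     */
    enum {
    	BPF_F_INDEX_MASK	= 0xffffffffULL,
    	BPF_F_CURRENT_CPU	= BPF_F_INDEX_MASK,
    };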

libbpf's btf_dump logic currently assumes enum values are signed 32-bit
values, which doesn't match the typical case of flag-like constants, so
switch it to emit unsigned values. Once BTF encoding of BTF_KIND_ENUM is
extended to capture signedness properly, this will be made more flexible.
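
To see why signed emission is a problem for flag-like constants, consider a
32-bit all-ones mask such as BPF_F_INDEX_MASK: interpreted as a signed 32-bit
value it prints as a negative number instead of the intended flag value. A
stand-alone C illustration (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
    	unsigned int v = 0xffffffffU; /* e.g. a 32-bit all-ones flag mask */

    	printf("%d\n", (int)v);  /* signed emission:   -1 */
    	printf("%u\n", v);       /* unsigned emission: 4294967295 */
    	return 0;
    }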

As an immediate validation of the approach, runqslower's local copy of the
BPF_F_CURRENT_CPU #define is dropped in favor of the enum variant it now
gets from vmlinux.h.
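
On the BPF side, that usage looks roughly like the sketch below (map and
program names are illustrative, not copied from runqslower's actual source):

    #include "vmlinux.h"		/* provides BPF_F_CURRENT_CPU via BTF */
    #include <bpf/bpf_helpers.h>

    struct {
    	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
    	__uint(key_size, sizeof(u32));
    	__uint(value_size, sizeof(u32));
    } events SEC(".maps");

    SEC("tp_btf/sched_switch")
    int handle__sched_switch(u64 *ctx)
    {
    	u32 pid = bpf_get_current_pid_tgid() >> 32;

    	/* no local #define needed: BPF_F_CURRENT_CPU comes from vmlinux.h */
    	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
    			      &pid, sizeof(pid));
    	return 0;
    }

    char LICENSE[] SEC("license") = "GPL";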

v2->v3:
- convert only constants usable from BPF programs (BPF helper flags, map
  create flags, etc.) (Alexei);
v1->v2:
- fix up btf_dump test to use max 32-bit unsigned value instead of negative one.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents 320a3606 367d82f1
@@ -325,44 +325,46 @@ enum bpf_attach_type {
 #define BPF_PSEUDO_CALL		1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
-#define BPF_ANY		0 /* create new element or update existing */
-#define BPF_NOEXIST	1 /* create new element if it didn't exist */
-#define BPF_EXIST	2 /* update existing element */
-#define BPF_F_LOCK	4 /* spin_lock-ed map_lookup/map_update */
+enum {
+	BPF_ANY		= 0, /* create new element or update existing */
+	BPF_NOEXIST	= 1, /* create new element if it didn't exist */
+	BPF_EXIST	= 2, /* update existing element */
+	BPF_F_LOCK	= 4, /* spin_lock-ed map_lookup/map_update */
+};
 
 /* flags for BPF_MAP_CREATE command */
-#define BPF_F_NO_PREALLOC	(1U << 0)
+enum {
+	BPF_F_NO_PREALLOC	= (1U << 0),
 /* Instead of having one common LRU list in the
  * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
  * which can scale and perform better.
  * Note, the LRU nodes (including free nodes) cannot be moved
  * across different LRU lists.
  */
-#define BPF_F_NO_COMMON_LRU	(1U << 1)
+	BPF_F_NO_COMMON_LRU	= (1U << 1),
 /* Specify numa node during map creation */
-#define BPF_F_NUMA_NODE		(1U << 2)
-
-#define BPF_OBJ_NAME_LEN 16U
+	BPF_F_NUMA_NODE		= (1U << 2),
 
 /* Flags for accessing BPF object from syscall side. */
-#define BPF_F_RDONLY		(1U << 3)
-#define BPF_F_WRONLY		(1U << 4)
+	BPF_F_RDONLY		= (1U << 3),
+	BPF_F_WRONLY		= (1U << 4),
 
 /* Flag for stack_map, store build_id+offset instead of pointer */
-#define BPF_F_STACK_BUILD_ID	(1U << 5)
+	BPF_F_STACK_BUILD_ID	= (1U << 5),
 
 /* Zero-initialize hash function seed. This should only be used for testing. */
-#define BPF_F_ZERO_SEED		(1U << 6)
+	BPF_F_ZERO_SEED		= (1U << 6),
 
 /* Flags for accessing BPF object from program side. */
-#define BPF_F_RDONLY_PROG	(1U << 7)
-#define BPF_F_WRONLY_PROG	(1U << 8)
+	BPF_F_RDONLY_PROG	= (1U << 7),
+	BPF_F_WRONLY_PROG	= (1U << 8),
 
 /* Clone map from listener for newly accepted socket */
-#define BPF_F_CLONE		(1U << 9)
+	BPF_F_CLONE		= (1U << 9),
 
 /* Enable memory-mapping BPF map */
-#define BPF_F_MMAPABLE		(1U << 10)
+	BPF_F_MMAPABLE		= (1U << 10),
+};
 
 /* Flags for BPF_PROG_QUERY. */
@@ -391,6 +393,8 @@ struct bpf_stack_build_id {
 	};
 };
 
+#define BPF_OBJ_NAME_LEN 16U
+
 union bpf_attr {
 	struct { /* anonymous struct used by BPF_MAP_CREATE command */
 		__u32	map_type;	/* one of enum bpf_map_type */
@@ -3045,72 +3049,100 @@ enum bpf_func_id {
 /* All flags used by eBPF helper functions, placed here. */
 
 /* BPF_FUNC_skb_store_bytes flags. */
-#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
-#define BPF_F_INVALIDATE_HASH		(1ULL << 1)
+enum {
+	BPF_F_RECOMPUTE_CSUM		= (1ULL << 0),
+	BPF_F_INVALIDATE_HASH		= (1ULL << 1),
+};
 
 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
  * First 4 bits are for passing the header field size.
  */
-#define BPF_F_HDR_FIELD_MASK		0xfULL
+enum {
+	BPF_F_HDR_FIELD_MASK		= 0xfULL,
+};
 
 /* BPF_FUNC_l4_csum_replace flags. */
-#define BPF_F_PSEUDO_HDR		(1ULL << 4)
-#define BPF_F_MARK_MANGLED_0		(1ULL << 5)
-#define BPF_F_MARK_ENFORCE		(1ULL << 6)
+enum {
+	BPF_F_PSEUDO_HDR		= (1ULL << 4),
+	BPF_F_MARK_MANGLED_0		= (1ULL << 5),
+	BPF_F_MARK_ENFORCE		= (1ULL << 6),
+};
 
 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
-#define BPF_F_INGRESS			(1ULL << 0)
+enum {
+	BPF_F_INGRESS			= (1ULL << 0),
+};
 
 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
-#define BPF_F_TUNINFO_IPV6		(1ULL << 0)
+enum {
+	BPF_F_TUNINFO_IPV6		= (1ULL << 0),
+};
 
 /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
-#define BPF_F_SKIP_FIELD_MASK		0xffULL
-#define BPF_F_USER_STACK		(1ULL << 8)
+enum {
+	BPF_F_SKIP_FIELD_MASK		= 0xffULL,
+	BPF_F_USER_STACK		= (1ULL << 8),
 /* flags used by BPF_FUNC_get_stackid only. */
-#define BPF_F_FAST_STACK_CMP		(1ULL << 9)
-#define BPF_F_REUSE_STACKID		(1ULL << 10)
+	BPF_F_FAST_STACK_CMP		= (1ULL << 9),
+	BPF_F_REUSE_STACKID		= (1ULL << 10),
 /* flags used by BPF_FUNC_get_stack only. */
-#define BPF_F_USER_BUILD_ID		(1ULL << 11)
+	BPF_F_USER_BUILD_ID		= (1ULL << 11),
+};
 
 /* BPF_FUNC_skb_set_tunnel_key flags. */
-#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
-#define BPF_F_DONT_FRAGMENT		(1ULL << 2)
-#define BPF_F_SEQ_NUMBER		(1ULL << 3)
+enum {
+	BPF_F_ZERO_CSUM_TX		= (1ULL << 1),
+	BPF_F_DONT_FRAGMENT		= (1ULL << 2),
+	BPF_F_SEQ_NUMBER		= (1ULL << 3),
+};
 
 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
  * BPF_FUNC_perf_event_read_value flags.
  */
-#define BPF_F_INDEX_MASK		0xffffffffULL
-#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
+enum {
+	BPF_F_INDEX_MASK		= 0xffffffffULL,
+	BPF_F_CURRENT_CPU		= BPF_F_INDEX_MASK,
 /* BPF_FUNC_perf_event_output for sk_buff input context. */
-#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)
+	BPF_F_CTXLEN_MASK		= (0xfffffULL << 32),
+};
 
 /* Current network namespace */
-#define BPF_F_CURRENT_NETNS		(-1L)
+enum {
+	BPF_F_CURRENT_NETNS		= (-1L),
+};
 
 /* BPF_FUNC_skb_adjust_room flags. */
-#define BPF_F_ADJ_ROOM_FIXED_GSO	(1ULL << 0)
+enum {
+	BPF_F_ADJ_ROOM_FIXED_GSO	= (1ULL << 0),
+	BPF_F_ADJ_ROOM_ENCAP_L3_IPV4	= (1ULL << 1),
+	BPF_F_ADJ_ROOM_ENCAP_L3_IPV6	= (1ULL << 2),
+	BPF_F_ADJ_ROOM_ENCAP_L4_GRE	= (1ULL << 3),
+	BPF_F_ADJ_ROOM_ENCAP_L4_UDP	= (1ULL << 4),
+};
 
-#define BPF_ADJ_ROOM_ENCAP_L2_MASK	0xff
-#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT	56
+enum {
+	BPF_ADJ_ROOM_ENCAP_L2_MASK	= 0xff,
+	BPF_ADJ_ROOM_ENCAP_L2_SHIFT	= 56,
+};
 
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4	(1ULL << 1)
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6	(1ULL << 2)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_GRE	(1ULL << 3)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_UDP	(1ULL << 4)
 #define BPF_F_ADJ_ROOM_ENCAP_L2(len)	(((__u64)len & \
 					  BPF_ADJ_ROOM_ENCAP_L2_MASK) \
 					 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
 
 /* BPF_FUNC_sysctl_get_name flags. */
-#define BPF_F_SYSCTL_BASE_NAME		(1ULL << 0)
+enum {
+	BPF_F_SYSCTL_BASE_NAME		= (1ULL << 0),
+};
 
 /* BPF_FUNC_sk_storage_get flags */
-#define BPF_SK_STORAGE_GET_F_CREATE	(1ULL << 0)
+enum {
+	BPF_SK_STORAGE_GET_F_CREATE	= (1ULL << 0),
+};
 
 /* BPF_FUNC_read_branch_records flags. */
-#define BPF_F_GET_BRANCH_RECORDS_SIZE	(1ULL << 0)
+enum {
+	BPF_F_GET_BRANCH_RECORDS_SIZE	= (1ULL << 0),
+};
 
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
 enum bpf_adj_room_mode {
@@ -3529,13 +3561,14 @@ struct bpf_sock_ops {
 };
 
 /* Definitions for bpf_sock_ops_cb_flags */
-#define BPF_SOCK_OPS_RTO_CB_FLAG	(1<<0)
-#define BPF_SOCK_OPS_RETRANS_CB_FLAG	(1<<1)
-#define BPF_SOCK_OPS_STATE_CB_FLAG	(1<<2)
-#define BPF_SOCK_OPS_RTT_CB_FLAG	(1<<3)
-#define BPF_SOCK_OPS_ALL_CB_FLAGS	0xF	/* Mask of all currently
-						 * supported cb flags
-						 */
+enum {
+	BPF_SOCK_OPS_RTO_CB_FLAG	= (1<<0),
+	BPF_SOCK_OPS_RETRANS_CB_FLAG	= (1<<1),
+	BPF_SOCK_OPS_STATE_CB_FLAG	= (1<<2),
+	BPF_SOCK_OPS_RTT_CB_FLAG	= (1<<3),
+/* Mask of all currently supported cb flags */
+	BPF_SOCK_OPS_ALL_CB_FLAGS	= 0xF,
+};
 
 /* List of known BPF sock_ops operators.
  * New entries can only be added at the end
@@ -3614,8 +3647,10 @@ enum {
 	BPF_TCP_MAX_STATES	/* Leave at the end! */
 };
 
-#define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
-#define TCP_BPF_SNDCWND_CLAMP	1002	/* Set sndcwnd_clamp */
+enum {
+	TCP_BPF_IW		= 1001,	/* Set TCP initial congestion window */
+	TCP_BPF_SNDCWND_CLAMP	= 1002,	/* Set sndcwnd_clamp */
+};
 
 struct bpf_perf_event_value {
 	__u64 counter;
@@ -3623,12 +3658,16 @@ struct bpf_perf_event_value {
 	__u64 running;
 };
 
-#define BPF_DEVCG_ACC_MKNOD	(1ULL << 0)
-#define BPF_DEVCG_ACC_READ	(1ULL << 1)
-#define BPF_DEVCG_ACC_WRITE	(1ULL << 2)
+enum {
+	BPF_DEVCG_ACC_MKNOD	= (1ULL << 0),
+	BPF_DEVCG_ACC_READ	= (1ULL << 1),
+	BPF_DEVCG_ACC_WRITE	= (1ULL << 2),
+};
 
-#define BPF_DEVCG_DEV_BLOCK	(1ULL << 0)
-#define BPF_DEVCG_DEV_CHAR	(1ULL << 1)
+enum {
+	BPF_DEVCG_DEV_BLOCK	= (1ULL << 0),
+	BPF_DEVCG_DEV_CHAR	= (1ULL << 1),
+};
 
 struct bpf_cgroup_dev_ctx {
 	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
@@ -3644,8 +3683,10 @@ struct bpf_raw_tracepoint_args {
 /* DIRECT: Skip the FIB rules and go to FIB table associated with device
  * OUTPUT: Do lookup from egress perspective; default is ingress
  */
-#define BPF_FIB_LOOKUP_DIRECT	(1U << 0)
-#define BPF_FIB_LOOKUP_OUTPUT	(1U << 1)
+enum {
+	BPF_FIB_LOOKUP_DIRECT	= (1U << 0),
+	BPF_FIB_LOOKUP_OUTPUT	= (1U << 1),
+};
 
 enum {
 	BPF_FIB_LKUP_RET_SUCCESS,	/* lookup successful */
@@ -3717,9 +3758,11 @@ enum bpf_task_fd_type {
 	BPF_FD_TYPE_URETPROBE,		/* filename + offset */
 };
 
-#define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG		(1U << 0)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL		(1U << 1)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP		(1U << 2)
+enum {
+	BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG		= (1U << 0),
+	BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL		= (1U << 1),
+	BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP		= (1U << 2),
+};
 
 struct bpf_flow_keys {
 	__u16	nhoff;
...
@@ -6,9 +6,6 @@
 
 #define TASK_RUNNING 0
 
-#define BPF_F_INDEX_MASK		0xffffffffULL
-#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
-
 const volatile __u64 min_us = 0;
 const volatile pid_t targ_pid = 0;
...
@@ -73,7 +73,7 @@ struct bpf_insn {
 /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
 struct bpf_lpm_trie_key {
 	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
-	__u8	data[0];	/* Arbitrary size */
+	__u8	data[];		/* Arbitrary size */
 };
 
 struct bpf_cgroup_storage_key {
@@ -325,44 +325,46 @@ enum bpf_attach_type {
 #define BPF_PSEUDO_CALL		1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
-#define BPF_ANY		0 /* create new element or update existing */
-#define BPF_NOEXIST	1 /* create new element if it didn't exist */
-#define BPF_EXIST	2 /* update existing element */
-#define BPF_F_LOCK	4 /* spin_lock-ed map_lookup/map_update */
+enum {
+	BPF_ANY		= 0, /* create new element or update existing */
+	BPF_NOEXIST	= 1, /* create new element if it didn't exist */
+	BPF_EXIST	= 2, /* update existing element */
+	BPF_F_LOCK	= 4, /* spin_lock-ed map_lookup/map_update */
+};
 
 /* flags for BPF_MAP_CREATE command */
-#define BPF_F_NO_PREALLOC	(1U << 0)
+enum {
+	BPF_F_NO_PREALLOC	= (1U << 0),
 /* Instead of having one common LRU list in the
  * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
  * which can scale and perform better.
  * Note, the LRU nodes (including free nodes) cannot be moved
  * across different LRU lists.
 */
-#define BPF_F_NO_COMMON_LRU	(1U << 1)
+	BPF_F_NO_COMMON_LRU	= (1U << 1),
 /* Specify numa node during map creation */
-#define BPF_F_NUMA_NODE		(1U << 2)
-
-#define BPF_OBJ_NAME_LEN 16U
+	BPF_F_NUMA_NODE		= (1U << 2),
 
 /* Flags for accessing BPF object from syscall side. */
-#define BPF_F_RDONLY		(1U << 3)
-#define BPF_F_WRONLY		(1U << 4)
+	BPF_F_RDONLY		= (1U << 3),
+	BPF_F_WRONLY		= (1U << 4),
 
 /* Flag for stack_map, store build_id+offset instead of pointer */
-#define BPF_F_STACK_BUILD_ID	(1U << 5)
+	BPF_F_STACK_BUILD_ID	= (1U << 5),
 
 /* Zero-initialize hash function seed. This should only be used for testing. */
-#define BPF_F_ZERO_SEED		(1U << 6)
+	BPF_F_ZERO_SEED		= (1U << 6),
 
 /* Flags for accessing BPF object from program side. */
-#define BPF_F_RDONLY_PROG	(1U << 7)
-#define BPF_F_WRONLY_PROG	(1U << 8)
+	BPF_F_RDONLY_PROG	= (1U << 7),
+	BPF_F_WRONLY_PROG	= (1U << 8),
 
 /* Clone map from listener for newly accepted socket */
-#define BPF_F_CLONE		(1U << 9)
+	BPF_F_CLONE		= (1U << 9),
 
 /* Enable memory-mapping BPF map */
-#define BPF_F_MMAPABLE		(1U << 10)
+	BPF_F_MMAPABLE		= (1U << 10),
+};
 
 /* Flags for BPF_PROG_QUERY. */
@@ -391,6 +393,8 @@ struct bpf_stack_build_id {
 	};
 };
 
+#define BPF_OBJ_NAME_LEN 16U
+
 union bpf_attr {
 	struct { /* anonymous struct used by BPF_MAP_CREATE command */
 		__u32	map_type;	/* one of enum bpf_map_type */
@@ -3045,72 +3049,100 @@ enum bpf_func_id {
 /* All flags used by eBPF helper functions, placed here. */
 
 /* BPF_FUNC_skb_store_bytes flags. */
-#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
-#define BPF_F_INVALIDATE_HASH		(1ULL << 1)
+enum {
+	BPF_F_RECOMPUTE_CSUM		= (1ULL << 0),
+	BPF_F_INVALIDATE_HASH		= (1ULL << 1),
+};
 
 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
  * First 4 bits are for passing the header field size.
 */
-#define BPF_F_HDR_FIELD_MASK		0xfULL
+enum {
+	BPF_F_HDR_FIELD_MASK		= 0xfULL,
+};
 
 /* BPF_FUNC_l4_csum_replace flags. */
-#define BPF_F_PSEUDO_HDR		(1ULL << 4)
-#define BPF_F_MARK_MANGLED_0		(1ULL << 5)
-#define BPF_F_MARK_ENFORCE		(1ULL << 6)
+enum {
+	BPF_F_PSEUDO_HDR		= (1ULL << 4),
+	BPF_F_MARK_MANGLED_0		= (1ULL << 5),
+	BPF_F_MARK_ENFORCE		= (1ULL << 6),
+};
 
 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
-#define BPF_F_INGRESS			(1ULL << 0)
+enum {
+	BPF_F_INGRESS			= (1ULL << 0),
+};
 
 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
-#define BPF_F_TUNINFO_IPV6		(1ULL << 0)
+enum {
+	BPF_F_TUNINFO_IPV6		= (1ULL << 0),
+};
 
 /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
-#define BPF_F_SKIP_FIELD_MASK		0xffULL
-#define BPF_F_USER_STACK		(1ULL << 8)
+enum {
+	BPF_F_SKIP_FIELD_MASK		= 0xffULL,
+	BPF_F_USER_STACK		= (1ULL << 8),
 /* flags used by BPF_FUNC_get_stackid only. */
-#define BPF_F_FAST_STACK_CMP		(1ULL << 9)
-#define BPF_F_REUSE_STACKID		(1ULL << 10)
+	BPF_F_FAST_STACK_CMP		= (1ULL << 9),
+	BPF_F_REUSE_STACKID		= (1ULL << 10),
 /* flags used by BPF_FUNC_get_stack only. */
-#define BPF_F_USER_BUILD_ID		(1ULL << 11)
+	BPF_F_USER_BUILD_ID		= (1ULL << 11),
+};
 
 /* BPF_FUNC_skb_set_tunnel_key flags. */
-#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
-#define BPF_F_DONT_FRAGMENT		(1ULL << 2)
-#define BPF_F_SEQ_NUMBER		(1ULL << 3)
+enum {
+	BPF_F_ZERO_CSUM_TX		= (1ULL << 1),
+	BPF_F_DONT_FRAGMENT		= (1ULL << 2),
+	BPF_F_SEQ_NUMBER		= (1ULL << 3),
+};
 
 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
 * BPF_FUNC_perf_event_read_value flags.
 */
-#define BPF_F_INDEX_MASK		0xffffffffULL
-#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
+enum {
+	BPF_F_INDEX_MASK		= 0xffffffffULL,
+	BPF_F_CURRENT_CPU		= BPF_F_INDEX_MASK,
 /* BPF_FUNC_perf_event_output for sk_buff input context. */
-#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)
+	BPF_F_CTXLEN_MASK		= (0xfffffULL << 32),
+};
 
 /* Current network namespace */
-#define BPF_F_CURRENT_NETNS		(-1L)
+enum {
+	BPF_F_CURRENT_NETNS		= (-1L),
+};
 
 /* BPF_FUNC_skb_adjust_room flags. */
-#define BPF_F_ADJ_ROOM_FIXED_GSO	(1ULL << 0)
+enum {
+	BPF_F_ADJ_ROOM_FIXED_GSO	= (1ULL << 0),
+	BPF_F_ADJ_ROOM_ENCAP_L3_IPV4	= (1ULL << 1),
+	BPF_F_ADJ_ROOM_ENCAP_L3_IPV6	= (1ULL << 2),
+	BPF_F_ADJ_ROOM_ENCAP_L4_GRE	= (1ULL << 3),
+	BPF_F_ADJ_ROOM_ENCAP_L4_UDP	= (1ULL << 4),
+};
 
-#define BPF_ADJ_ROOM_ENCAP_L2_MASK	0xff
-#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT	56
+enum {
+	BPF_ADJ_ROOM_ENCAP_L2_MASK	= 0xff,
+	BPF_ADJ_ROOM_ENCAP_L2_SHIFT	= 56,
+};
 
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4	(1ULL << 1)
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6	(1ULL << 2)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_GRE	(1ULL << 3)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_UDP	(1ULL << 4)
 #define BPF_F_ADJ_ROOM_ENCAP_L2(len)	(((__u64)len & \
 					  BPF_ADJ_ROOM_ENCAP_L2_MASK) \
 					 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
 
 /* BPF_FUNC_sysctl_get_name flags. */
-#define BPF_F_SYSCTL_BASE_NAME		(1ULL << 0)
+enum {
+	BPF_F_SYSCTL_BASE_NAME		= (1ULL << 0),
+};
 
 /* BPF_FUNC_sk_storage_get flags */
-#define BPF_SK_STORAGE_GET_F_CREATE	(1ULL << 0)
+enum {
+	BPF_SK_STORAGE_GET_F_CREATE	= (1ULL << 0),
+};
 
 /* BPF_FUNC_read_branch_records flags. */
-#define BPF_F_GET_BRANCH_RECORDS_SIZE	(1ULL << 0)
+enum {
+	BPF_F_GET_BRANCH_RECORDS_SIZE	= (1ULL << 0),
+};
 
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
 enum bpf_adj_room_mode {
@@ -3529,13 +3561,14 @@ struct bpf_sock_ops {
 };
 
 /* Definitions for bpf_sock_ops_cb_flags */
-#define BPF_SOCK_OPS_RTO_CB_FLAG	(1<<0)
-#define BPF_SOCK_OPS_RETRANS_CB_FLAG	(1<<1)
-#define BPF_SOCK_OPS_STATE_CB_FLAG	(1<<2)
-#define BPF_SOCK_OPS_RTT_CB_FLAG	(1<<3)
-#define BPF_SOCK_OPS_ALL_CB_FLAGS	0xF	/* Mask of all currently
						 * supported cb flags
						 */
+enum {
+	BPF_SOCK_OPS_RTO_CB_FLAG	= (1<<0),
+	BPF_SOCK_OPS_RETRANS_CB_FLAG	= (1<<1),
+	BPF_SOCK_OPS_STATE_CB_FLAG	= (1<<2),
+	BPF_SOCK_OPS_RTT_CB_FLAG	= (1<<3),
+/* Mask of all currently supported cb flags */
+	BPF_SOCK_OPS_ALL_CB_FLAGS	= 0xF,
+};
 
 /* List of known BPF sock_ops operators.
 * New entries can only be added at the end
@@ -3614,8 +3647,10 @@ enum {
 	BPF_TCP_MAX_STATES	/* Leave at the end! */
 };
 
-#define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
-#define TCP_BPF_SNDCWND_CLAMP	1002	/* Set sndcwnd_clamp */
+enum {
+	TCP_BPF_IW		= 1001,	/* Set TCP initial congestion window */
+	TCP_BPF_SNDCWND_CLAMP	= 1002,	/* Set sndcwnd_clamp */
+};
 
 struct bpf_perf_event_value {
 	__u64 counter;
@@ -3623,12 +3658,16 @@ struct bpf_perf_event_value {
 	__u64 running;
 };
 
-#define BPF_DEVCG_ACC_MKNOD	(1ULL << 0)
-#define BPF_DEVCG_ACC_READ	(1ULL << 1)
-#define BPF_DEVCG_ACC_WRITE	(1ULL << 2)
+enum {
+	BPF_DEVCG_ACC_MKNOD	= (1ULL << 0),
+	BPF_DEVCG_ACC_READ	= (1ULL << 1),
+	BPF_DEVCG_ACC_WRITE	= (1ULL << 2),
+};
 
-#define BPF_DEVCG_DEV_BLOCK	(1ULL << 0)
-#define BPF_DEVCG_DEV_CHAR	(1ULL << 1)
+enum {
+	BPF_DEVCG_DEV_BLOCK	= (1ULL << 0),
+	BPF_DEVCG_DEV_CHAR	= (1ULL << 1),
+};
 
 struct bpf_cgroup_dev_ctx {
 	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
@@ -3644,8 +3683,10 @@ struct bpf_raw_tracepoint_args {
 /* DIRECT: Skip the FIB rules and go to FIB table associated with device
 * OUTPUT: Do lookup from egress perspective; default is ingress
 */
-#define BPF_FIB_LOOKUP_DIRECT	(1U << 0)
-#define BPF_FIB_LOOKUP_OUTPUT	(1U << 1)
+enum {
+	BPF_FIB_LOOKUP_DIRECT	= (1U << 0),
+	BPF_FIB_LOOKUP_OUTPUT	= (1U << 1),
+};
 
 enum {
 	BPF_FIB_LKUP_RET_SUCCESS,	/* lookup successful */
@@ -3717,9 +3758,11 @@ enum bpf_task_fd_type {
 	BPF_FD_TYPE_URETPROBE,		/* filename + offset */
 };
 
-#define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG		(1U << 0)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL		(1U << 1)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP		(1U << 2)
+enum {
+	BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG		= (1U << 0),
+	BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL		= (1U << 1),
+	BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP		= (1U << 2),
+};
 
 struct bpf_flow_keys {
 	__u16	nhoff;
...
@@ -916,13 +916,13 @@ static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
 			/* enumerators share namespace with typedef idents */
 			dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
 			if (dup_cnt > 1) {
-				btf_dump_printf(d, "\n%s%s___%zu = %d,",
+				btf_dump_printf(d, "\n%s%s___%zu = %u,",
 						pfx(lvl + 1), name, dup_cnt,
-						(__s32)v->val);
+						(__u32)v->val);
 			} else {
-				btf_dump_printf(d, "\n%s%s = %d,",
+				btf_dump_printf(d, "\n%s%s = %u,",
 						pfx(lvl + 1), name,
-						(__s32)v->val);
+						(__u32)v->val);
 			}
 		}
 	btf_dump_printf(d, "\n%s}", pfx(lvl));
...
@@ -13,7 +13,7 @@ enum e1 {
 
 enum e2 {
 	C = 100,
-	D = -100,
+	D = 4294967295,
 	E = 0,
 };
...