Commit 84085f87 authored by Alexei Starovoitov

Merge branch 'enable-bpf_skc-cast-for-networking-progs'

Martin KaFai Lau says:

====================
This set allows networking prog type to directly read fields from
the in-kernel socket type, e.g. "struct tcp_sock".

Patch 2 has the details on the use case.

v3:
- Pass arg_btf_id instead of fn into check_reg_type() in Patch 1 (Lorenz)
- Move arg_btf_id from func_proto to struct bpf_reg_types in Patch 2 (Lorenz)
- Remove test_sock_fields from .gitignore in Patch 8 (Andrii)
- Add tests to have better coverage on the modified helpers (Alexei)
  Patch 13 is added.
- Use "void *sk" as the helper argument in UAPI bpf.h

v3:
- ARG_PTR_TO_SOCK_COMMON_OR_NULL was attempted in v2.  The _OR_NULL was
  needed because the PTR_TO_BTF_ID could be NULL, but note that a possibly-NULL
  PTR_TO_BTF_ID is not a scalar NULL to the verifier.  "_OR_NULL" implicitly
  gives an expectation that the helper can take a scalar NULL which does
  not make sense in most (except one) helpers.  Passing scalar NULL
  should be rejected at the verification time.

  Thus, this patch uses ARG_PTR_TO_BTF_ID_SOCK_COMMON to specify that the
  helper can take both the btf-id ptr or the legacy PTR_TO_SOCK_COMMON but
  not scalar NULL.  It requires the func_proto to explicitly specify the
  arg_btf_id such that there is a very clear expectation that the helper
  can handle a NULL PTR_TO_BTF_ID.

v2:
- Add ARG_PTR_TO_SOCK_COMMON_OR_NULL (Lorenz)
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 182bf3f3 9a856cae
...@@ -292,6 +292,7 @@ enum bpf_arg_type { ...@@ -292,6 +292,7 @@ enum bpf_arg_type {
ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */ ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */
ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
__BPF_ARG_TYPE_MAX, __BPF_ARG_TYPE_MAX,
}; };
......
...@@ -20,8 +20,6 @@ void bpf_sk_storage_free(struct sock *sk); ...@@ -20,8 +20,6 @@ void bpf_sk_storage_free(struct sock *sk);
extern const struct bpf_func_proto bpf_sk_storage_get_proto; extern const struct bpf_func_proto bpf_sk_storage_get_proto;
extern const struct bpf_func_proto bpf_sk_storage_delete_proto; extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
extern const struct bpf_func_proto sk_storage_get_btf_proto;
extern const struct bpf_func_proto sk_storage_delete_btf_proto;
struct bpf_local_storage_elem; struct bpf_local_storage_elem;
struct bpf_sk_storage_diag; struct bpf_sk_storage_diag;
......
...@@ -2512,7 +2512,7 @@ union bpf_attr { ...@@ -2512,7 +2512,7 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the * result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple. * tuple.
* *
* long bpf_sk_release(struct bpf_sock *sock) * long bpf_sk_release(void *sock)
* Description * Description
* Release the reference held by *sock*. *sock* must be a * Release the reference held by *sock*. *sock* must be a
* non-**NULL** pointer that was returned from * non-**NULL** pointer that was returned from
...@@ -2692,7 +2692,7 @@ union bpf_attr { ...@@ -2692,7 +2692,7 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the * result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple. * tuple.
* *
* long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description * Description
* Check whether *iph* and *th* contain a valid SYN cookie ACK for * Check whether *iph* and *th* contain a valid SYN cookie ACK for
* the listening socket in *sk*. * the listening socket in *sk*.
...@@ -2861,6 +2861,7 @@ union bpf_attr { ...@@ -2861,6 +2861,7 @@ union bpf_attr {
* 0 on success. * 0 on success.
* *
* **-ENOENT** if the bpf-local-storage cannot be found. * **-ENOENT** if the bpf-local-storage cannot be found.
* **-EINVAL** if sk is not a fullsock (e.g. a request_sock).
* *
* long bpf_send_signal(u32 sig) * long bpf_send_signal(u32 sig)
* Description * Description
...@@ -2877,7 +2878,7 @@ union bpf_attr { ...@@ -2877,7 +2878,7 @@ union bpf_attr {
* *
* **-EAGAIN** if bpf program can try again. * **-EAGAIN** if bpf program can try again.
* *
* s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description * Description
* Try to issue a SYN cookie for the packet with corresponding * Try to issue a SYN cookie for the packet with corresponding
* IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
...@@ -3106,7 +3107,7 @@ union bpf_attr { ...@@ -3106,7 +3107,7 @@ union bpf_attr {
* Return * Return
* The id is returned or 0 in case the id could not be retrieved. * The id is returned or 0 in case the id could not be retrieved.
* *
* long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
* Description * Description
* Helper is overloaded depending on BPF program type. This * Helper is overloaded depending on BPF program type. This
* description applies to **BPF_PROG_TYPE_SCHED_CLS** and * description applies to **BPF_PROG_TYPE_SCHED_CLS** and
...@@ -3234,11 +3235,11 @@ union bpf_attr { ...@@ -3234,11 +3235,11 @@ union bpf_attr {
* *
* **-EOVERFLOW** if an overflow happened: The same object will be tried again. * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
* *
* u64 bpf_sk_cgroup_id(struct bpf_sock *sk) * u64 bpf_sk_cgroup_id(void *sk)
* Description * Description
* Return the cgroup v2 id of the socket *sk*. * Return the cgroup v2 id of the socket *sk*.
* *
* *sk* must be a non-**NULL** pointer to a full socket, e.g. one * *sk* must be a non-**NULL** pointer to a socket, e.g. one
* returned from **bpf_sk_lookup_xxx**\ (), * returned from **bpf_sk_lookup_xxx**\ (),
* **bpf_sk_fullsock**\ (), etc. The format of returned id is * **bpf_sk_fullsock**\ (), etc. The format of returned id is
* same as in **bpf_skb_cgroup_id**\ (). * same as in **bpf_skb_cgroup_id**\ ().
...@@ -3248,7 +3249,7 @@ union bpf_attr { ...@@ -3248,7 +3249,7 @@ union bpf_attr {
* Return * Return
* The id is returned or 0 in case the id could not be retrieved. * The id is returned or 0 in case the id could not be retrieved.
* *
* u64 bpf_sk_ancestor_cgroup_id(struct bpf_sock *sk, int ancestor_level) * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level)
* Description * Description
* Return id of cgroup v2 that is ancestor of cgroup associated * Return id of cgroup v2 that is ancestor of cgroup associated
* with the *sk* at the *ancestor_level*. The root cgroup is at * with the *sk* at the *ancestor_level*. The root cgroup is at
......
...@@ -56,9 +56,9 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -56,9 +56,9 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_inode_storage_delete: case BPF_FUNC_inode_storage_delete:
return &bpf_inode_storage_delete_proto; return &bpf_inode_storage_delete_proto;
case BPF_FUNC_sk_storage_get: case BPF_FUNC_sk_storage_get:
return &sk_storage_get_btf_proto; return &bpf_sk_storage_get_proto;
case BPF_FUNC_sk_storage_delete: case BPF_FUNC_sk_storage_delete:
return &sk_storage_delete_btf_proto; return &bpf_sk_storage_delete_proto;
default: default:
return tracing_prog_func_proto(func_id, prog); return tracing_prog_func_proto(func_id, prog);
} }
......
...@@ -486,7 +486,12 @@ static bool is_acquire_function(enum bpf_func_id func_id, ...@@ -486,7 +486,12 @@ static bool is_acquire_function(enum bpf_func_id func_id,
static bool is_ptr_cast_function(enum bpf_func_id func_id) static bool is_ptr_cast_function(enum bpf_func_id func_id)
{ {
return func_id == BPF_FUNC_tcp_sock || return func_id == BPF_FUNC_tcp_sock ||
func_id == BPF_FUNC_sk_fullsock; func_id == BPF_FUNC_sk_fullsock ||
func_id == BPF_FUNC_skc_to_tcp_sock ||
func_id == BPF_FUNC_skc_to_tcp6_sock ||
func_id == BPF_FUNC_skc_to_udp6_sock ||
func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
func_id == BPF_FUNC_skc_to_tcp_request_sock;
} }
/* string representation of 'enum bpf_reg_type' */ /* string representation of 'enum bpf_reg_type' */
...@@ -3953,6 +3958,7 @@ static int resolve_map_arg_type(struct bpf_verifier_env *env, ...@@ -3953,6 +3958,7 @@ static int resolve_map_arg_type(struct bpf_verifier_env *env,
struct bpf_reg_types { struct bpf_reg_types {
const enum bpf_reg_type types[10]; const enum bpf_reg_type types[10];
u32 *btf_id;
}; };
static const struct bpf_reg_types map_key_value_types = { static const struct bpf_reg_types map_key_value_types = {
...@@ -3973,6 +3979,17 @@ static const struct bpf_reg_types sock_types = { ...@@ -3973,6 +3979,17 @@ static const struct bpf_reg_types sock_types = {
}, },
}; };
/*
 * Register types accepted by ARG_PTR_TO_BTF_ID_SOCK_COMMON: the legacy
 * sock pointer reg types plus PTR_TO_BTF_ID.  A PTR_TO_BTF_ID arg is
 * matched against .btf_id (struct sock_common) by default when the
 * func_proto does not supply its own arg_btf_id.
 */
static const struct bpf_reg_types btf_id_sock_common_types = {
	.types = {
		PTR_TO_SOCK_COMMON,
		PTR_TO_SOCKET,
		PTR_TO_TCP_SOCK,
		PTR_TO_XDP_SOCK,
		PTR_TO_BTF_ID,
	},
	.btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
};
static const struct bpf_reg_types mem_types = { static const struct bpf_reg_types mem_types = {
.types = { .types = {
PTR_TO_STACK, PTR_TO_STACK,
...@@ -4014,6 +4031,7 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { ...@@ -4014,6 +4031,7 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
[ARG_PTR_TO_CTX] = &context_types, [ARG_PTR_TO_CTX] = &context_types,
[ARG_PTR_TO_CTX_OR_NULL] = &context_types, [ARG_PTR_TO_CTX_OR_NULL] = &context_types,
[ARG_PTR_TO_SOCK_COMMON] = &sock_types, [ARG_PTR_TO_SOCK_COMMON] = &sock_types,
[ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types,
[ARG_PTR_TO_SOCKET] = &fullsock_types, [ARG_PTR_TO_SOCKET] = &fullsock_types,
[ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types, [ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types,
[ARG_PTR_TO_BTF_ID] = &btf_ptr_types, [ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
...@@ -4028,19 +4046,27 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { ...@@ -4028,19 +4046,27 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
}; };
static int check_reg_type(struct bpf_verifier_env *env, u32 regno, static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
const struct bpf_reg_types *compatible) enum bpf_arg_type arg_type,
const u32 *arg_btf_id)
{ {
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
enum bpf_reg_type expected, type = reg->type; enum bpf_reg_type expected, type = reg->type;
const struct bpf_reg_types *compatible;
int i, j; int i, j;
compatible = compatible_reg_types[arg_type];
if (!compatible) {
verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
return -EFAULT;
}
for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
expected = compatible->types[i]; expected = compatible->types[i];
if (expected == NOT_INIT) if (expected == NOT_INIT)
break; break;
if (type == expected) if (type == expected)
return 0; goto found;
} }
verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]); verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]);
...@@ -4048,6 +4074,33 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno, ...@@ -4048,6 +4074,33 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
verbose(env, "%s, ", reg_type_str[compatible->types[j]]); verbose(env, "%s, ", reg_type_str[compatible->types[j]]);
verbose(env, "%s\n", reg_type_str[compatible->types[j]]); verbose(env, "%s\n", reg_type_str[compatible->types[j]]);
return -EACCES; return -EACCES;
found:
if (type == PTR_TO_BTF_ID) {
if (!arg_btf_id) {
if (!compatible->btf_id) {
verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
return -EFAULT;
}
arg_btf_id = compatible->btf_id;
}
if (!btf_struct_ids_match(&env->log, reg->off, reg->btf_id,
*arg_btf_id)) {
verbose(env, "R%d is of type %s but %s is expected\n",
regno, kernel_type_name(reg->btf_id),
kernel_type_name(*arg_btf_id));
return -EACCES;
}
if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
regno);
return -EACCES;
}
}
return 0;
} }
static int check_func_arg(struct bpf_verifier_env *env, u32 arg, static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
...@@ -4057,7 +4110,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, ...@@ -4057,7 +4110,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
u32 regno = BPF_REG_1 + arg; u32 regno = BPF_REG_1 + arg;
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
enum bpf_arg_type arg_type = fn->arg_type[arg]; enum bpf_arg_type arg_type = fn->arg_type[arg];
const struct bpf_reg_types *compatible;
enum bpf_reg_type type = reg->type; enum bpf_reg_type type = reg->type;
int err = 0; int err = 0;
...@@ -4097,35 +4149,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, ...@@ -4097,35 +4149,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
*/ */
goto skip_type_check; goto skip_type_check;
compatible = compatible_reg_types[arg_type]; err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]);
if (!compatible) {
verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
return -EFAULT;
}
err = check_reg_type(env, regno, compatible);
if (err) if (err)
return err; return err;
if (type == PTR_TO_BTF_ID) { if (type == PTR_TO_CTX) {
const u32 *btf_id = fn->arg_btf_id[arg];
if (!btf_id) {
verbose(env, "verifier internal error: missing BTF ID\n");
return -EFAULT;
}
if (!btf_struct_ids_match(&env->log, reg->off, reg->btf_id, *btf_id)) {
verbose(env, "R%d is of type %s but %s is expected\n",
regno, kernel_type_name(reg->btf_id), kernel_type_name(*btf_id));
return -EACCES;
}
if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
regno);
return -EACCES;
}
} else if (type == PTR_TO_CTX) {
err = check_ctx_reg(env, reg, regno); err = check_ctx_reg(env, reg, regno);
if (err < 0) if (err < 0)
return err; return err;
...@@ -4573,10 +4601,14 @@ static bool check_btf_id_ok(const struct bpf_func_proto *fn) ...@@ -4573,10 +4601,14 @@ static bool check_btf_id_ok(const struct bpf_func_proto *fn)
{ {
int i; int i;
for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i]) if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
return false; return false;
if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
return false;
}
return true; return true;
} }
......
...@@ -269,7 +269,7 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk, ...@@ -269,7 +269,7 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
{ {
struct bpf_local_storage_data *sdata; struct bpf_local_storage_data *sdata;
if (flags > BPF_SK_STORAGE_GET_F_CREATE) if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
return (unsigned long)NULL; return (unsigned long)NULL;
sdata = sk_storage_lookup(sk, map, true); sdata = sk_storage_lookup(sk, map, true);
...@@ -299,6 +299,9 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk, ...@@ -299,6 +299,9 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk) BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{ {
if (!sk || !sk_fullsock(sk))
return -EINVAL;
if (refcount_inc_not_zero(&sk->sk_refcnt)) { if (refcount_inc_not_zero(&sk->sk_refcnt)) {
int err; int err;
...@@ -355,7 +358,7 @@ const struct bpf_func_proto bpf_sk_storage_get_proto = { ...@@ -355,7 +358,7 @@ const struct bpf_func_proto bpf_sk_storage_get_proto = {
.gpl_only = false, .gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR, .arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_SOCKET, .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL, .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING,
}; };
...@@ -375,27 +378,7 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = { ...@@ -375,27 +378,7 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
.gpl_only = false, .gpl_only = false,
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR, .arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_SOCKET, .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};
const struct bpf_func_proto sk_storage_get_btf_proto = {
.func = bpf_sk_storage_get,
.gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
};
const struct bpf_func_proto sk_storage_delete_btf_proto = {
.func = bpf_sk_storage_delete,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
}; };
struct bpf_sk_storage_diag { struct bpf_sk_storage_diag {
......
...@@ -77,6 +77,9 @@ ...@@ -77,6 +77,9 @@
#include <net/transp_v6.h> #include <net/transp_v6.h>
#include <linux/btf_ids.h> #include <linux/btf_ids.h>
static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id);
int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len) int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{ {
if (in_compat_syscall()) { if (in_compat_syscall()) {
...@@ -4085,18 +4088,17 @@ static inline u64 __bpf_sk_cgroup_id(struct sock *sk) ...@@ -4085,18 +4088,17 @@ static inline u64 __bpf_sk_cgroup_id(struct sock *sk)
{ {
struct cgroup *cgrp; struct cgroup *cgrp;
sk = sk_to_full_sk(sk);
if (!sk || !sk_fullsock(sk))
return 0;
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
return cgroup_id(cgrp); return cgroup_id(cgrp);
} }
BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb) BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
{ {
struct sock *sk = skb_to_full_sk(skb); return __bpf_sk_cgroup_id(skb->sk);
if (!sk || !sk_fullsock(sk))
return 0;
return __bpf_sk_cgroup_id(sk);
} }
static const struct bpf_func_proto bpf_skb_cgroup_id_proto = { static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
...@@ -4112,6 +4114,10 @@ static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk, ...@@ -4112,6 +4114,10 @@ static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk,
struct cgroup *ancestor; struct cgroup *ancestor;
struct cgroup *cgrp; struct cgroup *cgrp;
sk = sk_to_full_sk(sk);
if (!sk || !sk_fullsock(sk))
return 0;
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
ancestor = cgroup_ancestor(cgrp, ancestor_level); ancestor = cgroup_ancestor(cgrp, ancestor_level);
if (!ancestor) if (!ancestor)
...@@ -4123,12 +4129,7 @@ static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk, ...@@ -4123,12 +4129,7 @@ static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk,
BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int, BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
ancestor_level) ancestor_level)
{ {
struct sock *sk = skb_to_full_sk(skb); return __bpf_sk_ancestor_cgroup_id(skb->sk, ancestor_level);
if (!sk || !sk_fullsock(sk))
return 0;
return __bpf_sk_ancestor_cgroup_id(sk, ancestor_level);
} }
static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = { static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
...@@ -4148,7 +4149,7 @@ static const struct bpf_func_proto bpf_sk_cgroup_id_proto = { ...@@ -4148,7 +4149,7 @@ static const struct bpf_func_proto bpf_sk_cgroup_id_proto = {
.func = bpf_sk_cgroup_id, .func = bpf_sk_cgroup_id,
.gpl_only = false, .gpl_only = false,
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_SOCKET, .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
}; };
BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level) BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level)
...@@ -4160,7 +4161,7 @@ static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = { ...@@ -4160,7 +4161,7 @@ static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = {
.func = bpf_sk_ancestor_cgroup_id, .func = bpf_sk_ancestor_cgroup_id,
.gpl_only = false, .gpl_only = false,
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_SOCKET, .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg2_type = ARG_ANYTHING, .arg2_type = ARG_ANYTHING,
}; };
#endif #endif
...@@ -5694,7 +5695,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { ...@@ -5694,7 +5695,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
BPF_CALL_1(bpf_sk_release, struct sock *, sk) BPF_CALL_1(bpf_sk_release, struct sock *, sk)
{ {
if (sk_is_refcounted(sk)) if (sk && sk_is_refcounted(sk))
sock_gen_put(sk); sock_gen_put(sk);
return 0; return 0;
} }
...@@ -5703,7 +5704,7 @@ static const struct bpf_func_proto bpf_sk_release_proto = { ...@@ -5703,7 +5704,7 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
.func = bpf_sk_release, .func = bpf_sk_release,
.gpl_only = false, .gpl_only = false,
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_SOCK_COMMON, .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
}; };
BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx, BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
...@@ -6085,7 +6086,7 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len ...@@ -6085,7 +6086,7 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len
u32 cookie; u32 cookie;
int ret; int ret;
if (unlikely(th_len < sizeof(*th))) if (unlikely(!sk || th_len < sizeof(*th)))
return -EINVAL; return -EINVAL;
/* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */ /* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */
...@@ -6138,7 +6139,7 @@ static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = { ...@@ -6138,7 +6139,7 @@ static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
.gpl_only = true, .gpl_only = true,
.pkt_access = true, .pkt_access = true,
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_SOCK_COMMON, .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg2_type = ARG_PTR_TO_MEM, .arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE, .arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_PTR_TO_MEM, .arg4_type = ARG_PTR_TO_MEM,
...@@ -6152,7 +6153,7 @@ BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len, ...@@ -6152,7 +6153,7 @@ BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
u32 cookie; u32 cookie;
u16 mss; u16 mss;
if (unlikely(th_len < sizeof(*th) || th_len != th->doff * 4)) if (unlikely(!sk || th_len < sizeof(*th) || th_len != th->doff * 4))
return -EINVAL; return -EINVAL;
if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
...@@ -6207,7 +6208,7 @@ static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = { ...@@ -6207,7 +6208,7 @@ static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
.gpl_only = true, /* __cookie_v*_init_sequence() is GPL */ .gpl_only = true, /* __cookie_v*_init_sequence() is GPL */
.pkt_access = true, .pkt_access = true,
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_SOCK_COMMON, .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg2_type = ARG_PTR_TO_MEM, .arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE, .arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_PTR_TO_MEM, .arg4_type = ARG_PTR_TO_MEM,
...@@ -6216,7 +6217,7 @@ static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = { ...@@ -6216,7 +6217,7 @@ static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags) BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
{ {
if (flags != 0) if (!sk || flags != 0)
return -EINVAL; return -EINVAL;
if (!skb_at_tc_ingress(skb)) if (!skb_at_tc_ingress(skb))
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -6240,7 +6241,7 @@ static const struct bpf_func_proto bpf_sk_assign_proto = { ...@@ -6240,7 +6241,7 @@ static const struct bpf_func_proto bpf_sk_assign_proto = {
.gpl_only = false, .gpl_only = false,
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX, .arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_SOCK_COMMON, .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg3_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING,
}; };
...@@ -6620,7 +6621,7 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -6620,7 +6621,7 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return NULL; return NULL;
} }
default: default:
return bpf_base_func_proto(func_id); return bpf_sk_base_func_proto(func_id);
} }
} }
...@@ -6639,7 +6640,7 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -6639,7 +6640,7 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_perf_event_output: case BPF_FUNC_perf_event_output:
return &bpf_skb_event_output_proto; return &bpf_skb_event_output_proto;
default: default:
return bpf_base_func_proto(func_id); return bpf_sk_base_func_proto(func_id);
} }
} }
...@@ -6800,7 +6801,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -6800,7 +6801,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_assign_proto; return &bpf_sk_assign_proto;
#endif #endif
default: default:
return bpf_base_func_proto(func_id); return bpf_sk_base_func_proto(func_id);
} }
} }
...@@ -6841,7 +6842,7 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -6841,7 +6842,7 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_tcp_gen_syncookie_proto; return &bpf_tcp_gen_syncookie_proto;
#endif #endif
default: default:
return bpf_base_func_proto(func_id); return bpf_sk_base_func_proto(func_id);
} }
} }
...@@ -6883,7 +6884,7 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -6883,7 +6884,7 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_tcp_sock_proto; return &bpf_tcp_sock_proto;
#endif /* CONFIG_INET */ #endif /* CONFIG_INET */
default: default:
return bpf_base_func_proto(func_id); return bpf_sk_base_func_proto(func_id);
} }
} }
...@@ -6929,7 +6930,7 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -6929,7 +6930,7 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_cgroup_classid_curr_proto; return &bpf_get_cgroup_classid_curr_proto;
#endif #endif
default: default:
return bpf_base_func_proto(func_id); return bpf_sk_base_func_proto(func_id);
} }
} }
...@@ -6971,7 +6972,7 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -6971,7 +6972,7 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skc_lookup_tcp_proto; return &bpf_skc_lookup_tcp_proto;
#endif #endif
default: default:
return bpf_base_func_proto(func_id); return bpf_sk_base_func_proto(func_id);
} }
} }
...@@ -6982,7 +6983,7 @@ flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -6982,7 +6983,7 @@ flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_load_bytes: case BPF_FUNC_skb_load_bytes:
return &bpf_flow_dissector_load_bytes_proto; return &bpf_flow_dissector_load_bytes_proto;
default: default:
return bpf_base_func_proto(func_id); return bpf_sk_base_func_proto(func_id);
} }
} }
...@@ -7009,7 +7010,7 @@ lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -7009,7 +7010,7 @@ lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_under_cgroup: case BPF_FUNC_skb_under_cgroup:
return &bpf_skb_under_cgroup_proto; return &bpf_skb_under_cgroup_proto;
default: default:
return bpf_base_func_proto(func_id); return bpf_sk_base_func_proto(func_id);
} }
} }
...@@ -9746,7 +9747,7 @@ sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -9746,7 +9747,7 @@ sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_sk_release: case BPF_FUNC_sk_release:
return &bpf_sk_release_proto; return &bpf_sk_release_proto;
default: default:
return bpf_base_func_proto(func_id); return bpf_sk_base_func_proto(func_id);
} }
} }
...@@ -9913,8 +9914,7 @@ const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = { ...@@ -9913,8 +9914,7 @@ const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = {
.func = bpf_skc_to_tcp6_sock, .func = bpf_skc_to_tcp6_sock,
.gpl_only = false, .gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL, .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6], .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6],
}; };
...@@ -9930,8 +9930,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = { ...@@ -9930,8 +9930,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = {
.func = bpf_skc_to_tcp_sock, .func = bpf_skc_to_tcp_sock,
.gpl_only = false, .gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL, .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP], .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
}; };
...@@ -9954,8 +9953,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = { ...@@ -9954,8 +9953,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = {
.func = bpf_skc_to_tcp_timewait_sock, .func = bpf_skc_to_tcp_timewait_sock,
.gpl_only = false, .gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL, .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW], .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW],
}; };
...@@ -9978,8 +9976,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = { ...@@ -9978,8 +9976,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = {
.func = bpf_skc_to_tcp_request_sock, .func = bpf_skc_to_tcp_request_sock,
.gpl_only = false, .gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL, .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ], .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ],
}; };
...@@ -10000,7 +9997,37 @@ const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = { ...@@ -10000,7 +9997,37 @@ const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
.func = bpf_skc_to_udp6_sock, .func = bpf_skc_to_udp6_sock,
.gpl_only = false, .gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL, .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6], .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
}; };
/* Return the func_proto for the socket-casting helpers (bpf_skc_to_*).
 * These helpers expose in-kernel socket types via BTF pointers, so they
 * are gated on perfmon_capable(); anything else falls through to the
 * generic bpf_base_func_proto() table, which applies its own gating.
 */
static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id)
{
	const struct bpf_func_proto *fp = NULL;

	switch (func_id) {
	case BPF_FUNC_skc_to_tcp6_sock:
		fp = &bpf_skc_to_tcp6_sock_proto;
		break;
	case BPF_FUNC_skc_to_tcp_sock:
		fp = &bpf_skc_to_tcp_sock_proto;
		break;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		fp = &bpf_skc_to_tcp_timewait_sock_proto;
		break;
	case BPF_FUNC_skc_to_tcp_request_sock:
		fp = &bpf_skc_to_tcp_request_sock_proto;
		break;
	case BPF_FUNC_skc_to_udp6_sock:
		fp = &bpf_skc_to_udp6_sock_proto;
		break;
	default:
		/* Not a skc-cast helper: no perfmon gate here. */
		return bpf_base_func_proto(func_id);
	}

	/* skc-cast helpers leak kernel object layout; require CAP_PERFMON. */
	return perfmon_capable() ? fp : NULL;
}
...@@ -28,22 +28,6 @@ static u32 unsupported_ops[] = { ...@@ -28,22 +28,6 @@ static u32 unsupported_ops[] = {
static const struct btf_type *tcp_sock_type; static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id; static u32 tcp_sock_id, sock_id;
static struct bpf_func_proto btf_sk_storage_get_proto __read_mostly;
static struct bpf_func_proto btf_sk_storage_delete_proto __read_mostly;
/* Clone @from into @to, rewriting every ARG_PTR_TO_SOCKET argument into a
 * BTF-id pointer argument bound to tcp_sock_id, so struct_ops (tcp-cc)
 * programs can pass their "struct tcp_sock *" directly to the helper.
 */
static void convert_sk_func_proto(struct bpf_func_proto *to, const struct bpf_func_proto *from)
{
	int i;

	*to = *from;

	for (i = 0; i < ARRAY_SIZE(to->arg_type); i++) {
		if (to->arg_type[i] != ARG_PTR_TO_SOCKET)
			continue;
		to->arg_type[i] = ARG_PTR_TO_BTF_ID;
		to->arg_btf_id[i] = &tcp_sock_id;
	}
}
static int bpf_tcp_ca_init(struct btf *btf) static int bpf_tcp_ca_init(struct btf *btf)
{ {
s32 type_id; s32 type_id;
...@@ -59,9 +43,6 @@ static int bpf_tcp_ca_init(struct btf *btf) ...@@ -59,9 +43,6 @@ static int bpf_tcp_ca_init(struct btf *btf)
tcp_sock_id = type_id; tcp_sock_id = type_id;
tcp_sock_type = btf_type_by_id(btf, tcp_sock_id); tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);
convert_sk_func_proto(&btf_sk_storage_get_proto, &bpf_sk_storage_get_proto);
convert_sk_func_proto(&btf_sk_storage_delete_proto, &bpf_sk_storage_delete_proto);
return 0; return 0;
} }
...@@ -188,9 +169,9 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id, ...@@ -188,9 +169,9 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
case BPF_FUNC_tcp_send_ack: case BPF_FUNC_tcp_send_ack:
return &bpf_tcp_send_ack_proto; return &bpf_tcp_send_ack_proto;
case BPF_FUNC_sk_storage_get: case BPF_FUNC_sk_storage_get:
return &btf_sk_storage_get_proto; return &bpf_sk_storage_get_proto;
case BPF_FUNC_sk_storage_delete: case BPF_FUNC_sk_storage_delete:
return &btf_sk_storage_delete_proto; return &bpf_sk_storage_delete_proto;
default: default:
return bpf_base_func_proto(func_id); return bpf_base_func_proto(func_id);
} }
......
...@@ -2512,7 +2512,7 @@ union bpf_attr { ...@@ -2512,7 +2512,7 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the * result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple. * tuple.
* *
* long bpf_sk_release(struct bpf_sock *sock) * long bpf_sk_release(void *sock)
* Description * Description
* Release the reference held by *sock*. *sock* must be a * Release the reference held by *sock*. *sock* must be a
* non-**NULL** pointer that was returned from * non-**NULL** pointer that was returned from
...@@ -2692,7 +2692,7 @@ union bpf_attr { ...@@ -2692,7 +2692,7 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the * result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple. * tuple.
* *
* long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description * Description
* Check whether *iph* and *th* contain a valid SYN cookie ACK for * Check whether *iph* and *th* contain a valid SYN cookie ACK for
* the listening socket in *sk*. * the listening socket in *sk*.
...@@ -2861,6 +2861,7 @@ union bpf_attr { ...@@ -2861,6 +2861,7 @@ union bpf_attr {
* 0 on success. * 0 on success.
* *
* **-ENOENT** if the bpf-local-storage cannot be found. * **-ENOENT** if the bpf-local-storage cannot be found.
* **-EINVAL** if sk is not a fullsock (e.g. a request_sock).
* *
* long bpf_send_signal(u32 sig) * long bpf_send_signal(u32 sig)
* Description * Description
...@@ -2877,7 +2878,7 @@ union bpf_attr { ...@@ -2877,7 +2878,7 @@ union bpf_attr {
* *
* **-EAGAIN** if bpf program can try again. * **-EAGAIN** if bpf program can try again.
* *
* s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description * Description
* Try to issue a SYN cookie for the packet with corresponding * Try to issue a SYN cookie for the packet with corresponding
* IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
...@@ -3106,7 +3107,7 @@ union bpf_attr { ...@@ -3106,7 +3107,7 @@ union bpf_attr {
* Return * Return
* The id is returned or 0 in case the id could not be retrieved. * The id is returned or 0 in case the id could not be retrieved.
* *
* long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
* Description * Description
* Helper is overloaded depending on BPF program type. This * Helper is overloaded depending on BPF program type. This
* description applies to **BPF_PROG_TYPE_SCHED_CLS** and * description applies to **BPF_PROG_TYPE_SCHED_CLS** and
...@@ -3234,11 +3235,11 @@ union bpf_attr { ...@@ -3234,11 +3235,11 @@ union bpf_attr {
* *
* **-EOVERFLOW** if an overflow happened: The same object will be tried again. * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
* *
* u64 bpf_sk_cgroup_id(struct bpf_sock *sk) * u64 bpf_sk_cgroup_id(void *sk)
* Description * Description
* Return the cgroup v2 id of the socket *sk*. * Return the cgroup v2 id of the socket *sk*.
* *
* *sk* must be a non-**NULL** pointer to a full socket, e.g. one * *sk* must be a non-**NULL** pointer to a socket, e.g. one
* returned from **bpf_sk_lookup_xxx**\ (), * returned from **bpf_sk_lookup_xxx**\ (),
* **bpf_sk_fullsock**\ (), etc. The format of returned id is * **bpf_sk_fullsock**\ (), etc. The format of returned id is
* same as in **bpf_skb_cgroup_id**\ (). * same as in **bpf_skb_cgroup_id**\ ().
...@@ -3248,7 +3249,7 @@ union bpf_attr { ...@@ -3248,7 +3249,7 @@ union bpf_attr {
* Return * Return
* The id is returned or 0 in case the id could not be retrieved. * The id is returned or 0 in case the id could not be retrieved.
* *
* u64 bpf_sk_ancestor_cgroup_id(struct bpf_sock *sk, int ancestor_level) * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level)
* Description * Description
* Return id of cgroup v2 that is ancestor of cgroup associated * Return id of cgroup v2 that is ancestor of cgroup associated
* with the *sk* at the *ancestor_level*. The root cgroup is at * with the *sk* at the *ancestor_level*. The root cgroup is at
......
...@@ -13,7 +13,6 @@ test_verifier_log ...@@ -13,7 +13,6 @@ test_verifier_log
feature feature
test_sock test_sock
test_sock_addr test_sock_addr
test_sock_fields
urandom_read urandom_read
test_sockmap test_sockmap
test_lirc_mode2_user test_lirc_mode2_user
......
...@@ -35,7 +35,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test ...@@ -35,7 +35,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
test_verifier_log test_dev_cgroup test_tcpbpf_user \ test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_sockmap get_cgroup_id_user test_socket_cookie \ test_sock test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage \ test_cgroup_storage \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl \ test_netcnt test_tcpnotify_user test_sysctl \
test_progs-no_alu32 \ test_progs-no_alu32 \
test_current_pid_tgid_new_ns test_current_pid_tgid_new_ns
......
...@@ -16,6 +16,7 @@ BPF_PROG(name, args) ...@@ -16,6 +16,7 @@ BPF_PROG(name, args)
struct sock_common { struct sock_common {
unsigned char skc_state; unsigned char skc_state;
__u16 skc_num;
} __attribute__((preserve_access_index)); } __attribute__((preserve_access_index));
enum sk_pacing { enum sk_pacing {
...@@ -45,6 +46,10 @@ struct inet_connection_sock { ...@@ -45,6 +46,10 @@ struct inet_connection_sock {
__u64 icsk_ca_priv[104 / sizeof(__u64)]; __u64 icsk_ca_priv[104 / sizeof(__u64)];
} __attribute__((preserve_access_index)); } __attribute__((preserve_access_index));
struct request_sock {
struct sock_common __req_common;
} __attribute__((preserve_access_index));
struct tcp_sock { struct tcp_sock {
struct inet_connection_sock inet_conn; struct inet_connection_sock inet_conn;
...@@ -115,14 +120,6 @@ enum tcp_ca_event { ...@@ -115,14 +120,6 @@ enum tcp_ca_event {
CA_EVENT_ECN_IS_CE = 5, CA_EVENT_ECN_IS_CE = 5,
}; };
enum tcp_ca_state {
TCP_CA_Open = 0,
TCP_CA_Disorder = 1,
TCP_CA_CWR = 2,
TCP_CA_Recovery = 3,
TCP_CA_Loss = 4
};
struct ack_sample { struct ack_sample {
__u32 pkts_acked; __u32 pkts_acked;
__s32 rtt_us; __s32 rtt_us;
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define _GNU_SOURCE
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sched.h>
#include <linux/compiler.h>
#include <bpf/libbpf.h>
#include "network_helpers.h"
#include "test_progs.h"
#include "test_btf_skc_cls_ingress.skel.h"
/* Loaded skeleton shared by all subtests; set up in test_btf_skc_cls_ingress(). */
struct test_btf_skc_cls_ingress *skel;
/* Server address captured by each subtest and copied into the bpf prog's bss. */
struct sockaddr_in6 srv_sa6;
/* Used implicitly by the CHECK() macro from test_progs.h. */
static __u32 duration;
/* Pin path consumed by "tc filter add ... object-pinned" in prepare_netns(). */
#define PROG_PIN_FILE "/sys/fs/bpf/btf_skc_cls_ingress"
/* Write @value to the procfs @sysctl file.
 * Returns 0 on success, -1 (after logging via CHECK) on open/short-write
 * failure.
 */
static int write_sysctl(const char *sysctl, const char *value)
{
	int fd, written, vlen;

	fd = open(sysctl, O_WRONLY);
	if (CHECK(fd == -1, "open sysctl", "open(%s): %s (%d)\n",
		  sysctl, strerror(errno), errno))
		return -1;

	vlen = strlen(value);
	written = write(fd, value, vlen);
	close(fd);
	/* Anything other than a full write counts as failure. */
	if (CHECK(written != vlen, "write sysctl",
		  "write(%s, %s, %d): err:%d %s (%d)\n",
		  sysctl, value, vlen, written, strerror(errno), errno))
		return -1;

	return 0;
}
/* Move into a fresh network namespace, bring up loopback, and attach the
 * pinned cls_ingress bpf prog to lo's clsact ingress hook.
 * Also enables the tcp options needed for a fixed 40-byte tcp header.
 * Returns 0 on success, -1 on any failure (logged via CHECK).
 */
static int prepare_netns(void)
{
	/* Fix: the format string was missing the trailing '\n', unlike every
	 * other CHECK() message in this file, which garbled the log output.
	 */
	if (CHECK(unshare(CLONE_NEWNET), "create netns",
		  "unshare(CLONE_NEWNET): %s (%d)\n",
		  strerror(errno), errno))
		return -1;

	if (CHECK(system("ip link set dev lo up"),
		  "ip link set dev lo up", "failed\n"))
		return -1;

	if (CHECK(system("tc qdisc add dev lo clsact"),
		  "tc qdisc add dev lo clsact", "failed\n"))
		return -1;

	if (CHECK(system("tc filter add dev lo ingress bpf direct-action object-pinned " PROG_PIN_FILE),
		  "install tc cls-prog at ingress", "failed\n"))
		return -1;

	/* Ensure 20 bytes options (i.e. in total 40 bytes tcp header) for the
	 * bpf_tcp_gen_syncookie() helper.
	 */
	if (write_sysctl("/proc/sys/net/ipv4/tcp_window_scaling", "1") ||
	    write_sysctl("/proc/sys/net/ipv4/tcp_timestamps", "1") ||
	    write_sysctl("/proc/sys/net/ipv4/tcp_sack", "1"))
		return -1;

	return 0;
}
/* Zero the bpf prog's bss state between subtests so one subtest's
 * results cannot leak into the next.
 */
static void reset_test(void)
{
	memset(&skel->bss->srv_sa6, 0, sizeof(skel->bss->srv_sa6));
	skel->bss->listen_tp_sport = 0;
	skel->bss->req_sk_sport = 0;
	skel->bss->recv_cookie = 0;
	skel->bss->gen_cookie = 0;
	skel->bss->linum = 0;
}
static void print_err_line(void)
{
if (skel->bss->linum)
printf("bpf prog error at line %u\n", skel->bss->linum);
}
/* Regular 3-way-handshake subtest: with tcp_syncookies=1 (cookies only
 * under synq overflow) a plain loopback connect must go through the
 * request_sock path, so the bpf prog should observe the server port on
 * both the listener and the request_sock, and the syncookie counters
 * must stay zero.
 */
static void test_conn(void)
{
	int listen_fd = -1, cli_fd = -1, err;
	socklen_t addrlen = sizeof(srv_sa6);
	int srv_port;

	if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
		return;

	listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
	if (CHECK_FAIL(listen_fd == -1))
		return;

	err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
	if (CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d\n", err,
		  errno))
		goto done;
	/* Tell the bpf prog which traffic is the testing traffic. */
	memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6));
	srv_port = ntohs(srv_sa6.sin6_port);

	cli_fd = connect_to_fd(listen_fd, 0);
	if (CHECK_FAIL(cli_fd == -1))
		goto done;

	/* Both the listener and the request_sock seen at ingress must carry
	 * the server's source port.
	 */
	if (CHECK(skel->bss->listen_tp_sport != srv_port ||
		  skel->bss->req_sk_sport != srv_port,
		  "Unexpected sk src port",
		  "listen_tp_sport:%u req_sk_sport:%u expected:%u\n",
		  skel->bss->listen_tp_sport, skel->bss->req_sk_sport,
		  srv_port))
		goto done;

	/* No syncookie should have been generated or received on this path. */
	if (CHECK(skel->bss->gen_cookie || skel->bss->recv_cookie,
		  "Unexpected syncookie states",
		  "gen_cookie:%u recv_cookie:%u\n",
		  skel->bss->gen_cookie, skel->bss->recv_cookie))
		goto done;

	CHECK(skel->bss->linum, "bpf prog detected error", "at line %u\n",
	      skel->bss->linum);

done:
	if (listen_fd != -1)
		close(listen_fd);
	if (cli_fd != -1)
		close(cli_fd);
}
/* Syncookie subtest: with tcp_syncookies=2 every SYN is answered with a
 * cookie and no request_sock is created, so the bpf prog must see only
 * the listener's port (req_sk_sport stays 0) and the cookie it generated
 * via bpf_tcp_gen_syncookie() must match the one echoed back by the
 * client's ACK.
 */
static void test_syncookie(void)
{
	int listen_fd = -1, cli_fd = -1, err;
	socklen_t addrlen = sizeof(srv_sa6);
	int srv_port;

	/* Enforce syncookie mode */
	if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "2"))
		return;

	listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
	if (CHECK_FAIL(listen_fd == -1))
		return;

	err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
	if (CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d\n", err,
		  errno))
		goto done;
	/* Tell the bpf prog which traffic is the testing traffic. */
	memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6));
	srv_port = ntohs(srv_sa6.sin6_port);

	cli_fd = connect_to_fd(listen_fd, 0);
	if (CHECK_FAIL(cli_fd == -1))
		goto done;

	if (CHECK(skel->bss->listen_tp_sport != srv_port,
		  "Unexpected tp src port",
		  "listen_tp_sport:%u expected:%u\n",
		  skel->bss->listen_tp_sport, srv_port))
		goto done;

	/* Cookie mode: no request_sock should ever have been observed. */
	if (CHECK(skel->bss->req_sk_sport,
		  "Unexpected req_sk src port",
		  "req_sk_sport:%u expected:0\n",
		  skel->bss->req_sk_sport))
		goto done;

	/* The generated cookie must be non-zero and equal to the cookie
	 * recovered from the client's completing ACK.
	 */
	if (CHECK(!skel->bss->gen_cookie ||
		  skel->bss->gen_cookie != skel->bss->recv_cookie,
		  "Unexpected syncookie states",
		  "gen_cookie:%u recv_cookie:%u\n",
		  skel->bss->gen_cookie, skel->bss->recv_cookie))
		goto done;

	CHECK(skel->bss->linum, "bpf prog detected error", "at line %u\n",
	      skel->bss->linum);

done:
	if (listen_fd != -1)
		close(listen_fd);
	if (cli_fd != -1)
		close(cli_fd);
}
/* A named subtest: desc doubles as the test__start_subtest() name. */
struct test {
	const char *desc;
	void (*run)(void);
};

/* Expands "name" into { "name", test_name }. */
#define DEF_TEST(name) { #name, test_##name }

static struct test tests[] = {
	DEF_TEST(conn),
	DEF_TEST(syncookie),
};
/* Top-level test entry: load the skeleton, pin the cls_ingress prog so
 * tc can attach it by path, then run each subtest in its own netns.
 */
void test_btf_skc_cls_ingress(void)
{
	int err, idx;

	skel = test_btf_skc_cls_ingress__open_and_load();
	if (CHECK(!skel, "test_btf_skc_cls_ingress__open_and_load", "failed\n"))
		return;

	err = bpf_program__pin(skel->progs.cls_ingress, PROG_PIN_FILE);
	if (CHECK(err, "bpf_program__pin",
		  "cannot pin bpf prog to %s. err:%d\n", PROG_PIN_FILE, err))
		goto destroy;

	for (idx = 0; idx < ARRAY_SIZE(tests); idx++) {
		if (!test__start_subtest(tests[idx].desc))
			continue;

		/* Each subtest gets a fresh netns with the prog attached. */
		if (prepare_netns())
			break;

		tests[idx].run();

		print_err_line();
		reset_test();
	}

	bpf_program__unpin(skel->progs.cls_ingress, PROG_PIN_FILE);
destroy:
	test_btf_skc_cls_ingress__destroy(skel);
}
...@@ -15,6 +15,8 @@ ...@@ -15,6 +15,8 @@
*/ */
#include <linux/bpf.h> #include <linux/bpf.h>
#include <linux/stddef.h>
#include <linux/tcp.h>
#include "bpf_tcp_helpers.h" #include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
......
...@@ -9,6 +9,8 @@ ...@@ -9,6 +9,8 @@
#include <stddef.h> #include <stddef.h>
#include <linux/bpf.h> #include <linux/bpf.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/stddef.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h> #include <bpf/bpf_tracing.h>
#include "bpf_tcp_helpers.h" #include "bpf_tcp_helpers.h"
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <string.h>
#include <errno.h>
#include <netinet/in.h>
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "bpf_tcp_helpers.h"
/* Filled in from userspace: address of the test server socket. */
struct sockaddr_in6 srv_sa6 = {};
/* Results read back by userspace after the connection attempt. */
__u16 listen_tp_sport = 0;
__u16 req_sk_sport = 0;
__u32 recv_cookie = 0;
__u32 gen_cookie = 0;
/* First __LINE__ at which the prog hit an unexpected condition (0 = none). */
__u32 linum = 0;

/* Record only the FIRST error line; later LOG()s are no-ops. */
#define LOG() ({ if (!linum) linum = __LINE__; })
/* Exercise bpf_tcp_gen_syncookie() on SYNs and bpf_tcp_check_syncookie()
 * on the completing ACK, publishing the cookies via gen_cookie/recv_cookie
 * so userspace can compare them.  Errors (other than -ENOENT, which means
 * no cookie was expected) are reported through LOG().
 */
static void test_syncookie_helper(struct ipv6hdr *ip6h, struct tcphdr *th,
				  struct tcp_sock *tp,
				  struct __sk_buff *skb)
{
	if (th->syn) {
		__s64 mss_cookie;
		void *data_end;

		data_end = (void *)(long)(skb->data_end);

		/* The test setup forces a 40-byte tcp header (20 bytes of
		 * options); anything else means an unexpected packet.
		 */
		if (th->doff * 4 != 40) {
			LOG();
			return;
		}

		/* Verifier-required bounds check before passing th to the
		 * helper with a 40-byte length.
		 */
		if ((void *)th + 40 > data_end) {
			LOG();
			return;
		}

		mss_cookie = bpf_tcp_gen_syncookie(tp, ip6h, sizeof(*ip6h),
						   th, 40);
		if (mss_cookie < 0) {
			/* -ENOENT: listener not in syncookie mode. */
			if (mss_cookie != -ENOENT)
				LOG();
		} else {
			/* Low 32 bits of the return value are the cookie. */
			gen_cookie = (__u32)mss_cookie;
		}
	} else if (gen_cookie) {
		/* It was in cookie mode */
		int ret = bpf_tcp_check_syncookie(tp, ip6h, sizeof(*ip6h),
						  th, sizeof(*th));

		if (ret < 0) {
			if (ret != -ENOENT)
				LOG();
		} else {
			/* The ACK echoes cookie+1 in ack_seq; recover it. */
			recv_cookie = bpf_ntohl(th->ack_seq) - 1;
		}
	}
}
/* Inspect an ingress IPv6/TCP packet destined to the test server: look
 * up the (possibly not-yet-full) socket with bpf_skc_lookup_tcp(), cast
 * it to the matching kernel type (request_sock / tcp_sock) via the
 * bpf_skc_to_* helpers, record its source port, assign it to the skb and
 * release the lookup reference.  Always returns TC_ACT_OK; failures are
 * reported through LOG().
 */
static int handle_ip6_tcp(struct ipv6hdr *ip6h, struct __sk_buff *skb)
{
	struct bpf_sock_tuple *tuple;
	struct bpf_sock *bpf_skc;
	unsigned int tuple_len;
	struct tcphdr *th;
	void *data_end;

	data_end = (void *)(long)(skb->data_end);

	th = (struct tcphdr *)(ip6h + 1);
	/* Verifier-required bounds check before touching the tcp header. */
	if (th + 1 > data_end)
		return TC_ACT_OK;

	/* Is it the testing traffic? */
	if (th->dest != srv_sa6.sin6_port)
		return TC_ACT_OK;

	/* The ipv6 saddr/daddr + tcp ports lay out exactly like the ipv6
	 * member of struct bpf_sock_tuple, so reuse them in place.
	 */
	tuple_len = sizeof(tuple->ipv6);
	tuple = (struct bpf_sock_tuple *)&ip6h->saddr;
	if ((void *)tuple + tuple_len > data_end) {
		LOG();
		return TC_ACT_OK;
	}

	/* skc lookup: may return a non-fullsock (e.g. request_sock). */
	bpf_skc = bpf_skc_lookup_tcp(skb, tuple, tuple_len,
				     BPF_F_CURRENT_NETNS, 0);
	if (!bpf_skc) {
		LOG();
		return TC_ACT_OK;
	}

	if (bpf_skc->state == BPF_TCP_NEW_SYN_RECV) {
		struct request_sock *req_sk;

		/* Cast the sock_common to the kernel request_sock type. */
		req_sk = (struct request_sock *)bpf_skc_to_tcp_request_sock(bpf_skc);
		if (!req_sk) {
			LOG();
			goto release;
		}

		/* bpf_sk_assign() must accept the btf-id pointer directly. */
		if (bpf_sk_assign(skb, req_sk, 0)) {
			LOG();
			goto release;
		}

		/* Direct BTF read of the in-kernel struct field. */
		req_sk_sport = req_sk->__req_common.skc_num;

		bpf_sk_release(req_sk);
		return TC_ACT_OK;
	} else if (bpf_skc->state == BPF_TCP_LISTEN) {
		struct tcp_sock *tp;

		tp = bpf_skc_to_tcp_sock(bpf_skc);
		if (!tp) {
			LOG();
			goto release;
		}

		if (bpf_sk_assign(skb, tp, 0)) {
			LOG();
			goto release;
		}

		listen_tp_sport = tp->inet_conn.icsk_inet.sk.__sk_common.skc_num;

		test_syncookie_helper(ip6h, th, tp, skb);
		bpf_sk_release(tp);
		return TC_ACT_OK;
	}

	/* Any other state (e.g. established): assign the legacy bpf_sock. */
	if (bpf_sk_assign(skb, bpf_skc, 0))
		LOG();

release:
	bpf_sk_release(bpf_skc);
	return TC_ACT_OK;
}
SEC("classifier/ingress")
int cls_ingress(struct __sk_buff *skb)
{
	/* tc ingress entry point: parse eth + ipv6 headers with the
	 * verifier-required bounds checks and hand IPv6/TCP packets to
	 * handle_ip6_tcp().  Everything else passes through untouched.
	 */
	struct ipv6hdr *ip6h;
	struct ethhdr *eth;
	void *data_end;

	data_end = (void *)(long)(skb->data_end);

	eth = (struct ethhdr *)(long)(skb->data);
	if (eth + 1 > data_end)
		return TC_ACT_OK;

	if (eth->h_proto != bpf_htons(ETH_P_IPV6))
		return TC_ACT_OK;

	ip6h = (struct ipv6hdr *)(eth + 1);
	if (ip6h + 1 > data_end)
		return TC_ACT_OK;

	/* NOTE(review): extension headers are not walked; only packets with
	 * TCP as the immediate nexthdr are handled, which is fine for this
	 * loopback test traffic.
	 */
	if (ip6h->nexthdr == IPPROTO_TCP)
		return handle_ip6_tcp(ip6h, skb);

	return TC_ACT_OK;
}
char _license[] SEC("license") = "GPL";
...@@ -7,19 +7,7 @@ ...@@ -7,19 +7,7 @@
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h> #include <bpf/bpf_endian.h>
#include "bpf_tcp_helpers.h"
enum bpf_addr_array_idx {
ADDR_SRV_IDX,
ADDR_CLI_IDX,
__NR_BPF_ADDR_ARRAY_IDX,
};
enum bpf_result_array_idx {
EGRESS_SRV_IDX,
EGRESS_CLI_IDX,
INGRESS_LISTEN_IDX,
__NR_BPF_RESULT_ARRAY_IDX,
};
enum bpf_linum_array_idx { enum bpf_linum_array_idx {
EGRESS_LINUM_IDX, EGRESS_LINUM_IDX,
...@@ -27,27 +15,6 @@ enum bpf_linum_array_idx { ...@@ -27,27 +15,6 @@ enum bpf_linum_array_idx {
__NR_BPF_LINUM_ARRAY_IDX, __NR_BPF_LINUM_ARRAY_IDX,
}; };
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, __NR_BPF_ADDR_ARRAY_IDX);
__type(key, __u32);
__type(value, struct sockaddr_in6);
} addr_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, __NR_BPF_RESULT_ARRAY_IDX);
__type(key, __u32);
__type(value, struct bpf_sock);
} sock_result_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, __NR_BPF_RESULT_ARRAY_IDX);
__type(key, __u32);
__type(value, struct bpf_tcp_sock);
} tcp_sock_result_map SEC(".maps");
struct { struct {
__uint(type, BPF_MAP_TYPE_ARRAY); __uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, __NR_BPF_LINUM_ARRAY_IDX); __uint(max_entries, __NR_BPF_LINUM_ARRAY_IDX);
...@@ -74,6 +41,17 @@ struct { ...@@ -74,6 +41,17 @@ struct {
__type(value, struct bpf_spinlock_cnt); __type(value, struct bpf_spinlock_cnt);
} sk_pkt_out_cnt10 SEC(".maps"); } sk_pkt_out_cnt10 SEC(".maps");
struct bpf_tcp_sock listen_tp = {};
struct sockaddr_in6 srv_sa6 = {};
struct bpf_tcp_sock cli_tp = {};
struct bpf_tcp_sock srv_tp = {};
struct bpf_sock listen_sk = {};
struct bpf_sock srv_sk = {};
struct bpf_sock cli_sk = {};
__u64 parent_cg_id = 0;
__u64 child_cg_id = 0;
__u64 lsndtime = 0;
static bool is_loopback6(__u32 *a6) static bool is_loopback6(__u32 *a6)
{ {
return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1); return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1);
...@@ -130,62 +108,86 @@ static void tpcpy(struct bpf_tcp_sock *dst, ...@@ -130,62 +108,86 @@ static void tpcpy(struct bpf_tcp_sock *dst,
dst->bytes_acked = src->bytes_acked; dst->bytes_acked = src->bytes_acked;
} }
#define RETURN { \ /* Always return CG_OK so that no pkt will be filtered out */
#define CG_OK 1
#define RET_LOG() ({ \
linum = __LINE__; \ linum = __LINE__; \
bpf_map_update_elem(&linum_map, &linum_idx, &linum, 0); \ bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_NOEXIST); \
return 1; \ return CG_OK; \
} })
SEC("cgroup_skb/egress") SEC("cgroup_skb/egress")
int egress_read_sock_fields(struct __sk_buff *skb) int egress_read_sock_fields(struct __sk_buff *skb)
{ {
struct bpf_spinlock_cnt cli_cnt_init = { .lock = 0, .cnt = 0xeB9F }; struct bpf_spinlock_cnt cli_cnt_init = { .lock = 0, .cnt = 0xeB9F };
__u32 srv_idx = ADDR_SRV_IDX, cli_idx = ADDR_CLI_IDX, result_idx;
struct bpf_spinlock_cnt *pkt_out_cnt, *pkt_out_cnt10; struct bpf_spinlock_cnt *pkt_out_cnt, *pkt_out_cnt10;
struct sockaddr_in6 *srv_sa6, *cli_sa6;
struct bpf_tcp_sock *tp, *tp_ret; struct bpf_tcp_sock *tp, *tp_ret;
struct bpf_sock *sk, *sk_ret; struct bpf_sock *sk, *sk_ret;
__u32 linum, linum_idx; __u32 linum, linum_idx;
struct tcp_sock *ktp;
linum_idx = EGRESS_LINUM_IDX; linum_idx = EGRESS_LINUM_IDX;
sk = skb->sk; sk = skb->sk;
if (!sk || sk->state == 10) if (!sk)
RETURN; RET_LOG();
/* Not the testing egress traffic or
* TCP_LISTEN (10) socket will be copied at the ingress side.
*/
if (sk->family != AF_INET6 || !is_loopback6(sk->src_ip6) ||
sk->state == 10)
return CG_OK;
if (sk->src_port == bpf_ntohs(srv_sa6.sin6_port)) {
/* Server socket */
sk_ret = &srv_sk;
tp_ret = &srv_tp;
} else if (sk->dst_port == srv_sa6.sin6_port) {
/* Client socket */
sk_ret = &cli_sk;
tp_ret = &cli_tp;
} else {
/* Not the testing egress traffic */
return CG_OK;
}
/* It must be a fullsock for cgroup_skb/egress prog */
sk = bpf_sk_fullsock(sk); sk = bpf_sk_fullsock(sk);
if (!sk || sk->family != AF_INET6 || sk->protocol != IPPROTO_TCP || if (!sk)
!is_loopback6(sk->src_ip6)) RET_LOG();
RETURN;
/* Not the testing egress traffic */
if (sk->protocol != IPPROTO_TCP)
return CG_OK;
tp = bpf_tcp_sock(sk); tp = bpf_tcp_sock(sk);
if (!tp) if (!tp)
RETURN; RET_LOG();
srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx); skcpy(sk_ret, sk);
cli_sa6 = bpf_map_lookup_elem(&addr_map, &cli_idx); tpcpy(tp_ret, tp);
if (!srv_sa6 || !cli_sa6)
RETURN;
if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port)) if (sk_ret == &srv_sk) {
result_idx = EGRESS_SRV_IDX; ktp = bpf_skc_to_tcp_sock(sk);
else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port))
result_idx = EGRESS_CLI_IDX;
else
RETURN;
sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx); if (!ktp)
tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx); RET_LOG();
if (!sk_ret || !tp_ret)
RETURN;
skcpy(sk_ret, sk); lsndtime = ktp->lsndtime;
tpcpy(tp_ret, tp);
child_cg_id = bpf_sk_cgroup_id(ktp);
if (!child_cg_id)
RET_LOG();
parent_cg_id = bpf_sk_ancestor_cgroup_id(ktp, 2);
if (!parent_cg_id)
RET_LOG();
if (result_idx == EGRESS_SRV_IDX) {
/* The userspace has created it for srv sk */ /* The userspace has created it for srv sk */
pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, sk, 0, 0); pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, ktp, 0, 0);
pkt_out_cnt10 = bpf_sk_storage_get(&sk_pkt_out_cnt10, sk, pkt_out_cnt10 = bpf_sk_storage_get(&sk_pkt_out_cnt10, ktp,
0, 0); 0, 0);
} else { } else {
pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, sk, pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, sk,
...@@ -197,7 +199,7 @@ int egress_read_sock_fields(struct __sk_buff *skb) ...@@ -197,7 +199,7 @@ int egress_read_sock_fields(struct __sk_buff *skb)
} }
if (!pkt_out_cnt || !pkt_out_cnt10) if (!pkt_out_cnt || !pkt_out_cnt10)
RETURN; RET_LOG();
/* Even both cnt and cnt10 have lock defined in their BTF, /* Even both cnt and cnt10 have lock defined in their BTF,
* intentionally one cnt takes lock while one does not * intentionally one cnt takes lock while one does not
...@@ -208,48 +210,44 @@ int egress_read_sock_fields(struct __sk_buff *skb) ...@@ -208,48 +210,44 @@ int egress_read_sock_fields(struct __sk_buff *skb)
pkt_out_cnt10->cnt += 10; pkt_out_cnt10->cnt += 10;
bpf_spin_unlock(&pkt_out_cnt10->lock); bpf_spin_unlock(&pkt_out_cnt10->lock);
RETURN; return CG_OK;
} }
SEC("cgroup_skb/ingress") SEC("cgroup_skb/ingress")
int ingress_read_sock_fields(struct __sk_buff *skb) int ingress_read_sock_fields(struct __sk_buff *skb)
{ {
__u32 srv_idx = ADDR_SRV_IDX, result_idx = INGRESS_LISTEN_IDX; struct bpf_tcp_sock *tp;
struct bpf_tcp_sock *tp, *tp_ret;
struct bpf_sock *sk, *sk_ret;
struct sockaddr_in6 *srv_sa6;
__u32 linum, linum_idx; __u32 linum, linum_idx;
struct bpf_sock *sk;
linum_idx = INGRESS_LINUM_IDX; linum_idx = INGRESS_LINUM_IDX;
sk = skb->sk; sk = skb->sk;
if (!sk || sk->family != AF_INET6 || !is_loopback6(sk->src_ip6)) if (!sk)
RETURN; RET_LOG();
srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx); /* Not the testing ingress traffic to the server */
if (!srv_sa6 || sk->src_port != bpf_ntohs(srv_sa6->sin6_port)) if (sk->family != AF_INET6 || !is_loopback6(sk->src_ip6) ||
RETURN; sk->src_port != bpf_ntohs(srv_sa6.sin6_port))
return CG_OK;
if (sk->state != 10 && sk->state != 12) /* Only interested in TCP_LISTEN */
RETURN; if (sk->state != 10)
return CG_OK;
sk = bpf_get_listener_sock(sk); /* It must be a fullsock for cgroup_skb/ingress prog */
sk = bpf_sk_fullsock(sk);
if (!sk) if (!sk)
RETURN; RET_LOG();
tp = bpf_tcp_sock(sk); tp = bpf_tcp_sock(sk);
if (!tp) if (!tp)
RETURN; RET_LOG();
sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
if (!sk_ret || !tp_ret)
RETURN;
skcpy(sk_ret, sk); skcpy(&listen_sk, sk);
tpcpy(tp_ret, tp); tpcpy(&listen_tp, tp);
RETURN; return CG_OK;
} }
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
...@@ -854,3 +854,50 @@ ...@@ -854,3 +854,50 @@
.errstr = "Unreleased reference", .errstr = "Unreleased reference",
.result = REJECT, .result = REJECT,
}, },
{
"reference tracking: bpf_sk_release(btf_tcp_sock)",
.insns = {
BPF_SK_LOOKUP(sk_lookup_tcp),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "unknown func",
},
{
"reference tracking: use ptr from bpf_skc_to_tcp_sock() after release",
.insns = {
BPF_SK_LOOKUP(sk_lookup_tcp),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_7, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "invalid mem access",
.result_unpriv = REJECT,
.errstr_unpriv = "unknown func",
},
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment