Commit 0548c5f2 authored by Jakub Kicinski

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
bpf 2023-01-27

We've added 10 non-merge commits during the last 9 day(s) which contain
a total of 10 files changed, 170 insertions(+), 59 deletions(-).

The main changes are:

1) Fix preservation of register's parent/live fields when copying
   range-info, from Eduard Zingerman.

2) Fix an off-by-one bug in bpf_mem_cache_idx() to select the right
   cache, from Hou Tao.

3) Fix stack overflow from infinite recursion in sock_map_close(),
   from Jakub Sitnicki.

4) Fix missing btf_put() in register_btf_id_dtor_kfuncs()'s error path,
   from Jiri Olsa.

5) Fix a splat from bpf_setsockopt() via lsm_cgroup/socket_sock_rcv_skb,
   from Kui-Feng Lee.

6) Fix bpf_send_signal[_thread]() helpers to hold a reference on the task,
   from Yonghong Song.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Fix the kernel crash caused by bpf_setsockopt().
  selftests/bpf: Cover listener cloning with progs attached to sockmap
  selftests/bpf: Pass BPF skeleton to sockmap_listen ops tests
  bpf, sockmap: Check for any of tcp_bpf_prots when cloning a listener
  bpf, sockmap: Don't let sock_map_{close,destroy,unhash} call itself
  bpf: Add missing btf_put to register_btf_id_dtor_kfuncs
  selftests/bpf: Verify copy_register_state() preserves parent/live fields
  bpf: Fix to preserve reg parent/live fields when copying range info
  bpf: Fix a possible task gone issue with bpf_send_signal[_thread]() helpers
  bpf: Fix off-by-one error in bpf_mem_cache_idx()
====================

Link: https://lore.kernel.org/r/20230127215820.4993-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 7d2c89b3 5416c9ae
@@ -38,4 +38,16 @@
  */
 #define find_closest_descending(x, a, as) __find_closest(x, a, as, >=)
 
+/**
+ * is_insidevar - check if the @ptr points inside the @var memory range.
+ * @ptr: the pointer to a memory address.
+ * @var: the variable which address and size identify the memory range.
+ *
+ * Evaluates to true if the address in @ptr lies within the memory
+ * range allocated to @var.
+ */
+#define is_insidevar(ptr, var) \
+	((uintptr_t)(ptr) >= (uintptr_t)(var) && \
+	 (uintptr_t)(ptr) <  (uintptr_t)(var) + sizeof(var))
+
 #endif
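The macro matches a pointer that lands anywhere within the variable's storage, not just at one fixed address; the tcp_bpf_clone() change further down relies on this to test a proto pointer against the whole tcp_bpf_prots table. A minimal userspace sketch of the semantics (the array and names below are illustrative, not from the kernel tree):

#include <stdio.h>
#include <stdint.h>

#define is_insidevar(ptr, var) \
	((uintptr_t)(ptr) >= (uintptr_t)(var) && \
	 (uintptr_t)(ptr) <  (uintptr_t)(var) + sizeof(var))

/* stand-in for a table like tcp_bpf_prots[family][config] */
static int protos[2][4];
static int elsewhere;

int main(void)
{
	printf("%d\n", is_insidevar(&protos[1][3], protos)); /* 1: inside */
	printf("%d\n", is_insidevar(&elsewhere, protos));    /* 0: outside */
	return 0;
}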
...
@@ -51,7 +51,6 @@ BTF_SET_END(bpf_lsm_current_hooks)
  */
 BTF_SET_START(bpf_lsm_locked_sockopt_hooks)
 #ifdef CONFIG_SECURITY_NETWORK
-BTF_ID(func, bpf_lsm_socket_sock_rcv_skb)
 BTF_ID(func, bpf_lsm_sock_graft)
 BTF_ID(func, bpf_lsm_inet_csk_clone)
 BTF_ID(func, bpf_lsm_inet_conn_established)
...
@@ -7782,9 +7782,9 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c
 
 	sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
 
-	return 0;
 end:
-	btf_free_dtor_kfunc_tab(btf);
+	if (ret)
+		btf_free_dtor_kfunc_tab(btf);
 	btf_put(btf);
 	return ret;
 }
...
@@ -71,7 +71,7 @@ static int bpf_mem_cache_idx(size_t size)
 	if (size <= 192)
 		return size_index[(size - 1) / 8] - 1;
 
-	return fls(size - 1) - 1;
+	return fls(size - 1) - 2;
 }
 
 #define NUM_CACHES 11
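To see the off-by-one: the allocator keeps NUM_CACHES == 11 caches, and for sizes above 192 the index comes straight from fls(). A standalone sketch of the arithmetic, with fls() open-coded to match the kernel's semantics and the cache-size list assumed from the allocator (16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096):

#include <stdio.h>

/* fls(): position of the most significant set bit; fls(0) == 0 */
static int fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* 256 bytes: the old formula gives 7 (the 512-byte cache),
	 * the fixed one gives 6 (the 256-byte cache).
	 */
	printf("256:  old=%d fixed=%d\n", fls(256 - 1) - 1, fls(256 - 1) - 2);

	/* 4096 bytes: the old formula gives 11, one past the last valid
	 * index (10); the fixed one gives 10, the 4096-byte cache.
	 */
	printf("4096: old=%d fixed=%d\n", fls(4096 - 1) - 1, fls(4096 - 1) - 2);
	return 0;
}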
...
@@ -3243,13 +3243,24 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
 	return reg->type != SCALAR_VALUE;
 }
 
+/* Copy src state preserving dst->parent and dst->live fields */
+static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
+{
+	struct bpf_reg_state *parent = dst->parent;
+	enum bpf_reg_liveness live = dst->live;
+
+	*dst = *src;
+	dst->parent = parent;
+	dst->live = live;
+}
+
 static void save_register_state(struct bpf_func_state *state,
 				int spi, struct bpf_reg_state *reg,
 				int size)
 {
 	int i;
 
-	state->stack[spi].spilled_ptr = *reg;
+	copy_register_state(&state->stack[spi].spilled_ptr, reg);
 	if (size == BPF_REG_SIZE)
 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
 
@@ -3577,7 +3588,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 		 */
 		s32 subreg_def = state->regs[dst_regno].subreg_def;
 
-		state->regs[dst_regno] = *reg;
+		copy_register_state(&state->regs[dst_regno], reg);
 		state->regs[dst_regno].subreg_def = subreg_def;
 	} else {
 		for (i = 0; i < size; i++) {
@@ -3598,7 +3609,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 		if (dst_regno >= 0) {
 			/* restore register state from stack */
-			state->regs[dst_regno] = *reg;
+			copy_register_state(&state->regs[dst_regno], reg);
 			/* mark reg as written since spilled pointer state likely
 			 * has its liveness marks cleared by is_state_visited()
 			 * which resets stack/reg liveness for state transitions
@@ -9592,7 +9603,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 		 */
 		if (!ptr_is_dst_reg) {
 			tmp = *dst_reg;
-			*dst_reg = *ptr_reg;
+			copy_register_state(dst_reg, ptr_reg);
 		}
 		ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
 						env->insn_idx);
@@ -10845,7 +10856,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 				 * to propagate min/max range.
 				 */
 				src_reg->id = ++env->id_gen;
-				*dst_reg = *src_reg;
+				copy_register_state(dst_reg, src_reg);
 				dst_reg->live |= REG_LIVE_WRITTEN;
 				dst_reg->subreg_def = DEF_NOT_SUBREG;
 			} else {
@@ -10856,7 +10867,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 						insn->src_reg);
 				return -EACCES;
 			} else if (src_reg->type == SCALAR_VALUE) {
-				*dst_reg = *src_reg;
+				copy_register_state(dst_reg, src_reg);
 				/* Make sure ID is cleared otherwise
 				 * dst_reg min/max could be incorrectly
 				 * propagated into src_reg by find_equal_scalars()
@@ -11655,7 +11666,7 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
 		if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
-			*reg = *known_reg;
+			copy_register_state(reg, known_reg);
 	}));
 }
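The underlying pitfall is plain C: a whole-struct assignment copies every member, so `*dst = *src` silently overwrites bookkeeping that belongs to the destination. A simplified stand-in (not the kernel's struct bpf_reg_state) showing the save-and-restore shape of the fix:

#include <stdio.h>

struct reg {
	int range;          /* the state we do want to copy */
	struct reg *parent; /* liveness chain: must survive the copy */
	int live;
};

static void copy_register_state(struct reg *dst, const struct reg *src)
{
	struct reg *parent = dst->parent;
	int live = dst->live;

	*dst = *src;          /* copy the range info wholesale... */
	dst->parent = parent; /* ...then restore dst's own links */
	dst->live = live;
}

int main(void)
{
	struct reg chain = {0};
	struct reg dst = { .range = 0, .parent = &chain, .live = 1 };
	const struct reg src = { .range = 42, .parent = NULL, .live = 0 };

	copy_register_state(&dst, &src);
	printf("range=%d parent_kept=%d live=%d\n",
	       dst.range, dst.parent == &chain, dst.live);
	return 0;
}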
...
@@ -833,6 +833,7 @@ static void do_bpf_send_signal(struct irq_work *entry)
 
 	work = container_of(entry, struct send_signal_irq_work, irq_work);
 	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
+	put_task_struct(work->task);
 }
 
 static int bpf_send_signal_common(u32 sig, enum pid_type type)
@@ -867,7 +868,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
 		 * to the irq_work. The current task may change when queued
 		 * irq works get executed.
 		 */
-		work->task = current;
+		work->task = get_task_struct(current);
 		work->sig = sig;
 		work->type = type;
 		irq_work_queue(&work->irq_work);
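The lifetime rule being enforced: a deferred callback must own a reference to any object it will dereference, because the submitting context's reference can disappear before the callback runs. A simplified refcount sketch of the pattern (illustrative types, not the kernel API):

#include <stdio.h>

struct task { int refcount; int pid; };

static struct task *get_task(struct task *t)
{
	t->refcount++;
	return t;
}

static void put_task(struct task *t)
{
	if (--t->refcount == 0)
		printf("task %d freed\n", t->pid);
}

struct work { struct task *task; };

/* Runs later, possibly after the submitter is gone. */
static void deferred_signal(struct work *w)
{
	printf("signal task %d\n", w->task->pid);
	put_task(w->task); /* drop the reference taken at queue time */
}

int main(void)
{
	struct task tsk = { .refcount = 1, .pid = 1234 };
	struct work w;

	w.task = get_task(&tsk); /* mirrors get_task_struct(current) */
	put_task(&tsk);          /* submitter's reference goes away... */
	deferred_signal(&w);     /* ...but the work still holds its own */
	return 0;
}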
...
@@ -1569,15 +1569,16 @@ void sock_map_unhash(struct sock *sk)
 	rcu_read_lock();
 	psock = sk_psock(sk);
 	if (unlikely(!psock)) {
 		rcu_read_unlock();
-		if (sk->sk_prot->unhash)
-			sk->sk_prot->unhash(sk);
-		return;
+		saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
+	} else {
+		saved_unhash = psock->saved_unhash;
+		sock_map_remove_links(sk, psock);
+		rcu_read_unlock();
 	}
-
-	saved_unhash = psock->saved_unhash;
-	sock_map_remove_links(sk, psock);
-	rcu_read_unlock();
-	saved_unhash(sk);
+	if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
+		return;
+	if (saved_unhash)
+		saved_unhash(sk);
 }
 EXPORT_SYMBOL_GPL(sock_map_unhash);
@@ -1590,17 +1591,18 @@ void sock_map_destroy(struct sock *sk)
 	rcu_read_lock();
 	psock = sk_psock_get(sk);
 	if (unlikely(!psock)) {
 		rcu_read_unlock();
-		if (sk->sk_prot->destroy)
-			sk->sk_prot->destroy(sk);
-		return;
+		saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
+	} else {
+		saved_destroy = psock->saved_destroy;
+		sock_map_remove_links(sk, psock);
+		rcu_read_unlock();
+		sk_psock_stop(psock);
+		sk_psock_put(sk, psock);
 	}
-
-	saved_destroy = psock->saved_destroy;
-	sock_map_remove_links(sk, psock);
-	rcu_read_unlock();
-	sk_psock_stop(psock);
-	sk_psock_put(sk, psock);
-	saved_destroy(sk);
+	if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
+		return;
+	if (saved_destroy)
+		saved_destroy(sk);
 }
 EXPORT_SYMBOL_GPL(sock_map_destroy);
@@ -1615,16 +1617,21 @@ void sock_map_close(struct sock *sk, long timeout)
 	if (unlikely(!psock)) {
 		rcu_read_unlock();
 		release_sock(sk);
-		return sk->sk_prot->close(sk, timeout);
+		saved_close = READ_ONCE(sk->sk_prot)->close;
+	} else {
+		saved_close = psock->saved_close;
+		sock_map_remove_links(sk, psock);
+		rcu_read_unlock();
+		sk_psock_stop(psock);
+		release_sock(sk);
+		cancel_work_sync(&psock->work);
+		sk_psock_put(sk, psock);
 	}
-
-	saved_close = psock->saved_close;
-	sock_map_remove_links(sk, psock);
-	rcu_read_unlock();
-	sk_psock_stop(psock);
-	release_sock(sk);
-	cancel_work_sync(&psock->work);
-	sk_psock_put(sk, psock);
+	/* Make sure we do not recurse. This is a bug.
+	 * Leak the socket instead of crashing on a stack overflow.
+	 */
+	if (WARN_ON_ONCE(saved_close == sock_map_close))
+		return;
 	saved_close(sk, timeout);
 }
 EXPORT_SYMBOL_GPL(sock_map_close);
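All three functions now share one shape: resolve the saved callback, then refuse to call it if doing so would re-enter the same function. A condensed sketch of just the guard (stand-in types; the kernel versions also tear down psock state first):

#include <stdio.h>

struct sock { void (*close)(struct sock *sk); };

static void sock_map_close(struct sock *sk)
{
	void (*saved_close)(struct sock *sk) = sk->close;

	/* If callback restoration went wrong, calling saved_close() would
	 * recurse until the stack overflows. Warn and leak the socket
	 * instead.
	 */
	if (saved_close == sock_map_close) {
		fprintf(stderr, "WARN: close callback not restored\n");
		return;
	}
	saved_close(sk);
}

static void tcp_close(struct sock *sk) { printf("tcp close\n"); }

int main(void)
{
	struct sock good = { .close = tcp_close };
	struct sock bad  = { .close = sock_map_close }; /* broken state */

	sock_map_close(&good); /* delegates normally */
	sock_map_close(&bad);  /* guard trips, no recursion */
	return 0;
}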
...
@@ -6,6 +6,7 @@
 #include <linux/bpf.h>
 #include <linux/init.h>
 #include <linux/wait.h>
+#include <linux/util_macros.h>
 
 #include <net/inet_common.h>
 #include <net/tls.h>
@@ -639,10 +640,9 @@ EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);
  */
 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
 {
-	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
 	struct proto *prot = newsk->sk_prot;
 
-	if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE])
+	if (is_insidevar(prot, tcp_bpf_prots))
 		newsk->sk_prot = sk->sk_prot_creator;
 }
 #endif /* CONFIG_BPF_SYSCALL */
...
@@ -30,6 +30,8 @@
 #define MAX_STRERR_LEN 256
 #define MAX_TEST_NAME 80
 
+#define __always_unused __attribute__((__unused__))
+
 #define _FAIL(errnum, fmt...) \
 	({ \
 		error_at_line(0, (errnum), __func__, __LINE__, fmt); \
@@ -321,7 +323,8 @@ static int socket_loopback(int family, int sotype)
 	return socket_loopback_reuseport(family, sotype, -1);
 }
 
-static void test_insert_invalid(int family, int sotype, int mapfd)
+static void test_insert_invalid(struct test_sockmap_listen *skel __always_unused,
+				int family, int sotype, int mapfd)
 {
 	u32 key = 0;
 	u64 value;
@@ -338,7 +341,8 @@ static void test_insert_invalid(int family, int sotype, int mapfd)
 	FAIL_ERRNO("map_update: expected EBADF");
 }
 
-static void test_insert_opened(int family, int sotype, int mapfd)
+static void test_insert_opened(struct test_sockmap_listen *skel __always_unused,
+			       int family, int sotype, int mapfd)
 {
 	u32 key = 0;
 	u64 value;
@@ -359,7 +363,8 @@ static void test_insert_opened(int family, int sotype, int mapfd)
 	xclose(s);
 }
 
-static void test_insert_bound(int family, int sotype, int mapfd)
+static void test_insert_bound(struct test_sockmap_listen *skel __always_unused,
+			      int family, int sotype, int mapfd)
 {
 	struct sockaddr_storage addr;
 	socklen_t len;
@@ -386,7 +391,8 @@ static void test_insert_bound(int family, int sotype, int mapfd)
 	xclose(s);
 }
 
-static void test_insert(int family, int sotype, int mapfd)
+static void test_insert(struct test_sockmap_listen *skel __always_unused,
+			int family, int sotype, int mapfd)
 {
 	u64 value;
 	u32 key;
@@ -402,7 +408,8 @@ static void test_insert(int family, int sotype, int mapfd)
 	xclose(s);
 }
 
-static void test_delete_after_insert(int family, int sotype, int mapfd)
+static void test_delete_after_insert(struct test_sockmap_listen *skel __always_unused,
+				     int family, int sotype, int mapfd)
 {
 	u64 value;
 	u32 key;
@@ -419,7 +426,8 @@ static void test_delete_after_insert(int family, int sotype, int mapfd)
 	xclose(s);
 }
 
-static void test_delete_after_close(int family, int sotype, int mapfd)
+static void test_delete_after_close(struct test_sockmap_listen *skel __always_unused,
+				    int family, int sotype, int mapfd)
 {
 	int err, s;
 	u64 value;
@@ -442,7 +450,8 @@ static void test_delete_after_close(int family, int sotype, int mapfd)
 	FAIL_ERRNO("map_delete: expected EINVAL/EINVAL");
 }
 
-static void test_lookup_after_insert(int family, int sotype, int mapfd)
+static void test_lookup_after_insert(struct test_sockmap_listen *skel __always_unused,
+				     int family, int sotype, int mapfd)
 {
 	u64 cookie, value;
 	socklen_t len;
@@ -470,7 +479,8 @@ static void test_lookup_after_insert(int family, int sotype, int mapfd)
 	xclose(s);
 }
 
-static void test_lookup_after_delete(int family, int sotype, int mapfd)
+static void test_lookup_after_delete(struct test_sockmap_listen *skel __always_unused,
+				     int family, int sotype, int mapfd)
 {
 	int err, s;
 	u64 value;
@@ -493,7 +503,8 @@ static void test_lookup_after_delete(int family, int sotype, int mapfd)
 	xclose(s);
 }
 
-static void test_lookup_32_bit_value(int family, int sotype, int mapfd)
+static void test_lookup_32_bit_value(struct test_sockmap_listen *skel __always_unused,
+				     int family, int sotype, int mapfd)
 {
 	u32 key, value32;
 	int err, s;
@@ -523,7 +534,8 @@ static void test_lookup_32_bit_value(int family, int sotype, int mapfd)
 	xclose(s);
 }
 
-static void test_update_existing(int family, int sotype, int mapfd)
+static void test_update_existing(struct test_sockmap_listen *skel __always_unused,
+				 int family, int sotype, int mapfd)
 {
 	int s1, s2;
 	u64 value;
@@ -551,7 +563,7 @@ static void test_update_existing(int family, int sotype, int mapfd)
 /* Exercise the code path where we destroy child sockets that never
  * got accept()'ed, aka orphans, when parent socket gets closed.
  */
-static void test_destroy_orphan_child(int family, int sotype, int mapfd)
+static void do_destroy_orphan_child(int family, int sotype, int mapfd)
 {
 	struct sockaddr_storage addr;
 	socklen_t len;
@@ -582,10 +594,38 @@ static void test_destroy_orphan_child(int family, int sotype, int mapfd)
 	xclose(s);
 }
 
+static void test_destroy_orphan_child(struct test_sockmap_listen *skel,
+				      int family, int sotype, int mapfd)
+{
+	int msg_verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
+	int skb_verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+	const struct test {
+		int progfd;
+		enum bpf_attach_type atype;
+	} tests[] = {
+		{ -1, -1 },
+		{ msg_verdict, BPF_SK_MSG_VERDICT },
+		{ skb_verdict, BPF_SK_SKB_VERDICT },
+	};
+	const struct test *t;
+
+	for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+		if (t->progfd != -1 &&
+		    xbpf_prog_attach(t->progfd, mapfd, t->atype, 0) != 0)
+			return;
+
+		do_destroy_orphan_child(family, sotype, mapfd);
+
+		if (t->progfd != -1)
+			xbpf_prog_detach2(t->progfd, mapfd, t->atype);
+	}
+}
+
 /* Perform a passive open after removing listening socket from SOCKMAP
  * to ensure that callbacks get restored properly.
  */
-static void test_clone_after_delete(int family, int sotype, int mapfd)
+static void test_clone_after_delete(struct test_sockmap_listen *skel __always_unused,
+				    int family, int sotype, int mapfd)
 {
 	struct sockaddr_storage addr;
 	socklen_t len;
@@ -621,7 +661,8 @@ static void test_clone_after_delete(int family, int sotype, int mapfd)
  * SOCKMAP, but got accept()'ed only after the parent has been removed
  * from SOCKMAP, gets cloned without parent psock state or callbacks.
  */
-static void test_accept_after_delete(int family, int sotype, int mapfd)
+static void test_accept_after_delete(struct test_sockmap_listen *skel __always_unused,
+				     int family, int sotype, int mapfd)
 {
 	struct sockaddr_storage addr;
 	const u32 zero = 0;
@@ -675,7 +716,8 @@ static void test_accept_after_delete(int family, int sotype, int mapfd)
 /* Check that child socket that got created and accepted while parent
  * was in a SOCKMAP is cloned without parent psock state or callbacks.
  */
-static void test_accept_before_delete(int family, int sotype, int mapfd)
+static void test_accept_before_delete(struct test_sockmap_listen *skel __always_unused,
+				      int family, int sotype, int mapfd)
 {
 	struct sockaddr_storage addr;
 	const u32 zero = 0, one = 1;
@@ -784,7 +826,8 @@ static void *connect_accept_thread(void *arg)
 	return NULL;
 }
 
-static void test_syn_recv_insert_delete(int family, int sotype, int mapfd)
+static void test_syn_recv_insert_delete(struct test_sockmap_listen *skel __always_unused,
+					int family, int sotype, int mapfd)
 {
 	struct connect_accept_ctx ctx = { 0 };
 	struct sockaddr_storage addr;
@@ -847,7 +890,8 @@ static void *listen_thread(void *arg)
 	return NULL;
 }
 
-static void test_race_insert_listen(int family, int socktype, int mapfd)
+static void test_race_insert_listen(struct test_sockmap_listen *skel __always_unused,
+				    int family, int socktype, int mapfd)
 {
 	struct connect_accept_ctx ctx = { 0 };
 	const u32 zero = 0;
@@ -1473,7 +1517,8 @@ static void test_ops(struct test_sockmap_listen *skel, struct bpf_map *map,
 		     int family, int sotype)
 {
 	const struct op_test {
-		void (*fn)(int family, int sotype, int mapfd);
+		void (*fn)(struct test_sockmap_listen *skel,
+			   int family, int sotype, int mapfd);
 		const char *name;
 		int sotype;
 	} tests[] = {
@@ -1520,7 +1565,7 @@ static void test_ops(struct test_sockmap_listen *skel, struct bpf_map *map,
 		if (!test__start_subtest(s))
 			continue;
 
-		t->fn(family, sotype, map_fd);
+		t->fn(skel, family, sotype, map_fd);
 		test_ops_cleanup(map);
 	}
 }
...
@@ -225,3 +225,39 @@
 	.result_unpriv = ACCEPT,
 	.insn_processed = 15,
 },
+/* The test performs a conditional 64-bit write to a stack location
+ * fp[-8], this is followed by an unconditional 8-bit write to fp[-8],
+ * then data is read from fp[-8]. This sequence is unsafe.
+ *
+ * The test would be mistakenly marked as safe w/o dst register parent
+ * preservation in verifier.c:copy_register_state() function.
+ *
+ * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the
+ * checkpoint state after conditional 64-bit assignment.
+ */
+{
+	"write tracking and register parent chain bug",
+	.insns = {
+	/* r6 = ktime_get_ns() */
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	/* r0 = ktime_get_ns() */
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	/* if r0 > r6 goto +1 */
+	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1),
+	/* *(u64 *)(r10 - 8) = 0xdeadbeef */
+	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef),
+	/* r1 = 42 */
+	BPF_MOV64_IMM(BPF_REG_1, 42),
+	/* *(u8 *)(r10 - 8) = r1 */
+	BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8),
+	/* r2 = *(u64 *)(r10 - 8) */
+	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8),
+	/* exit(0) */
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.flags = BPF_F_TEST_STATE_FREQ,
+	.errstr = "invalid read from stack off -8+1 size 8",
+	.result = REJECT,
+},