Commit d5d4c363 authored by Jakub Kicinski

Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2022-06-09

We've added 6 non-merge commits during the last 2 day(s) which contain
a total of 8 files changed, 49 insertions(+), 15 deletions(-).

The main changes are:

1) Fix an illegal copy_to_user() attempt seen by syzkaller through arm64
   BPF JIT compiler, from Eric Dumazet.

2) Fix calling global functions from BPF_PROG_TYPE_EXT programs by using
   the correct program context type, from Toke Høiland-Jørgensen.

3) Fix XSK TX batching invalid descriptor handling, from Maciej Fijalkowski.

4) Fix potential integer overflows in multi-kprobe link code by using safer
   kvmalloc_array() allocation helpers, from Dan Carpenter.

5) Add Quentin as bpftool maintainer, from Quentin Monnet.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  MAINTAINERS: Add a maintainer for bpftool
  xsk: Fix handling of invalid descriptors in XSK TX batching API
  selftests/bpf: Add selftest for calling global functions from freplace
  bpf: Fix calling global functions from BPF_PROG_TYPE_EXT programs
  bpf: Use safer kvmalloc_array() where possible
  bpf, arm64: Clear prog->jited_len along prog->jited
====================

Link: https://lore.kernel.org/r/20220608234133.32265-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents a6958951 7c217aca
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3731,6 +3731,13 @@ F: include/linux/bpf_lsm.h
 F: kernel/bpf/bpf_lsm.c
 F: security/bpf/
 
+BPFTOOL
+M: Quentin Monnet <quentin@isovalent.com>
+L: bpf@vger.kernel.org
+S: Maintained
+F: kernel/bpf/disasm.*
+F: tools/bpf/bpftool/
+
 BROADCOM B44 10/100 ETHERNET DRIVER
 M: Michael Chan <michael.chan@broadcom.com>
 L: netdev@vger.kernel.org
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1478,6 +1478,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                 bpf_jit_binary_free(header);
                 prog->bpf_func = NULL;
                 prog->jited = 0;
+                prog->jited_len = 0;
                 goto out_off;
         }
         bpf_jit_binary_lock_ro(header);
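
A minimal, self-contained model of the invariant this one-line change restores; the struct and the dump_jited() helper below are simplified stand-ins for the path that trusts jited_len when copying a JITed image out, not the kernel's bpf_prog or its UAPI code: once the JIT error path drops the generated image, the recorded image length must be cleared with it.

/* Hedged sketch, simplified stand-ins only; not kernel code. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct fake_prog {
        void *jit_image;        /* stand-in for prog->bpf_func when jited */
        unsigned int jited_len; /* length a later dump request trusts */
};

/* Toy "copy the JITed image out" helper: it believes jited_len. */
static int dump_jited(const struct fake_prog *prog, void *buf, size_t len)
{
        if (!prog->jited_len)
                return 0;       /* nothing to copy */
        if (!prog->jit_image || len < prog->jited_len)
                return -1;      /* inconsistent state caught here */
        memcpy(buf, prog->jit_image, prog->jited_len);
        return (int)prog->jited_len;
}

int main(void)
{
        /* State a failed JIT could leave behind without the fix:
         * the image is gone but jited_len still claims 64 bytes. */
        struct fake_prog prog = { .jit_image = NULL, .jited_len = 64 };
        char buf[128];

        printf("stale jited_len:   dump returns %d\n", dump_jited(&prog, buf, sizeof(buf)));

        prog.jited_len = 0;     /* what the added prog->jited_len = 0 enforces */
        printf("cleared jited_len: dump returns %d\n", dump_jited(&prog, buf, sizeof(buf)));
        return 0;
}
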
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -6054,6 +6054,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
                                     struct bpf_reg_state *regs,
                                     bool ptr_to_mem_ok)
 {
+        enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
         struct bpf_verifier_log *log = &env->log;
         u32 i, nargs, ref_id, ref_obj_id = 0;
         bool is_kfunc = btf_is_kernel(btf);
@@ -6171,7 +6172,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
                                 return -EINVAL;
                         }
                         /* rest of the arguments can be anything, like normal kfunc */
-                } else if (btf_get_prog_ctx_type(log, btf, t, env->prog->type, i)) {
+                } else if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
                         /* If function expects ctx type in BTF check that caller
                          * is passing PTR_TO_CTX.
                          */
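
A minimal, self-contained model of the idea behind this change; the toy structs and effective_type() below are simplified stand-ins rather than the kernel's definitions: a BPF_PROG_TYPE_EXT (freplace) program should have its context arguments checked against the type of the program it extends, not against EXT itself.

/* Hedged sketch, simplified stand-ins only; not kernel code. */
#include <stdio.h>

enum toy_prog_type { TOY_PROG_SCHED_CLS, TOY_PROG_EXT };

struct toy_prog {
        enum toy_prog_type type;
        const struct toy_prog *dst_prog;        /* program being extended, if EXT */
};

/* Mirrors the role resolve_prog_type() plays in the fix above: an EXT
 * program is resolved to its target's type before ctx checks. */
static enum toy_prog_type effective_type(const struct toy_prog *prog)
{
        if (prog->type == TOY_PROG_EXT && prog->dst_prog)
                return prog->dst_prog->type;
        return prog->type;
}

int main(void)
{
        struct toy_prog target = { .type = TOY_PROG_SCHED_CLS };
        struct toy_prog ext = { .type = TOY_PROG_EXT, .dst_prog = &target };

        /* Context checks for the freplace program should see SCHED_CLS (0),
         * not EXT (1). */
        printf("effective type of ext prog: %d\n", effective_type(&ext));
        return 0;
}
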
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2263,11 +2263,11 @@ static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32
         int err = -ENOMEM;
         unsigned int i;
 
-        syms = kvmalloc(cnt * sizeof(*syms), GFP_KERNEL);
+        syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
         if (!syms)
                 goto error;
 
-        buf = kvmalloc(cnt * KSYM_NAME_LEN, GFP_KERNEL);
+        buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
         if (!buf)
                 goto error;
 
@@ -2464,7 +2464,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
                 return -EINVAL;
 
         size = cnt * sizeof(*addrs);
-        addrs = kvmalloc(size, GFP_KERNEL);
+        addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
         if (!addrs)
                 return -ENOMEM;
 
@@ -2489,7 +2489,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
         ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
         if (ucookies) {
-                cookies = kvmalloc(size, GFP_KERNEL);
+                cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
                 if (!cookies) {
                         err = -ENOMEM;
                         goto error;
                 }
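
A minimal userspace sketch of why the switch to kvmalloc_array() matters; alloc_array() below is a hypothetical stand-in for the overflow-checking behaviour of helpers such as kvmalloc_array()/kcalloc(), not a kernel API. With an attacker-influenced count, an open-coded cnt * size multiplication can wrap around and yield an undersized buffer, while a checked array allocator fails the allocation instead.

/* Hedged sketch, hypothetical helper; not kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Refuse to allocate when n * size would overflow size_t. */
static void *alloc_array(size_t n, size_t size)
{
        if (size && n > SIZE_MAX / size)
                return NULL;            /* multiplication would wrap around */
        return malloc(n * size);
}

int main(void)
{
        /* With a plain malloc(n * size) this wraps to a tiny allocation;
         * the checked helper rejects it instead. */
        size_t huge = SIZE_MAX / 4 + 1;
        void *p = alloc_array(huge, 8);

        printf("checked allocation: %s\n", p ? "unexpectedly succeeded" : "rejected (overflow)");
        free(p);
        return 0;
}
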
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -373,7 +373,8 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
                 goto out;
         }
 
-        nb_pkts = xskq_cons_peek_desc_batch(xs->tx, pool, max_entries);
+        max_entries = xskq_cons_nb_entries(xs->tx, max_entries);
+        nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, max_entries);
         if (!nb_pkts) {
                 xs->tx->queue_empty_descs++;
                 goto out;
@@ -389,7 +390,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
         if (!nb_pkts)
                 goto out;
 
-        xskq_cons_release_n(xs->tx, nb_pkts);
+        xskq_cons_release_n(xs->tx, max_entries);
         __xskq_cons_release(xs->tx);
         xs->sk.sk_write_space(&xs->sk);
 
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -282,14 +282,6 @@ static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
         return xskq_cons_read_desc(q, desc, pool);
 }
 
-static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
-                                            u32 max)
-{
-        u32 entries = xskq_cons_nb_entries(q, max);
-
-        return xskq_cons_read_desc_batch(q, pool, entries);
-}
-
 /* To improve performance in the xskq_cons_release functions, only update local state here.
  * Reflect this to global state when we get new entries from the ring in
  * xskq_cons_get_entries() and whenever Rx or Tx processing are completed in the NAPI loop.
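
A self-contained toy model of the batching pattern the two xsk changes above restore; the ring, descriptors, and helpers are simplified stand-ins, not the xsk queue code. The caller first clamps the batch to what the ring actually holds, skips descriptors that fail validation while reading, and then releases every inspected slot (max_entries rather than the count of valid packets), so an invalid descriptor is consumed instead of being peeked again.

/* Hedged sketch, toy ring only; not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u

struct toy_desc { uint32_t len; };      /* len == 0 models an invalid descriptor */
struct toy_ring {
        struct toy_desc slots[RING_SIZE];
        uint32_t cons, prod;
};

static uint32_t nb_entries(const struct toy_ring *r, uint32_t max)
{
        uint32_t avail = r->prod - r->cons;
        return avail < max ? avail : max;
}

/* Gather up to 'max' valid descriptors; invalid ones are skipped but still
 * count as inspected, so the caller releases all 'max' slots afterwards. */
static uint32_t read_desc_batch(struct toy_ring *r, struct toy_desc *out, uint32_t max)
{
        uint32_t pkts = 0;

        for (uint32_t i = 0; i < max; i++) {
                struct toy_desc *d = &r->slots[(r->cons + i) % RING_SIZE];
                if (d->len)
                        out[pkts++] = *d;
        }
        return pkts;
}

int main(void)
{
        struct toy_ring r = { .prod = 3 };
        struct toy_desc out[RING_SIZE];

        r.slots[0].len = 64;
        r.slots[1].len = 0;     /* invalid descriptor in the middle */
        r.slots[2].len = 128;

        uint32_t max = nb_entries(&r, RING_SIZE);       /* clamp first, as in the fix */
        uint32_t pkts = read_desc_batch(&r, out, max);
        r.cons += max;          /* release max entries, not pkts, so slot 1 is consumed */

        printf("valid packets: %u, consumed slots: %u\n", pkts, max);
        return 0;
}
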
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -395,6 +395,18 @@ static void test_func_map_prog_compatibility(void)
                                   "./test_attach_probe.o");
 }
 
+static void test_func_replace_global_func(void)
+{
+        const char *prog_name[] = {
+                "freplace/test_pkt_access",
+        };
+
+        test_fexit_bpf2bpf_common("./freplace_global_func.o",
+                                  "./test_pkt_access.o",
+                                  ARRAY_SIZE(prog_name),
+                                  prog_name, false, NULL);
+}
+
 /* NOTE: affect other tests, must run in serial mode */
 void serial_test_fexit_bpf2bpf(void)
 {
@@ -416,4 +428,6 @@ void serial_test_fexit_bpf2bpf(void)
                 test_func_replace_multi();
         if (test__start_subtest("fmod_ret_freplace"))
                 test_fmod_ret_freplace();
+        if (test__start_subtest("func_replace_global_func"))
+                test_func_replace_global_func();
 }
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_global_func.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__noinline
+int test_ctx_global_func(struct __sk_buff *skb)
+{
+        volatile int retval = 1;
+        return retval;
+}
+
+SEC("freplace/test_pkt_access")
+int new_test_pkt_access(struct __sk_buff *skb)
+{
+        return test_ctx_global_func(skb);
+}
+
+char _license[] SEC("license") = "GPL";