Commit 547fd083 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2021-03-10

The following pull-request contains BPF updates for your *net* tree.

We've added 8 non-merge commits during the last 5 day(s) which contain
a total of 11 files changed, 136 insertions(+), 17 deletions(-).

The main changes are:

1) Reject bogus use of vmlinux BTF as map/prog creation BTF, from Alexei Starovoitov.

2) Fix allocation failure splat in x86 JIT for large progs. Also fix overwriting
   percpu cgroup storage from tracing programs when nested, from Yonghong Song.

3) Fix rx queue retrieval in XDP for multi-queue veth, from Maciej Fijalkowski.

4) Fix bpf_check_mtu() helper API before freeze to have mtu_len as custom skb/xdp
   L3 input length, from Jesper Dangaard Brouer.

5) Fix inode_storage's lookup_elem return value upon having bad fd, from Tal Lossos.

6) Fix bpftool and libbpf cross-build on MacOS, from Georgi Valkov.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 28259bac de920fc6
@@ -2225,7 +2225,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
padding = true;
goto skip_init_addrs;
}
-addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
+addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
prog = orig_prog;
goto out_addrs;
@@ -2317,7 +2317,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
if (image)
bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
-kfree(addrs);
+kvfree(addrs);
kfree(jit_data);
prog->aux->jit_data = NULL;
}
......
@@ -302,8 +302,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
if (rxq < rcv->real_num_rx_queues) {
rq = &rcv_priv->rq[rxq];
rcv_xdp = rcu_access_pointer(rq->xdp_prog);
-if (rcv_xdp)
-skb_record_rx_queue(skb, rxq);
+skb_record_rx_queue(skb, rxq);
}
skb_tx_timestamp(skb);
......
@@ -1093,7 +1093,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
_ret; \
})
-#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
+#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \
({ \
struct bpf_prog_array_item *_item; \
struct bpf_prog *_prog; \
@@ -1106,7 +1106,8 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
goto _out; \
_item = &_array->items[0]; \
while ((_prog = READ_ONCE(_item->prog))) { \
-bpf_cgroup_storage_set(_item->cgroup_storage); \
+if (set_cg_storage) \
+bpf_cgroup_storage_set(_item->cgroup_storage); \
_ret &= func(_prog, ctx); \
_item++; \
} \
@@ -1153,10 +1154,10 @@ _out: \
})
#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
-__BPF_PROG_RUN_ARRAY(array, ctx, func, false)
+__BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)
#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \
-__BPF_PROG_RUN_ARRAY(array, ctx, func, true)
+__BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
......
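[Editor's note] Why the new set_cg_storage flag matters: BPF_PROG_RUN_ARRAY_CHECK is used from the tracing side, and a tracing program array can fire while a cgroup program array is still running on the same CPU; previously both paths wrote the shared per-CPU cgroup storage pointer. The toy C model below is not kernel code, every name in it is invented, and it only illustrates why the nested run must skip the pointer write:

#include <stdio.h>

/* Stand-in for the per-CPU pointer that bpf_cgroup_storage_set() writes. */
static void *percpu_cg_storage;
static int outer_storage, nested_storage;

static void run_array(void *storage, int set_cg_storage, void (*nested)(void))
{
	if (set_cg_storage)	/* after the fix: only the cgroup path sets it */
		percpu_cg_storage = storage;
	if (nested)
		nested();	/* e.g. a tracing program array fires mid-run */
}

static void nested_tracing_run(void)
{
	/* set_cg_storage = 0 models BPF_PROG_RUN_ARRAY_CHECK after the fix;
	 * passing 1 instead reproduces the old clobbering of the outer value. */
	run_array(&nested_storage, 0, NULL);
}

int main(void)
{
	run_array(&outer_storage, 1, nested_tracing_run);
	printf("outer storage %s\n",
	       percpu_cg_storage == &outer_storage ? "intact" : "clobbered");
	return 0;
}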
@@ -3850,7 +3850,7 @@ union bpf_attr {
*
* long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
* Description
-* Check ctx packet size against exceeding MTU of net device (based
+* Check packet size against exceeding MTU of net device (based
* on *ifindex*). This helper will likely be used in combination
* with helpers that adjust/change the packet size.
*
@@ -3867,6 +3867,14 @@ union bpf_attr {
* against the current net device. This is practical if this isn't
* used prior to redirect.
*
+* On input *mtu_len* must be a valid pointer, else verifier will
+* reject BPF program. If the value *mtu_len* is initialized to
+* zero then the ctx packet size is use. When value *mtu_len* is
+* provided as input this specify the L3 length that the MTU check
+* is done against. Remember XDP and TC length operate at L2, but
+* this value is L3 as this correlate to MTU and IP-header tot_len
+* values which are L3 (similar behavior as bpf_fib_lookup).
+*
* The Linux kernel route table can configure MTUs on a more
* specific per route level, which is not provided by this helper.
* For route level MTU checks use the **bpf_fib_lookup**\ ()
@@ -3891,11 +3899,9 @@ union bpf_attr {
*
* On return *mtu_len* pointer contains the MTU value of the net
* device. Remember the net device configured MTU is the L3 size,
-* which is returned here and XDP and TX length operate at L2.
+* which is returned here and XDP and TC length operate at L2.
* Helper take this into account for you, but remember when using
-* MTU value in your BPF-code. On input *mtu_len* must be a valid
-* pointer and be initialized (to zero), else verifier will reject
-* BPF program.
+* MTU value in your BPF-code.
*
* Return
* * 0 on success, and populate MTU value in *mtu_len* pointer.
......
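[Editor's note] A minimal, hedged sketch of the *mtu_len* contract documented above, written as a TC program; the program name and constants are invented for illustration, and the selftest programs further down in this diff exercise the same pattern end to end:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int mtu_input_len_sketch(struct __sk_buff *ctx)
{
	__u32 ifindex = 0;	/* 0: check against ctx's own net device */
	__u32 mtu_len = 1480;	/* non-zero input: L3 length to check, like iph->tot_len */

	if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0) ==
	    BPF_MTU_CHK_RET_FRAG_NEEDED)
		return BPF_DROP;	/* supplied L3 length exceeds the device MTU */

	/* On return mtu_len holds the device L3 MTU, whatever the input was. */
	return BPF_OK;
}

char _license[] SEC("license") = "GPL";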
@@ -109,7 +109,7 @@ static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
fd = *(int *)key;
f = fget_raw(fd);
if (!f)
-return NULL;
+return ERR_PTR(-EBADF);
sdata = inode_storage_lookup(f->f_inode, map, true);
fput(f);
......
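[Editor's note] User-space effect of the inode-storage fix above, as a hedged sketch using libbpf's bpf_map_lookup_elem(): a lookup on an inode storage map with an invalid fd as the key should now fail with EBADF instead of surfacing as a missing element (ENOENT). The map fd and the 8-byte value layout are assumptions for illustration:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>

/* inode_map_fd: assumed to be a BPF_MAP_TYPE_INODE_STORAGE map fd. */
static void lookup_with_bad_fd(int inode_map_fd)
{
	int bad_fd = -1;	/* not a valid open file descriptor */
	__u64 value = 0;	/* assumed 8-byte value type for the map */

	if (bpf_map_lookup_elem(inode_map_fd, &bad_fd, &value) < 0)
		/* Previously this surfaced as ENOENT; with the fix it is EBADF. */
		printf("lookup failed: %s\n", strerror(errno));
}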
@@ -854,6 +854,11 @@ static int map_create(union bpf_attr *attr)
err = PTR_ERR(btf);
goto free_map;
}
+if (btf_is_kernel(btf)) {
+btf_put(btf);
+err = -EACCES;
+goto free_map;
+}
map->btf = btf;
if (attr->btf_value_type_id) {
......
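[Editor's note] A hedged user-space sketch of what the new btf_is_kernel() check above rejects: a raw BPF_MAP_CREATE that hands the kernel's own (vmlinux) BTF fd in *btf_fd*. How such an fd is obtained (for instance via BPF_BTF_GET_FD_BY_ID on the vmlinux BTF object) is assumed and out of scope, and the helper names are invented. After this change the call is expected to fail with EACCES:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

/* vmlinux_btf_fd: assumed to be an fd referring to the kernel's own BTF. */
static int try_map_create_with_kernel_btf(int vmlinux_btf_fd)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = 4;
	attr.value_size = 4;
	attr.max_entries = 1;
	attr.btf_fd = vmlinux_btf_fd;	/* bogus: map BTF must not be kernel BTF */
	attr.btf_key_type_id = 0;
	attr.btf_value_type_id = 1;	/* any id; rejection happens before it is checked */

	fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0 && errno == EACCES)
		printf("kernel BTF correctly rejected for map creation\n");
	return fd;
}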
@@ -9056,6 +9056,10 @@ static int check_btf_info(struct bpf_verifier_env *env,
btf = btf_get_by_fd(attr->prog_btf_fd);
if (IS_ERR(btf))
return PTR_ERR(btf);
+if (btf_is_kernel(btf)) {
+btf_put(btf);
+return -EACCES;
+}
env->prog->aux->btf = btf;
err = check_btf_func(env, attr, uattr);
......
@@ -5658,7 +5658,7 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
return -EINVAL;
-if (unlikely(flags & BPF_MTU_CHK_SEGS && len_diff))
+if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
return -EINVAL;
dev = __dev_via_ifindex(dev, ifindex);
@@ -5668,7 +5668,11 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
mtu = READ_ONCE(dev->mtu);
dev_len = mtu + dev->hard_header_len;
-skb_len = skb->len + len_diff; /* minus result pass check */
+/* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
+skb_len = *mtu_len ? *mtu_len + dev->hard_header_len : skb->len;
+skb_len += len_diff; /* minus result pass check */
if (skb_len <= dev_len) {
ret = BPF_MTU_CHK_RET_SUCCESS;
goto out;
@@ -5713,6 +5717,10 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
/* Add L2-header as dev MTU is L3 size */
dev_len = mtu + dev->hard_header_len;
+/* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
+if (*mtu_len)
+xdp_len = *mtu_len + dev->hard_header_len;
xdp_len += len_diff; /* minus result pass check */
if (xdp_len > dev_len)
ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
......
@@ -215,7 +215,7 @@ define do_install
if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
fi; \
-$(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
+$(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
endef
install_lib: all_cmd
......
@@ -128,6 +128,8 @@ static void test_check_mtu_xdp(__u32 mtu, __u32 ifindex)
test_check_mtu_run_xdp(skel, skel->progs.xdp_use_helper, mtu);
test_check_mtu_run_xdp(skel, skel->progs.xdp_exceed_mtu, mtu);
test_check_mtu_run_xdp(skel, skel->progs.xdp_minus_delta, mtu);
+test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len, mtu);
+test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len_exceed, mtu);
cleanup:
test_check_mtu__destroy(skel);
@@ -187,6 +189,8 @@ static void test_check_mtu_tc(__u32 mtu, __u32 ifindex)
test_check_mtu_run_tc(skel, skel->progs.tc_exceed_mtu, mtu);
test_check_mtu_run_tc(skel, skel->progs.tc_exceed_mtu_da, mtu);
test_check_mtu_run_tc(skel, skel->progs.tc_minus_delta, mtu);
+test_check_mtu_run_tc(skel, skel->progs.tc_input_len, mtu);
+test_check_mtu_run_tc(skel, skel->progs.tc_input_len_exceed, mtu);
cleanup:
test_check_mtu__destroy(skel);
}
......
@@ -105,6 +105,54 @@ int xdp_minus_delta(struct xdp_md *ctx)
return retval;
}
SEC("xdp")
int xdp_input_len(struct xdp_md *ctx)
{
int retval = XDP_PASS; /* Expected retval on successful test */
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
__u32 ifindex = GLOBAL_USER_IFINDEX;
__u32 data_len = data_end - data;
/* API allow user give length to check as input via mtu_len param,
* resulting MTU value is still output in mtu_len param after call.
*
* Input len is L3, like MTU and iph->tot_len.
* Remember XDP data_len is L2.
*/
__u32 mtu_len = data_len - ETH_HLEN;
if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0))
retval = XDP_ABORTED;
global_bpf_mtu_xdp = mtu_len;
return retval;
}
SEC("xdp")
int xdp_input_len_exceed(struct xdp_md *ctx)
{
int retval = XDP_ABORTED; /* Fail */
__u32 ifindex = GLOBAL_USER_IFINDEX;
int err;
/* API allow user give length to check as input via mtu_len param,
* resulting MTU value is still output in mtu_len param after call.
*
* Input length value is L3 size like MTU.
*/
__u32 mtu_len = GLOBAL_USER_MTU;
mtu_len += 1; /* Exceed with 1 */
err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0);
if (err == BPF_MTU_CHK_RET_FRAG_NEEDED)
retval = XDP_PASS ; /* Success in exceeding MTU check */
global_bpf_mtu_xdp = mtu_len;
return retval;
}
SEC("classifier")
int tc_use_helper(struct __sk_buff *ctx)
{
@@ -196,3 +244,47 @@ int tc_minus_delta(struct __sk_buff *ctx)
global_bpf_mtu_xdp = mtu_len;
return retval;
}
SEC("classifier")
int tc_input_len(struct __sk_buff *ctx)
{
int retval = BPF_OK; /* Expected retval on successful test */
__u32 ifindex = GLOBAL_USER_IFINDEX;
/* API allow user give length to check as input via mtu_len param,
* resulting MTU value is still output in mtu_len param after call.
*
* Input length value is L3 size.
*/
__u32 mtu_len = GLOBAL_USER_MTU;
if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0))
retval = BPF_DROP;
global_bpf_mtu_xdp = mtu_len;
return retval;
}
SEC("classifier")
int tc_input_len_exceed(struct __sk_buff *ctx)
{
int retval = BPF_DROP; /* Fail */
__u32 ifindex = GLOBAL_USER_IFINDEX;
int err;
/* API allow user give length to check as input via mtu_len param,
* resulting MTU value is still output in mtu_len param after call.
*
* Input length value is L3 size like MTU.
*/
__u32 mtu_len = GLOBAL_USER_MTU;
mtu_len += 1; /* Exceed with 1 */
err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0);
if (err == BPF_MTU_CHK_RET_FRAG_NEEDED)
retval = BPF_OK; /* Success in exceeding MTU check */
global_bpf_mtu_xdp = mtu_len;
return retval;
}