Commit 971e3057 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2021-04-08

The following pull-request contains BPF updates for your *net* tree.

We've added 4 non-merge commits during the last 2 day(s) which contain
a total of 4 files changed, 31 insertions(+), 10 deletions(-).

The main changes are:

1) Validate and reject invalid JIT branch displacements, from Piotr Krysiuk.

2) Fix incorrect unhash restore as well as fwd_alloc memory accounting in
   sock map, from John Fastabend.

====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ac075bdd 26f55a59
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1689,7 +1689,16 @@ st: if (is_imm8(insn->off))
 		}
 
 		if (image) {
-			if (unlikely(proglen + ilen > oldproglen)) {
+			/*
+			 * When populating the image, assert that:
+			 *
+			 *  i) We do not write beyond the allocated space, and
+			 * ii) addrs[i] did not change from the prior run, in order
+			 *     to validate assumptions made for computing branch
+			 *     displacements.
+			 */
+			if (unlikely(proglen + ilen > oldproglen ||
+				     proglen + ilen != addrs[i])) {
 				pr_err("bpf_jit: fatal error\n");
 				return -EFAULT;
 			}
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -2276,7 +2276,16 @@ emit_cond_jmp: jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
 		}
 
 		if (image) {
-			if (unlikely(proglen + ilen > oldproglen)) {
+			/*
+			 * When populating the image, assert that:
+			 *
+			 *  i) We do not write beyond the allocated space, and
+			 * ii) addrs[i] did not change from the prior run, in order
+			 *     to validate assumptions made for computing branch
+			 *     displacements.
+			 */
+			if (unlikely(proglen + ilen > oldproglen ||
+				     proglen + ilen != addrs[i])) {
 				pr_err("bpf_jit: fatal error\n");
 				return -EFAULT;
 			}
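
For context on the check both hunks add: the x86 JITs lay out the program over multiple passes, recording each instruction's end offset in addrs[] and computing branch displacements from those offsets; the image is only written once the layout has settled, so an instruction that encodes to a different length than the previous pass assumed would invalidate every displacement derived from addrs[]. A minimal userspace sketch of that multi-pass idea (a toy assembler, not the kernel's do_jit(); insn_size(), target[] and the rest are made-up names for illustration):

/* Toy multi-pass "JIT": every instruction is a jump whose encoded size
 * is 2 bytes if its displacement fits in 8 bits and 5 bytes otherwise.
 * addrs[i] holds the end offset of instruction i from the previous
 * pass; the image-filling pass re-checks that nothing changed size.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NINSN 4

static const int target[NINSN] = { 3, 2, 0, 1 };	/* jump targets */

static int insn_size(int disp)
{
	return (disp >= -128 && disp <= 127) ? 2 : 5;
}

static int do_pass(int *addrs, unsigned char *image, int oldproglen)
{
	int proglen = 0;

	for (int i = 0; i < NINSN; i++) {
		int start = target[i] ? addrs[target[i] - 1] : 0;
		int disp = start - addrs[i];	/* from last pass's layout */
		int ilen = insn_size(disp);

		if (image) {
			/* same guard as the patch above: never write past the
			 * allocation, and never let the layout the displacements
			 * were computed from go stale
			 */
			if (proglen + ilen > oldproglen ||
			    proglen + ilen != addrs[i]) {
				fprintf(stderr, "fatal: layout changed\n");
				return -1;
			}
			memset(image + proglen, 0x90, ilen);	/* fake bytes */
		}
		proglen += ilen;
		addrs[i] = proglen;
	}
	return proglen;
}

int main(void)
{
	int addrs[NINSN], prev = -1, proglen = 0;
	unsigned char *image;

	for (int i = 0; i < NINSN; i++)		/* worst-case initial layout */
		addrs[i] = (i + 1) * 5;

	while (proglen != prev) {		/* iterate until sizes settle */
		prev = proglen;
		proglen = do_pass(addrs, NULL, 0);
	}

	image = calloc(1, proglen);
	if (!image || do_pass(addrs, image, proglen) != proglen)
		return 1;
	printf("converged: %d bytes, layout validated\n", proglen);
	free(image);
	return 0;
}

The old guard (proglen + ilen > oldproglen alone) only rejected growth past the allocation; requiring proglen + ilen == addrs[i] as well rejects any drift between the measuring passes and the image pass.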
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -349,8 +349,13 @@ static inline void sk_psock_update_proto(struct sock *sk,
 static inline void sk_psock_restore_proto(struct sock *sk,
 					   struct sk_psock *psock)
 {
-	sk->sk_prot->unhash = psock->saved_unhash;
 	if (inet_csk_has_ulp(sk)) {
+		/* TLS does not have an unhash proto in SW cases, but we need
+		 * to ensure we stop using the sock_map unhash routine because
+		 * the associated psock is being removed. So use the original
+		 * unhash handler.
+		 */
+		WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
 		tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
 	} else {
 		sk->sk_write_space = psock->saved_write_space;
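
The hunk above only touches the ULP branch: when TLS owns sk->sk_prot, sk_psock_restore_proto() now puts the saved unhash handler back there (with WRITE_ONCE()) instead of unconditionally patching unhash before deciding which path applies. A rough userspace analogy of that restore pattern (simplified, hypothetical names, not the sockmap code; a plain assignment stands in for WRITE_ONCE()):

/* An "ops" table whose unhash callback was replaced while the socket
 * sat in a map, with the original saved for restore.  When an upper
 * layer ("ulp") keeps its own private ops copy, a full table swap on
 * restore would discard the ULP's other callbacks, so only the saved
 * unhash pointer is put back there.
 */
#include <stdio.h>
#include <stdbool.h>

struct proto_ops {
	void (*unhash)(void);
	void (*write_space)(void);
};

static void orig_unhash(void) { puts("orig_unhash"); }
static void orig_wspace(void) { puts("orig_write_space"); }
static void map_unhash(void)  { puts("sockmap unhash (must not run after restore)"); }
static void ulp_wspace(void)  { puts("ulp write_space"); }

struct sock {
	struct proto_ops *prot;		/* current ops, possibly the ULP's copy */
	bool has_ulp;
};

struct psock {
	struct proto_ops *saved_proto;	/* ops before sockmap took over */
	void (*saved_unhash)(void);
};

static void restore_proto(struct sock *sk, struct psock *psock)
{
	if (sk->has_ulp) {
		/* ULP owns sk->prot: patch back only the unhash handler so the
		 * sockmap callback is no longer reachable, keep the ULP's
		 * remaining callbacks intact
		 */
		sk->prot->unhash = psock->saved_unhash;
	} else {
		/* no ULP: the whole original ops table can be restored */
		sk->prot = psock->saved_proto;
	}
}

int main(void)
{
	struct proto_ops base = { orig_unhash, orig_wspace };
	struct proto_ops ulp  = { map_unhash, ulp_wspace };	/* ULP copy, unhash hijacked */
	struct sock sk = { .prot = &ulp, .has_ulp = true };
	struct psock ps = { .saved_proto = &base, .saved_unhash = orig_unhash };

	sk.prot->unhash();		/* before restore: sockmap's handler */
	restore_proto(&sk, &ps);
	sk.prot->unhash();		/* after restore: orig_unhash again */
	sk.prot->write_space();		/* ULP callback preserved */
	return 0;
}

The point the comment in the hunk makes carries over: once the psock is being removed, nothing may keep reaching the sock_map unhash routine, so the original handler must become reachable again even though the ULP keeps the rest of its callbacks.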
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -488,6 +488,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
 	if (unlikely(!msg))
 		return -EAGAIN;
 	sk_msg_init(msg);
+	skb_set_owner_r(skb, sk);
 	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
 }
 
@@ -790,7 +791,6 @@ static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int
 {
 	switch (verdict) {
 	case __SK_REDIRECT:
-		skb_set_owner_r(skb, sk);
 		sk_psock_skb_redirect(skb);
 		break;
 	case __SK_PASS:
@@ -808,10 +808,6 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
 	rcu_read_lock();
 	prog = READ_ONCE(psock->progs.skb_verdict);
 	if (likely(prog)) {
-		/* We skip full set_owner_r here because if we do a SK_PASS
-		 * or SK_DROP we can skip skb memory accounting and use the
-		 * TLS context.
-		 */
 		skb->sk = psock->sk;
 		tcp_skb_bpf_redirect_clear(skb);
 		ret = sk_psock_bpf_run(psock, prog, skb);
@@ -880,12 +876,13 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
 		kfree_skb(skb);
 		goto out;
 	}
-	skb_set_owner_r(skb, sk);
 	prog = READ_ONCE(psock->progs.skb_verdict);
 	if (likely(prog)) {
+		skb->sk = sk;
 		tcp_skb_bpf_redirect_clear(skb);
 		ret = sk_psock_bpf_run(psock, prog, skb);
 		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+		skb->sk = NULL;
 	}
 	sk_psock_verdict_apply(psock, skb, ret);
 out:
@@ -956,12 +953,13 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
 		kfree_skb(skb);
 		goto out;
 	}
-	skb_set_owner_r(skb, sk);
 	prog = READ_ONCE(psock->progs.skb_verdict);
 	if (likely(prog)) {
+		skb->sk = sk;
 		tcp_skb_bpf_redirect_clear(skb);
 		ret = sk_psock_bpf_run(psock, prog, skb);
 		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+		skb->sk = NULL;
 	}
 	sk_psock_verdict_apply(psock, skb, ret);
 out:
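
Taken together, the skmsg.c hunks move skb_set_owner_r() out of the read/verdict paths and into sk_psock_skb_ingress_self(), so the skb is charged to a socket's receive memory only once it is actually queued to that socket, while the verdict run merely borrows skb->sk (set before, cleared after the BPF program). A toy sketch of that charge-on-enqueue pattern (userspace, made-up names, not the kernel's memory accounting):

/* Charge an skb to a socket only when it is queued there; the verdict
 * hook borrows skb->sk without taking a charge, so redirected skbs are
 * accounted by whichever socket finally queues them.
 */
#include <assert.h>
#include <stdio.h>

struct sock { int rmem; };

struct skb {
	struct sock *owner;	/* non-NULL only once charged */
	struct sock *sk;	/* borrowed pointer for the verdict hook */
	int truesize;
};

static void charge(struct skb *skb, struct sock *sk)	/* ~skb_set_owner_r */
{
	skb->owner = sk;
	sk->rmem += skb->truesize;
}

static void free_skb(struct skb *skb)			/* destructor uncharges */
{
	if (skb->owner)
		skb->owner->rmem -= skb->truesize;
}

static void verdict(struct skb *skb, struct sock *sk)
{
	skb->sk = sk;		/* borrow, no accounting */
	/* ... run the verdict program here ... */
	skb->sk = NULL;
}

int main(void)
{
	struct sock self = { 0 }, other = { 0 };
	struct skb a = { .truesize = 256 }, b = { .truesize = 256 };

	verdict(&a, &self);	/* 'a' stays local: charge it when queued */
	charge(&a, &self);

	verdict(&b, &self);	/* 'b' is redirected: charged at its destination */
	charge(&b, &other);

	free_skb(&a);
	free_skb(&b);
	assert(self.rmem == 0 && other.rmem == 0);
	puts("accounting balanced");
	return 0;
}

Charging only at the enqueue point keeps each socket's counter matched to the skbs it actually holds, which is the fwd_alloc accounting issue the pull request message refers to.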