Commit 347cb5de authored by Jakub Kicinski

Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2022-04-27

We've added 5 non-merge commits during the last 20 day(s) which contain
a total of 6 files changed, 34 insertions(+), 12 deletions(-).

The main changes are:

1) Fix xsk sockets when rx and tx are separately bound to the same umem; also
   fix xsk copy mode combined with busy poll, from Maciej Fijalkowski.

2) Fix BPF tunnel/collect_md helpers when used from the bpf_xmit lwt hook, which
   triggered a crash due to an invalid metadata_dst access, from Eyal Birger.

3) Fix release of page pool in XDP live packet mode, from Toke Høiland-Jørgensen.

4) Fix potential NULL pointer dereference in kretprobes, from Adam Zabrocki.

   (Masami & Steven preferred this small fix to be routed via bpf tree given it's
    follow-up fix to Masami's rethook work that went via bpf earlier, too.)

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  xsk: Fix possible crash when multiple sockets are created
  kprobes: Fix KRETPROBES when CONFIG_KRETPROBE_ON_RETHOOK is set
  bpf, lwt: Fix crash when using bpf_skb_set_tunnel_key() from bpf_xmit lwt hook
  bpf: Fix release of page_pool in BPF_PROG_RUN in test runner
  xsk: Fix l2fwd for copy mode + busy poll combo
====================

Link: https://lore.kernel.org/r/20220427212748.9576-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 7b5148be ba3beec2
@@ -97,6 +97,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
                   u16 queue_id, u16 flags);
 int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
                          struct net_device *dev, u16 queue_id);
+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
 void xp_destroy(struct xsk_buff_pool *pool);
 void xp_get_pool(struct xsk_buff_pool *pool);
 bool xp_put_pool(struct xsk_buff_pool *pool);
...
@@ -2126,7 +2126,7 @@ static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
         struct kprobe_ctlblk *kcb;

         /* The data must NOT be null. This means rethook data structure is broken. */
-        if (WARN_ON_ONCE(!data))
+        if (WARN_ON_ONCE(!data) || !rp->handler)
                 return;

         __this_cpu_write(current_kprobe, &rp->kp);
...
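
The guard added above matters because a kretprobe may legitimately be registered
with only an entry_handler and a NULL return handler; with
CONFIG_KRETPROBE_ON_RETHOOK, kretprobe_rethook_handler() still fires on function
return and previously called rp->handler unconditionally. A minimal module
sketch of such an entry-only kretprobe (the probed symbol is only an
illustrative choice):

#include <linux/kprobes.h>
#include <linux/module.h>

/* Entry handler only; no work is needed on the return path. */
static int entry_only(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        return 0;
}

static struct kretprobe krp = {
        .kp.symbol_name = "kernel_clone",       /* illustrative target */
        .entry_handler  = entry_only,
        /* .handler deliberately left NULL; before the fix above, the
         * rethook path still invoked it on return and crashed.
         */
};

static int __init krp_init(void)
{
        return register_kretprobe(&krp);
}

static void __exit krp_exit(void)
{
        unregister_kretprobe(&krp);
}

module_init(krp_init);
module_exit(krp_exit);
MODULE_LICENSE("GPL");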
@@ -108,6 +108,7 @@ struct xdp_test_data {
         struct page_pool *pp;
         struct xdp_frame **frames;
         struct sk_buff **skbs;
+        struct xdp_mem_info mem;
         u32 batch_size;
         u32 frame_cnt;
 };
@@ -147,7 +148,6 @@ static void xdp_test_run_init_page(struct page *page, void *arg)

 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
 {
-        struct xdp_mem_info mem = {};
         struct page_pool *pp;
         int err = -ENOMEM;
         struct page_pool_params pp_params = {
@@ -174,7 +174,7 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
         }

         /* will copy 'mem.id' into pp->xdp_mem_id */
-        err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp);
+        err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
         if (err)
                 goto err_mmodel;
@@ -202,6 +202,7 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)

 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
 {
+        xdp_unreg_mem_model(&xdp->mem);
         page_pool_destroy(xdp->pp);
         kfree(xdp->frames);
         kfree(xdp->skbs);
...
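
For context, the path above is exercised by BPF_PROG_RUN's XDP live packet mode:
each test run builds a page_pool and registers it as an XDP memory model, but
before this fix teardown only destroyed the pool and never called
xdp_unreg_mem_model(), so the registration (and the pool it pinned) leaked. A
hedged userspace sketch of driving that mode via libbpf; prog_fd, pkt and
pkt_len are assumed to be prepared by the caller:

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Run an XDP program against live frames; each call makes the kernel
 * set up and tear down the page_pool touched by the fix above. */
int run_xdp_live(int prog_fd, void *pkt, __u32 pkt_len)
{
        LIBBPF_OPTS(bpf_test_run_opts, opts,
                .data_in = pkt,
                .data_size_in = pkt_len,
                .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
                .repeat = 64,
        );

        return bpf_prog_test_run_opts(prog_fd, &opts);
}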
@@ -159,10 +159,8 @@ static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
         return dst->lwtstate->orig_output(net, sk, skb);
 }

-static int xmit_check_hhlen(struct sk_buff *skb)
+static int xmit_check_hhlen(struct sk_buff *skb, int hh_len)
 {
-        int hh_len = skb_dst(skb)->dev->hard_header_len;
-
         if (skb_headroom(skb) < hh_len) {
                 int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
@@ -274,6 +272,7 @@ static int bpf_xmit(struct sk_buff *skb)

         bpf = bpf_lwt_lwtunnel(dst->lwtstate);
         if (bpf->xmit.prog) {
+                int hh_len = dst->dev->hard_header_len;
                 __be16 proto = skb->protocol;
                 int ret;
@@ -291,7 +290,7 @@ static int bpf_xmit(struct sk_buff *skb)
                 /* If the header was expanded, headroom might be too
                  * small for L2 header to come, expand as needed.
                  */
-                ret = xmit_check_hhlen(skb);
+                ret = xmit_check_hhlen(skb, hh_len);
                 if (unlikely(ret))
                         return ret;
...
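
The crash fixed here stems from bpf_skb_set_tunnel_key() replacing the skb's
dst with a metadata_dst that carries no real device pointer, so reading
skb_dst(skb)->dev->hard_header_len after the program had run dereferenced NULL.
Caching hh_len from the original dst before invoking the program sidesteps
that. A minimal lwt xmit program of the kind that triggers this path (the
address and tunnel id are placeholders):

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("lwt_xmit")
int set_tunnel(struct __sk_buff *skb)
{
        struct bpf_tunnel_key key = {};

        key.remote_ipv4 = 0xac100101;   /* 172.16.1.1, placeholder */
        key.tunnel_id   = 42;           /* placeholder VNI */
        key.tunnel_ttl  = 64;

        /* From here on, skb_dst() points at a metadata_dst; the old
         * xmit_check_hhlen() then read skb_dst(skb)->dev and crashed. */
        if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
                                   BPF_F_ZERO_CSUM_TX) < 0)
                return BPF_DROP;

        return BPF_OK;
}

char _license[] SEC("license") = "GPL";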
@@ -639,7 +639,7 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
         if (sk_can_busy_loop(sk))
                 sk_busy_loop(sk, 1); /* only support non-blocking sockets */

-        if (xsk_no_wakeup(sk))
+        if (xs->zc && xsk_no_wakeup(sk))
                 return 0;

         pool = xs->pool;
@@ -967,6 +967,19 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)

                         xp_get_pool(umem_xs->pool);
                         xs->pool = umem_xs->pool;
+
+                        /* If underlying shared umem was created without Tx
+                         * ring, allocate Tx descs array that Tx batching API
+                         * utilizes
+                         */
+                        if (xs->tx && !xs->pool->tx_descs) {
+                                err = xp_alloc_tx_descs(xs->pool, xs);
+                                if (err) {
+                                        xp_put_pool(xs->pool);
+                                        sockfd_put(sock);
+                                        goto out_unlock;
+                                }
+                        }
                 }

                 xdp_get_umem(umem_xs->umem);
...
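
Two user-visible scenarios are addressed above. First, in copy mode the
sendmsg() call is what actually transmits, so the busy-poll shortcut through
xsk_no_wakeup() is now restricted to zero-copy (xs->zc) sockets; previously
l2fwd with copy mode plus busy poll sent nothing. Second, binding a Tx-only
socket to a umem originally registered by an Rx-only socket left
pool->tx_descs unallocated and crashed later in the Tx path. A hedged sketch
of that second setup via the raw AF_XDP socket API; umem registration, ring
mmaps and error handling are elided:

#include <linux/if_xdp.h>
#include <net/if.h>
#include <sys/socket.h>

/* rx_fd: an AF_XDP socket that registered the umem and configured only
 * an Rx ring (plus fill/completion rings). Bind a second, Tx-only
 * socket to the same umem/queue via XDP_SHARED_UMEM. */
int bind_tx_only_shared(int rx_fd, const char *ifname, __u32 queue_id)
{
        struct sockaddr_xdp sxdp = {};
        int tx_ring_size = 2048;
        int tx_fd = socket(AF_XDP, SOCK_RAW, 0);

        if (tx_fd < 0)
                return -1;

        /* Tx ring only; no Rx ring on this socket. Before the fix, the
         * shared pool then had no tx_descs array to transmit from. */
        if (setsockopt(tx_fd, SOL_XDP, XDP_TX_RING,
                       &tx_ring_size, sizeof(tx_ring_size)) < 0)
                return -1;

        sxdp.sxdp_family = AF_XDP;
        sxdp.sxdp_ifindex = if_nametoindex(ifname);
        sxdp.sxdp_queue_id = queue_id;
        sxdp.sxdp_flags = XDP_SHARED_UMEM;
        sxdp.sxdp_shared_umem_fd = rx_fd;

        return bind(tx_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}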
@@ -42,6 +42,16 @@ void xp_destroy(struct xsk_buff_pool *pool)
         kvfree(pool);
 }

+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
+{
+        pool->tx_descs = kvcalloc(xs->tx->nentries, sizeof(*pool->tx_descs),
+                                  GFP_KERNEL);
+        if (!pool->tx_descs)
+                return -ENOMEM;
+
+        return 0;
+}
+
 struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                                                 struct xdp_umem *umem)
 {
@@ -59,11 +69,9 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
         if (!pool->heads)
                 goto out;

-        if (xs->tx) {
-                pool->tx_descs = kcalloc(xs->tx->nentries, sizeof(*pool->tx_descs), GFP_KERNEL);
-                if (!pool->tx_descs)
+        if (xs->tx)
+                if (xp_alloc_tx_descs(pool, xs))
                         goto out;
-        }

         pool->chunk_mask = ~((u64)umem->chunk_size - 1);
         pool->addrs_cnt = umem->size;
...
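
One deliberate detail of the new helper: the allocation switches from kcalloc()
to kvcalloc(), so a large Tx descriptor array can fall back to vmalloc'ed
memory when physically contiguous pages are scarce, and kvfree() frees memory
from either allocator.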