Commit 6c4df17c authored by David S. Miller

Merge branch 'virtio_net-XDP-fixes'

Jesper Dangaard Brouer says:

====================
virtio_net: several bugs in XDP code for driver virtio_net

The virtio_net driver violates the original memory model of XDP,
causing hard-to-debug crashes.  At John Fastabend's request, instead
of removing the XDP feature I'm fixing as much as possible.  While
testing virtio_net with XDP_REDIRECT I found 4 different bugs.

Patch-1: not enough tail-room for build_skb in receive_mergeable();
 the only option is to disable XDP_REDIRECT in receive_mergeable()
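
For context: build_skb() places its struct skb_shared_info at the tail
of the buffer, so any buffer that may be redirected needs tail-room
beyond headroom + data, which the tightly packed mergeable buffers
cannot guarantee.  A minimal userspace sketch of that size arithmetic,
using stand-in values for SMP_CACHE_BYTES and the skb_shared_info size
rather than the kernel's own definitions:

#include <stdio.h>

/* stand-ins for the kernel definitions, for illustration only */
#define SMP_CACHE_BYTES   64
#define SKB_DATA_ALIGN(x) (((x) + SMP_CACHE_BYTES - 1) & ~(SMP_CACHE_BYTES - 1))
#define SHINFO_SIZE       320 /* approx sizeof(struct skb_shared_info) */

int main(void)
{
	unsigned int headroom = 256;  /* XDP_PACKET_HEADROOM */
	unsigned int data     = 1500; /* frame payload */

	/* same shape as the buflen calculation in receive_small():
	 * aligned headroom+data plus aligned tail-room for shinfo */
	unsigned int buflen = SKB_DATA_ALIGN(headroom + data) +
			      SKB_DATA_ALIGN(SHINFO_SIZE);

	printf("build_skb() needs a %u-byte buffer\n", buflen);
	return 0;
}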

Patch-2: XDP in receive_small() basically never worked (it checked the wrong header flag)
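
The wrong flag: the guard in receive_small() bailed out of the XDP
path whenever any bit in hdr->hdr.flags was set, and hosts commonly
set VIRTIO_NET_HDR_F_DATA_VALID there, so in practice every packet
skipped XDP.  Only GSO frames are actually incompatible with the
single-buffer XDP model; the corrected check, excerpted from the diff
below:

/* before: any flag, e.g. VIRTIO_NET_HDR_F_DATA_VALID, skipped XDP */
if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
	goto err_xdp;

/* after: reject only GSO frames */
if (unlikely(hdr->hdr.gso_type))
	goto err_xdp;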

Patch-3: fix memory leak for XDP_REDIRECT in error cases
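
The leak: on a failed XDP_TX or XDP_REDIRECT, the page backing the
frame was neither transmitted nor returned to the RX ring.  The fix
settles the ownership rule: __virtnet_xdp_xmit() just returns false
and leaves free/refcnt to the caller, and the callers now take the
existing err_xdp error path (which counts the drop and releases the
page) instead of leaking.  Schematically, in receive_small():

case XDP_REDIRECT:
	err = xdp_do_redirect(dev, &xdp, xdp_prog);
	if (err)
		goto err_xdp; /* error path recycles the page */
	*xdp_xmit = true;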

Patch-4: avoid crash when ndo_xdp_xmit is called on dev not ready for XDP
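
The crash: a program on another device can redirect into a virtio_net
device that has no XDP program attached and so never allocated its XDP
transmit queues.  An attached program is exactly what guarantees those
resources exist, so ndo_xdp_xmit now refuses such calls up front
(second hunk of the diff):

xdp_prog = rcu_dereference(rq->xdp_prog);
if (!xdp_prog)
	return -ENXIO;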

In the longer run, we should consider introducing a separate receive
function to use while an XDP program is attached, and changing the
memory model to be XDP-compatible at attach time.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9c4ff2a9 8dcc5b0a
@@ -443,12 +443,8 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
 	sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
 
 	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
-	if (unlikely(err)) {
-		struct page *page = virt_to_head_page(xdp->data);
-
-		put_page(page);
-		return false;
-	}
+	if (unlikely(err))
+		return false; /* Caller handle free/refcnt */
 
 	return true;
 }
@@ -456,8 +452,18 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
 static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	bool sent = __virtnet_xdp_xmit(vi, xdp);
+	struct receive_queue *rq = vi->rq;
+	struct bpf_prog *xdp_prog;
+	bool sent;
 
+	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
+	 * indicate XDP resources have been successfully allocated.
+	 */
+	xdp_prog = rcu_dereference(rq->xdp_prog);
+	if (!xdp_prog)
+		return -ENXIO;
+
+	sent = __virtnet_xdp_xmit(vi, xdp);
 	if (!sent)
 		return -ENOSPC;
 	return 0;
@@ -546,8 +552,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
 			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	struct page *page = virt_to_head_page(buf);
-	unsigned int delta = 0, err;
+	unsigned int delta = 0;
 	struct page *xdp_page;
+	bool sent;
+	int err;
+
 	len -= vi->hdr_len;
 
 	rcu_read_lock();
@@ -558,7 +567,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		void *orig_data;
 		u32 act;
 
-		if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
+		if (unlikely(hdr->hdr.gso_type))
 			goto err_xdp;
 
 		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
@@ -596,16 +605,19 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		delta = orig_data - xdp.data;
 		break;
 	case XDP_TX:
-		if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
+		sent = __virtnet_xdp_xmit(vi, &xdp);
+		if (unlikely(!sent)) {
 			trace_xdp_exception(vi->dev, xdp_prog, act);
-		else
-			*xdp_xmit = true;
+			goto err_xdp;
+		}
+		*xdp_xmit = true;
 		rcu_read_unlock();
 		goto xdp_xmit;
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(dev, &xdp, xdp_prog);
-		if (!err)
-			*xdp_xmit = true;
+		if (err)
+			goto err_xdp;
+		*xdp_xmit = true;
 		rcu_read_unlock();
 		goto xdp_xmit;
 	default:
@@ -677,7 +689,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	struct bpf_prog *xdp_prog;
 	unsigned int truesize;
 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
-	int err;
+	bool sent;
 
 	head_skb = NULL;
 
@@ -746,20 +758,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		}
 		break;
 	case XDP_TX:
-		if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
+		sent = __virtnet_xdp_xmit(vi, &xdp);
+		if (unlikely(!sent)) {
 			trace_xdp_exception(vi->dev, xdp_prog, act);
-		else
-			*xdp_xmit = true;
+			if (unlikely(xdp_page != page))
+				put_page(xdp_page);
+			goto err_xdp;
+		}
+		*xdp_xmit = true;
 		if (unlikely(xdp_page != page))
 			goto err_xdp;
 		rcu_read_unlock();
 		goto xdp_xmit;
-	case XDP_REDIRECT:
-		err = xdp_do_redirect(dev, &xdp, xdp_prog);
-		if (!err)
-			*xdp_xmit = true;
-		rcu_read_unlock();
-		goto xdp_xmit;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 	case XDP_ABORTED: