Commit 1915a712 authored by Michael S. Tsirkin, committed by Rusty Russell

virtio_net: use virtqueue_xxx wrappers

Switch virtio_net to new virtqueue_xxx wrappers.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent 09ec6b69
...@@ -119,7 +119,7 @@ static void skb_xmit_done(struct virtqueue *svq) ...@@ -119,7 +119,7 @@ static void skb_xmit_done(struct virtqueue *svq)
struct virtnet_info *vi = svq->vdev->priv; struct virtnet_info *vi = svq->vdev->priv;
/* Suppress further interrupts. */ /* Suppress further interrupts. */
svq->vq_ops->disable_cb(svq); virtqueue_disable_cb(svq);
/* We were probably waiting for more output buffers. */ /* We were probably waiting for more output buffers. */
netif_wake_queue(vi->dev); netif_wake_queue(vi->dev);
...@@ -207,7 +207,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb) ...@@ -207,7 +207,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
return -EINVAL; return -EINVAL;
} }
page = vi->rvq->vq_ops->get_buf(vi->rvq, &len); page = virtqueue_get_buf(vi->rvq, &len);
if (!page) { if (!page) {
pr_debug("%s: rx error: %d buffers missing\n", pr_debug("%s: rx error: %d buffers missing\n",
skb->dev->name, hdr->mhdr.num_buffers); skb->dev->name, hdr->mhdr.num_buffers);
...@@ -339,7 +339,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) ...@@ -339,7 +339,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
skb_to_sgvec(skb, sg + 1, 0, skb->len); skb_to_sgvec(skb, sg + 1, 0, skb->len);
err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb); err = virtqueue_add_buf(vi->rvq, sg, 0, 2, skb);
if (err < 0) if (err < 0)
dev_kfree_skb(skb); dev_kfree_skb(skb);
...@@ -386,7 +386,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) ...@@ -386,7 +386,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
/* chain first in list head */ /* chain first in list head */
first->private = (unsigned long)list; first->private = (unsigned long)list;
err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2, err = virtqueue_add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
first); first);
if (err < 0) if (err < 0)
give_pages(vi, first); give_pages(vi, first);
...@@ -406,7 +406,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp) ...@@ -406,7 +406,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
sg_init_one(&sg, page_address(page), PAGE_SIZE); sg_init_one(&sg, page_address(page), PAGE_SIZE);
err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page); err = virtqueue_add_buf(vi->rvq, &sg, 0, 1, page);
if (err < 0) if (err < 0)
give_pages(vi, page); give_pages(vi, page);
...@@ -435,7 +435,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) ...@@ -435,7 +435,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
} while (err > 0); } while (err > 0);
if (unlikely(vi->num > vi->max)) if (unlikely(vi->num > vi->max))
vi->max = vi->num; vi->max = vi->num;
vi->rvq->vq_ops->kick(vi->rvq); virtqueue_kick(vi->rvq);
return !oom; return !oom;
} }
...@@ -444,7 +444,7 @@ static void skb_recv_done(struct virtqueue *rvq) ...@@ -444,7 +444,7 @@ static void skb_recv_done(struct virtqueue *rvq)
struct virtnet_info *vi = rvq->vdev->priv; struct virtnet_info *vi = rvq->vdev->priv;
/* Schedule NAPI, Suppress further interrupts if successful. */ /* Schedule NAPI, Suppress further interrupts if successful. */
if (napi_schedule_prep(&vi->napi)) { if (napi_schedule_prep(&vi->napi)) {
rvq->vq_ops->disable_cb(rvq); virtqueue_disable_cb(rvq);
__napi_schedule(&vi->napi); __napi_schedule(&vi->napi);
} }
} }
...@@ -473,7 +473,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) ...@@ -473,7 +473,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
again: again:
while (received < budget && while (received < budget &&
(buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) { (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
receive_buf(vi->dev, buf, len); receive_buf(vi->dev, buf, len);
--vi->num; --vi->num;
received++; received++;
...@@ -487,9 +487,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget) ...@@ -487,9 +487,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
/* Out of packets? */ /* Out of packets? */
if (received < budget) { if (received < budget) {
napi_complete(napi); napi_complete(napi);
if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) && if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
napi_schedule_prep(napi)) { napi_schedule_prep(napi)) {
vi->rvq->vq_ops->disable_cb(vi->rvq); virtqueue_disable_cb(vi->rvq);
__napi_schedule(napi); __napi_schedule(napi);
goto again; goto again;
} }
...@@ -503,7 +503,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi) ...@@ -503,7 +503,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
struct sk_buff *skb; struct sk_buff *skb;
unsigned int len, tot_sgs = 0; unsigned int len, tot_sgs = 0;
while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) { while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb); pr_debug("Sent skb %p\n", skb);
vi->dev->stats.tx_bytes += skb->len; vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++; vi->dev->stats.tx_packets++;
...@@ -559,7 +559,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) ...@@ -559,7 +559,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr); sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb); return virtqueue_add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
} }
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
...@@ -578,14 +578,14 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -578,14 +578,14 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(capacity < 0)) { if (unlikely(capacity < 0)) {
netif_stop_queue(dev); netif_stop_queue(dev);
dev_warn(&dev->dev, "Unexpected full queue\n"); dev_warn(&dev->dev, "Unexpected full queue\n");
if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { if (unlikely(!virtqueue_enable_cb(vi->svq))) {
vi->svq->vq_ops->disable_cb(vi->svq); virtqueue_disable_cb(vi->svq);
netif_start_queue(dev); netif_start_queue(dev);
goto again; goto again;
} }
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
vi->svq->vq_ops->kick(vi->svq); virtqueue_kick(vi->svq);
/* Don't wait up for transmitted skbs to be freed. */ /* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb); skb_orphan(skb);
...@@ -595,12 +595,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -595,12 +595,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
* before it gets out of hand. Naturally, this wastes entries. */ * before it gets out of hand. Naturally, this wastes entries. */
if (capacity < 2+MAX_SKB_FRAGS) { if (capacity < 2+MAX_SKB_FRAGS) {
netif_stop_queue(dev); netif_stop_queue(dev);
if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { if (unlikely(!virtqueue_enable_cb(vi->svq))) {
/* More just got used, free them then recheck. */ /* More just got used, free them then recheck. */
capacity += free_old_xmit_skbs(vi); capacity += free_old_xmit_skbs(vi);
if (capacity >= 2+MAX_SKB_FRAGS) { if (capacity >= 2+MAX_SKB_FRAGS) {
netif_start_queue(dev); netif_start_queue(dev);
vi->svq->vq_ops->disable_cb(vi->svq); virtqueue_disable_cb(vi->svq);
} }
} }
} }
...@@ -645,7 +645,7 @@ static int virtnet_open(struct net_device *dev) ...@@ -645,7 +645,7 @@ static int virtnet_open(struct net_device *dev)
* now. virtnet_poll wants re-enable the queue, so we disable here. * now. virtnet_poll wants re-enable the queue, so we disable here.
* We synchronize against interrupts via NAPI_STATE_SCHED */ * We synchronize against interrupts via NAPI_STATE_SCHED */
if (napi_schedule_prep(&vi->napi)) { if (napi_schedule_prep(&vi->napi)) {
vi->rvq->vq_ops->disable_cb(vi->rvq); virtqueue_disable_cb(vi->rvq);
__napi_schedule(&vi->napi); __napi_schedule(&vi->napi);
} }
return 0; return 0;
...@@ -682,15 +682,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, ...@@ -682,15 +682,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sg_set_buf(&sg[i + 1], sg_virt(s), s->length); sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0); BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
vi->cvq->vq_ops->kick(vi->cvq); virtqueue_kick(vi->cvq);
/* /*
* Spin for a response, the kick causes an ioport write, trapping * Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately. * into the hypervisor, so the request should be handled immediately.
*/ */
while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp)) while (!virtqueue_get_buf(vi->cvq, &tmp))
cpu_relax(); cpu_relax();
return status == VIRTIO_NET_OK; return status == VIRTIO_NET_OK;
...@@ -1006,13 +1006,13 @@ static void free_unused_bufs(struct virtnet_info *vi) ...@@ -1006,13 +1006,13 @@ static void free_unused_bufs(struct virtnet_info *vi)
{ {
void *buf; void *buf;
while (1) { while (1) {
buf = vi->svq->vq_ops->detach_unused_buf(vi->svq); buf = virtqueue_detach_unused_buf(vi->svq);
if (!buf) if (!buf)
break; break;
dev_kfree_skb(buf); dev_kfree_skb(buf);
} }
while (1) { while (1) {
buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq); buf = virtqueue_detach_unused_buf(vi->rvq);
if (!buf) if (!buf)
break; break;
if (vi->mergeable_rx_bufs || vi->big_packets) if (vi->mergeable_rx_bufs || vi->big_packets)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment