Commit e4e8452a authored by Willem de Bruijn, committed by David S. Miller

virtio-net: napi helper functions

Prepare virtio-net for tx napi by converting existing napi code to
use helper functions. This also deduplicates some logic.
Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 14933dc8
...@@ -239,6 +239,26 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) ...@@ -239,6 +239,26 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p; return p;
} }
/* Schedule NAPI for @napi and, if we won the right to schedule it,
 * suppress further virtqueue callbacks until the poll routine re-arms
 * them.  If napi_schedule_prep() fails, NAPI is already scheduled (or
 * being polled) and nothing needs doing.
 */
static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (!napi_schedule_prep(napi))
		return;

	virtqueue_disable_cb(vq);
	__napi_schedule(napi);
}
/* Complete a NAPI poll round that consumed @processed items.
 * Callbacks are re-armed via virtqueue_enable_cb_prepare() before
 * napi_complete_done(); if virtqueue_poll() then reports that work
 * arrived in that window, NAPI is rescheduled so it is not lost.
 */
static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int cb_token = virtqueue_enable_cb_prepare(vq);

	if (!napi_complete_done(napi, processed))
		return;

	if (unlikely(virtqueue_poll(vq, cb_token)))
		virtqueue_napi_schedule(napi, vq);
}
static void skb_xmit_done(struct virtqueue *vq) static void skb_xmit_done(struct virtqueue *vq)
{ {
struct virtnet_info *vi = vq->vdev->priv; struct virtnet_info *vi = vq->vdev->priv;
...@@ -936,27 +956,20 @@ static void skb_recv_done(struct virtqueue *rvq) ...@@ -936,27 +956,20 @@ static void skb_recv_done(struct virtqueue *rvq)
struct virtnet_info *vi = rvq->vdev->priv; struct virtnet_info *vi = rvq->vdev->priv;
struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
/* Schedule NAPI, Suppress further interrupts if successful. */ virtqueue_napi_schedule(&rq->napi, rvq);
if (napi_schedule_prep(&rq->napi)) {
virtqueue_disable_cb(rvq);
__napi_schedule(&rq->napi);
}
} }
static void virtnet_napi_enable(struct receive_queue *rq) static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{ {
napi_enable(&rq->napi); napi_enable(napi);
/* If all buffers were filled by other side before we napi_enabled, we /* If all buffers were filled by other side before we napi_enabled, we
* won't get another interrupt, so process any outstanding packets * won't get another interrupt, so process any outstanding packets now.
* now. virtnet_poll wants re-enable the queue, so we disable here. * Call local_bh_enable after to trigger softIRQ processing.
* We synchronize against interrupts via NAPI_STATE_SCHED */ */
if (napi_schedule_prep(&rq->napi)) { local_bh_disable();
virtqueue_disable_cb(rq->vq); virtqueue_napi_schedule(napi, vq);
local_bh_disable(); local_bh_enable();
__napi_schedule(&rq->napi);
local_bh_enable();
}
} }
static void refill_work(struct work_struct *work) static void refill_work(struct work_struct *work)
...@@ -971,7 +984,7 @@ static void refill_work(struct work_struct *work) ...@@ -971,7 +984,7 @@ static void refill_work(struct work_struct *work)
napi_disable(&rq->napi); napi_disable(&rq->napi);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
virtnet_napi_enable(rq); virtnet_napi_enable(rq->vq, &rq->napi);
/* In theory, this can happen: if we don't get any buffers in /* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. * we will *never* try to fill again.
...@@ -1011,21 +1024,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget) ...@@ -1011,21 +1024,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
{ {
struct receive_queue *rq = struct receive_queue *rq =
container_of(napi, struct receive_queue, napi); container_of(napi, struct receive_queue, napi);
unsigned int r, received; unsigned int received;
received = virtnet_receive(rq, budget); received = virtnet_receive(rq, budget);
/* Out of packets? */ /* Out of packets? */
if (received < budget) { if (received < budget)
r = virtqueue_enable_cb_prepare(rq->vq); virtqueue_napi_complete(napi, rq->vq, received);
if (napi_complete_done(napi, received)) {
if (unlikely(virtqueue_poll(rq->vq, r)) &&
napi_schedule_prep(napi)) {
virtqueue_disable_cb(rq->vq);
__napi_schedule(napi);
}
}
}
return received; return received;
} }
...@@ -1040,7 +1045,7 @@ static int virtnet_open(struct net_device *dev) ...@@ -1040,7 +1045,7 @@ static int virtnet_open(struct net_device *dev)
/* Make sure we have some buffers: if oom use wq. */ /* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0); schedule_delayed_work(&vi->refill, 0);
virtnet_napi_enable(&vi->rq[i]); virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
} }
return 0; return 0;
...@@ -1747,7 +1752,7 @@ static int virtnet_restore_up(struct virtio_device *vdev) ...@@ -1747,7 +1752,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
schedule_delayed_work(&vi->refill, 0); schedule_delayed_work(&vi->refill, 0);
for (i = 0; i < vi->max_queue_pairs; i++) for (i = 0; i < vi->max_queue_pairs; i++)
virtnet_napi_enable(&vi->rq[i]); virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
} }
netif_device_attach(vi->dev); netif_device_attach(vi->dev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.