Commit a37f1229 authored by David Vrabel, committed by David S. Miller

xen-netback: batch copies for multiple to-guest rx packets

Instead of flushing the copy ops when a packet is complete, complete
packets when their copy ops are done.  This improves performance by
reducing the number of grant copy hypercalls.

Latency is still limited by the relatively small size of the copy
batch.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
[re-based]
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 98f6d57c
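
To make the control flow concrete, here is a minimal, self-contained C sketch of the pattern the diff implements: queue the copy descriptors, park finished packets on a list, and complete them all in one flush so one batched copy (one hypercall in the real driver) covers many packets. All names below (fake_packet, copy_state, batch_copy, and so on) are hypothetical stand-ins for illustration only, not the kernel's gnttab_copy/sk_buff API.

/* Sketch of "defer packet completion until the copy batch is flushed".
 * Every type and helper here is a stand-in, not the real kernel API.
 */
#include <stdio.h>
#include <stddef.h>

#define COPY_BATCH_SIZE 4

struct fake_packet {
        int id;
        struct fake_packet *next;       /* parked until the flush */
};

struct copy_state {
        int ops[COPY_BATCH_SIZE];       /* pending copy descriptors */
        unsigned int num;
        struct fake_packet *completed;  /* packets waiting on those copies */
};

/* Stand-in for gnttab_batch_copy(): one "hypercall" per flush. */
static void batch_copy(const int *ops, unsigned int num)
{
        printf("hypercall: copying %u chunks\n", num);
}

static void flush(struct copy_state *cs)
{
        struct fake_packet *pkt;

        if (cs->num)
                batch_copy(cs->ops, cs->num);
        cs->num = 0;

        /* Only now are the copies done, so only now are the packets done. */
        while ((pkt = cs->completed) != NULL) {
                cs->completed = pkt->next;
                printf("packet %d completed\n", pkt->id);
        }
}

static void add_copy(struct copy_state *cs, int op)
{
        if (cs->num == COPY_BATCH_SIZE)
                flush(cs);              /* batch full: flush early */
        cs->ops[cs->num++] = op;
}

static void queue_packet(struct copy_state *cs, struct fake_packet *pkt)
{
        /* The old behaviour would flush here; instead just park the packet. */
        pkt->next = cs->completed;
        cs->completed = pkt;
}

int main(void)
{
        struct copy_state cs = { .num = 0, .completed = NULL };
        struct fake_packet pkts[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

        for (int i = 0; i < 3; i++) {
                add_copy(&cs, 2 * i);           /* two chunks per packet */
                add_copy(&cs, 2 * i + 1);
                queue_packet(&cs, &pkts[i]);
        }
        flush(&cs);                             /* one flush completes everything */
        return 0;
}

The trade-off is the one the commit message notes: a packet is not completed until its batch is flushed, so completion latency is bounded by how quickly the (relatively small) batch fills or is explicitly flushed at the end of the RX pass.
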
@@ -132,6 +132,7 @@ struct xenvif_copy_state {
 	struct gnttab_copy op[COPY_BATCH_SIZE];
 	RING_IDX idx[COPY_BATCH_SIZE];
 	unsigned int num;
+	struct sk_buff_head *completed;
 };
 
 struct xenvif_queue { /* Per-queue data for xenvif */
@@ -133,6 +133,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
 static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
 {
 	unsigned int i;
+	int notify;
 
 	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
@@ -154,6 +155,13 @@ static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
 	}
 
 	queue->rx_copy.num = 0;
+
+	/* Push responses for all completed packets. */
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
+	if (notify)
+		notify_remote_via_irq(queue->rx_irq);
+
+	__skb_queue_purge(queue->rx_copy.completed);
 }
 
 static void xenvif_rx_copy_add(struct xenvif_queue *queue,
@@ -279,18 +287,10 @@ static void xenvif_rx_next_skb(struct xenvif_queue *queue,
 static void xenvif_rx_complete(struct xenvif_queue *queue,
 			       struct xenvif_pkt_state *pkt)
 {
-	int notify;
-
-	/* Complete any outstanding copy ops for this skb. */
-	xenvif_rx_copy_flush(queue);
-
-	/* Push responses and notify. */
+	/* All responses are ready to be pushed. */
 	queue->rx.rsp_prod_pvt = queue->rx.req_cons;
-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
-	if (notify)
-		notify_remote_via_irq(queue->rx_irq);
 
-	dev_kfree_skb(pkt->skb);
+	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
 }
 
 static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
@@ -429,13 +429,20 @@ void xenvif_rx_skb(struct xenvif_queue *queue)
 
 void xenvif_rx_action(struct xenvif_queue *queue)
 {
+	struct sk_buff_head completed_skbs;
 	unsigned int work_done = 0;
 
+	__skb_queue_head_init(&completed_skbs);
+	queue->rx_copy.completed = &completed_skbs;
+
 	while (xenvif_rx_ring_slots_available(queue) &&
 	       work_done < RX_BATCH_SIZE) {
 		xenvif_rx_skb(queue);
 		work_done++;
 	}
+
+	/* Flush any pending copies and complete all skbs. */
+	xenvif_rx_copy_flush(queue);
 }
 
 static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)