Commit c2677a6f authored by Jennifer Herbert, committed by David Vrabel

xen-netback: use foreign page information from the pages themselves

Use the foreign page flag in netback to get the domid and grant ref
needed for the grant copy.  This significantly simplifies the netback
code and makes netback work with foreign pages from other backends
(e.g., blkback).

This allows blkback to use iSCSI disks provided by domUs running on
the same host.
Signed-off-by: Jennifer Herbert <jennifer.herbert@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
parent 8da7633f
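
To make the mechanism in the commit message concrete, here is a small sketch (not part of the patch) of the grant-copy source selection that the diff below moves netback to. xen_page_foreign(), struct xen_page_foreign and the gnttab_copy fields are taken from the hunks themselves; the helper name fill_copy_source() and the body of the DOMID_SELF fallback are illustrative assumptions, not the actual code, which lives inline in xenvif_gop_frag_copy():

#include <linux/mm.h>
#include <xen/page.h>                     /* xen_page_foreign() */
#include <xen/interface/grant_table.h>    /* struct gnttab_copy, GNTCOPY_* */

/* Illustrative only: choose the grant-copy source for @page.  Foreign
 * pages carry their owning domid and grant ref in the page itself, so
 * no walk of the skb's ubuf_info chain is needed.
 */
static void fill_copy_source(struct gnttab_copy *copy_gop, struct page *page)
{
        struct xen_page_foreign *foreign = xen_page_foreign(page);

        if (foreign) {
                /* Page granted by another domain: copy by grant reference. */
                copy_gop->source.domid = foreign->domid;
                copy_gop->source.u.ref = foreign->gref;
                copy_gop->flags |= GNTCOPY_source_gref;
        } else {
                /* Local page: copy from our own frame (assumed fallback,
                 * mirroring the existing else branch visible in the diff). */
                copy_gop->source.domid = DOMID_SELF;
                copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
        }
}

Because the domid and grant ref now travel with the page, the same copy path works for pages granted by any backend, which is what lets the diff remove the ubuf_info chain walk from xenvif_gop_skb().
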
drivers/net/xen-netback/netback.c

@@ -314,9 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
 static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
                                  struct netrx_pending_operations *npo,
                                  struct page *page, unsigned long size,
-                                 unsigned long offset, int *head,
-                                 struct xenvif_queue *foreign_queue,
-                                 grant_ref_t foreign_gref)
+                                 unsigned long offset, int *head)
 {
         struct gnttab_copy *copy_gop;
         struct xenvif_rx_meta *meta;
@@ -333,6 +331,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
         offset &= ~PAGE_MASK;
 
         while (size > 0) {
+                struct xen_page_foreign *foreign;
+
                 BUG_ON(offset >= PAGE_SIZE);
                 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
 
@@ -361,9 +361,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
                 copy_gop->flags = GNTCOPY_dest_gref;
                 copy_gop->len = bytes;
 
-                if (foreign_queue) {
-                        copy_gop->source.domid = foreign_queue->vif->domid;
-                        copy_gop->source.u.ref = foreign_gref;
+                foreign = xen_page_foreign(page);
+                if (foreign) {
+                        copy_gop->source.domid = foreign->domid;
+                        copy_gop->source.u.ref = foreign->gref;
                         copy_gop->flags |= GNTCOPY_source_gref;
                 } else {
                         copy_gop->source.domid = DOMID_SELF;
@@ -405,35 +406,6 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
         }
 }
 
-/*
- * Find the grant ref for a given frag in a chain of struct ubuf_info's
- * skb: the skb itself
- * i: the frag's number
- * ubuf: a pointer to an element in the chain. It should not be NULL
- *
- * Returns a pointer to the element in the chain where the page were found. If
- * not found, returns NULL.
- * See the definition of callback_struct in common.h for more details about
- * the chain.
- */
-static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
-                                                const int i,
-                                                const struct ubuf_info *ubuf)
-{
-        struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
-
-        do {
-                u16 pending_idx = ubuf->desc;
-
-                if (skb_shinfo(skb)->frags[i].page.p ==
-                    foreign_queue->mmap_pages[pending_idx])
-                        break;
-                ubuf = (struct ubuf_info *) ubuf->ctx;
-        } while (ubuf);
-
-        return ubuf;
-}
-
 /*
  * Prepare an SKB to be transmitted to the frontend.
  *
@@ -459,8 +431,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
         int head = 1;
         int old_meta_prod;
         int gso_type;
-        const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
-        const struct ubuf_info *const head_ubuf = ubuf;
 
         old_meta_prod = npo->meta_prod;
 
@@ -507,68 +477,16 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                 len = skb_tail_pointer(skb) - data;
 
                 xenvif_gop_frag_copy(queue, skb, npo,
-                                     virt_to_page(data), len, offset, &head,
-                                     NULL,
-                                     0);
+                                     virt_to_page(data), len, offset, &head);
                 data += len;
         }
 
         for (i = 0; i < nr_frags; i++) {
-                /* This variable also signals whether foreign_gref has a real
-                 * value or not.
-                 */
-                struct xenvif_queue *foreign_queue = NULL;
-                grant_ref_t foreign_gref;
-
-                if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
-                        (ubuf->callback == &xenvif_zerocopy_callback)) {
-                        const struct ubuf_info *const startpoint = ubuf;
-
-                        /* Ideally ubuf points to the chain element which
-                         * belongs to this frag. Or if frags were removed from
-                         * the beginning, then shortly before it.
-                         */
-                        ubuf = xenvif_find_gref(skb, i, ubuf);
-
-                        /* Try again from the beginning of the list, if we
-                         * haven't tried from there. This only makes sense in
-                         * the unlikely event of reordering the original frags.
-                         * For injected local pages it's an unnecessary second
-                         * run.
-                         */
-                        if (unlikely(!ubuf) && startpoint != head_ubuf)
-                                ubuf = xenvif_find_gref(skb, i, head_ubuf);
-
-                        if (likely(ubuf)) {
-                                u16 pending_idx = ubuf->desc;
-
-                                foreign_queue = ubuf_to_queue(ubuf);
-                                foreign_gref =
-                                        foreign_queue->pending_tx_info[pending_idx].req.gref;
-                                /* Just a safety measure. If this was the last
-                                 * element on the list, the for loop will
-                                 * iterate again if a local page were added to
-                                 * the end. Using head_ubuf here prevents the
-                                 * second search on the chain. Or the original
-                                 * frags changed order, but that's less likely.
-                                 * In any way, ubuf shouldn't be NULL.
-                                 */
-                                ubuf = ubuf->ctx ?
-                                        (struct ubuf_info *) ubuf->ctx :
-                                        head_ubuf;
-                        } else
-                                /* This frag was a local page, added to the
-                                 * array after the skb left netback.
-                                 */
-                                ubuf = head_ubuf;
-                }
-
                 xenvif_gop_frag_copy(queue, skb, npo,
                                      skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                      skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                      skb_shinfo(skb)->frags[i].page_offset,
-                                     &head,
-                                     foreign_queue,
-                                     foreign_queue ? foreign_gref : UINT_MAX);
+                                     &head);
         }
 
         return npo->meta_prod - old_meta_prod;
...