Commit 05310f31 authored by Juergen Gross, committed by Paolo Abeni

xen/netback: don't do grant copy across page boundary

Fix xenvif_get_requests() not to do grant copy operations across local
page boundaries. This requires doubling the maximum number of copy
operations per queue, as each copy could now be split into two.

Make sure that struct xenvif_tx_cb doesn't grow too large.
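
For illustration, a minimal stand-alone sketch of the splitting rule the
patch applies in xenvif_get_requests() follows; PAGE_SZ stands in for
XEN_PAGE_SIZE, and struct copy_op / build_ops() are invented names for this
sketch, not driver code. A copy whose destination would run past the end of
the current local page is cut at the boundary, the remainder becomes a
second copy operation, and a per-op bit records that the two results belong
together and must be checked as one, as xenvif_tx_check_gop() now does.

/*
 * Illustration only: mimics the "don't cross a local page boundary" rule.
 * PAGE_SZ stands in for XEN_PAGE_SIZE; struct copy_op and build_ops() are
 * invented for this sketch and are not driver code.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u

struct copy_op {
	unsigned int dest_off;	/* destination offset within its page */
	unsigned int len;	/* bytes covered by this copy */
};

/*
 * Emit copy ops for 'len' bytes landing at page offset 'dest_off'.
 * A copy that would run past the page end is clamped; bit i of
 * *split_mask marks that op i and op i+1 cover the same request and
 * that their status results have to be checked together.
 */
static unsigned int build_ops(unsigned int dest_off, unsigned int len,
			      struct copy_op *ops, uint32_t *split_mask)
{
	unsigned int n = 0;

	*split_mask = 0;
	while (len) {
		unsigned int amount = len;

		if (dest_off + amount > PAGE_SZ) {
			amount = PAGE_SZ - dest_off;	/* stop at boundary */
			*split_mask |= 1u << n;		/* remember the split */
		}
		ops[n].dest_off = dest_off;
		ops[n].len = amount;
		n++;
		dest_off = (dest_off + amount) % PAGE_SZ;
		len -= amount;
	}
	return n;
}

int main(void)
{
	struct copy_op ops[8];
	uint32_t split_mask;
	/* 200 bytes starting 128 bytes before a page boundary -> two ops */
	unsigned int n = build_ops(PAGE_SZ - 128, 200, ops, &split_mask);

	for (unsigned int i = 0; i < n; i++)
		printf("op %u: offset %u, len %u%s\n", i, ops[i].dest_off,
		       ops[i].len, (split_mask & (1u << i)) ? " (split)" : "");
	return 0;
}

Doubling tx_copy_ops to 2 * MAX_PENDING_REQS guarantees there is always room
for the extra operation each split can produce.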

Cc: stable@vger.kernel.org
Fixes: ad7f402a ("xen/netback: Ensure protocol headers don't fall in the non-linear area")
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent f22c993f
drivers/net/xen-netback/common.h
@@ -166,7 +166,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
 	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
 
-	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
+	struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS];
 	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
 	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
 	/* passed to gnttab_[un]map_refs with pages under (un)mapping */

drivers/net/xen-netback/netback.c
@@ -334,6 +334,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 struct xenvif_tx_cb {
 	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
 	u8 copy_count;
+	u32 split_mask;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
@@ -361,6 +362,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 	struct sk_buff *skb =
 		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
 			  GFP_ATOMIC | __GFP_NOWARN);
+
+	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
 	if (unlikely(skb == NULL))
 		return NULL;
 
@@ -396,11 +399,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 	nr_slots = shinfo->nr_frags + 1;
 
 	copy_count(skb) = 0;
+	XENVIF_TX_CB(skb)->split_mask = 0;
 
 	/* Create copy ops for exactly data_len bytes into the skb head. */
 	__skb_put(skb, data_len);
 	while (data_len > 0) {
 		int amount = data_len > txp->size ? txp->size : data_len;
+		bool split = false;
 
 		cop->source.u.ref = txp->gref;
 		cop->source.domid = queue->vif->domid;
@@ -413,6 +418,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
 					       - data_len);
 
+		/* Don't cross local page boundary! */
+		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+			amount = XEN_PAGE_SIZE - cop->dest.offset;
+			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+			split = true;
+		}
+
 		cop->len = amount;
 		cop->flags = GNTCOPY_source_gref;
 
@@ -420,6 +432,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		pending_idx = queue->pending_ring[index];
 		callback_param(queue, pending_idx).ctx = NULL;
 		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
+		if (!split)
 		copy_count(skb)++;
 
 		cop++;
@@ -441,7 +454,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 			nr_slots--;
 		} else {
 			/* The copy op partially covered the tx_request.
-			 * The remainder will be mapped.
+			 * The remainder will be mapped or copied in the next
+			 * iteration.
 			 */
 			txp->offset += amount;
 			txp->size -= amount;
@@ -539,6 +553,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 
 		pending_idx = copy_pending_idx(skb, i);
 		newerr = (*gopp_copy)->status;
+
+		/* Split copies need to be handled together. */
+		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+			(*gopp_copy)++;
+			if (!newerr)
+				newerr = (*gopp_copy)->status;
+		}
 		if (likely(!newerr)) {
 			/* The first frag might still have this slot mapped */
 			if (i < copy_count(skb) - 1 || !sharedslot)