Commit 0b70195e authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/xen-netback/netback.c

A bug fix overlapped with a change to how the netback SKB control
block is implemented; the control-block pattern involved is sketched
below, after the commit metadata.
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ce22bb61 17e84a92
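For context, the netback rework referenced above moved per-skb state into a typed control block overlaid on skb->cb. A minimal sketch of that idiom, assuming the struct layout (only the meta_slots_used field is visible in the hunks below; the macro shape follows common kernel practice):

#include <linux/skbuff.h>

struct xenvif_rx_cb {
	int meta_slots_used;	/* only field confirmed by this diff */
};

/* skb->cb is 48 bytes of scratch space reserved for the layer that
 * currently owns the skb; casting it to a private struct is the
 * standard kernel control-block idiom.
 */
#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)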
drivers/net/ieee802154/at86rf230.c
@@ -1249,6 +1249,8 @@ static int at86rf230_remove(struct spi_device *spi)
 	struct at86rf230_local *lp = spi_get_drvdata(spi);
 	struct at86rf230_platform_data *pdata = spi->dev.platform_data;
 
+	/* mask all at86rf230 irq's */
+	at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
 	ieee802154_unregister_device(lp->dev);
 	free_irq(spi->irq, lp);
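The added lines quiesce the chip before teardown: clearing the IRQ_MASK subregister stops the radio from raising interrupts, so a late interrupt cannot run the handler against a device that ieee802154_unregister_device() has already torn down. A runnable userspace model of that ordering concern (all names illustrative, not driver code):

#include <stdbool.h>
#include <stdio.h>

static bool irq_masked;
static bool device_registered = true;

/* Stands in for the driver's interrupt handler. */
static void fake_irq(void)
{
	if (!irq_masked && !device_registered)
		printf("handler touched freed device state\n");
}

int main(void)
{
	irq_masked = true;		/* at86rf230_write_subreg(..., 0) */
	device_registered = false;	/* ieee802154_unregister_device() */
	fake_irq();			/* a late interrupt is now a no-op */
	return 0;
}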
drivers/net/xen-netback/netback.c
@@ -191,8 +191,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 * into multiple copies tend to give large frags their
 	 * own buffers as before.
 	 */
-	if ((offset + size > MAX_BUFFER_OFFSET) &&
-	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
+	BUG_ON(size > MAX_BUFFER_OFFSET);
+	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
 		return true;
 
 	return false;
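The two versions agree whenever the new assertion holds: once BUG_ON() guarantees size <= MAX_BUFFER_OFFSET, the old (size <= MAX_BUFFER_OFFSET) term is always true and can be dropped from the condition. A standalone userspace model of the rewritten predicate, assuming MAX_BUFFER_OFFSET is the 4096-byte PAGE_SIZE as in netback:

#include <assert.h>
#include <stdbool.h>

#define MAX_BUFFER_OFFSET 4096	/* PAGE_SIZE in netback; assumed here */

/* assert() stands in for BUG_ON(); after it, the dropped term
 * would always have been true.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	assert(size <= MAX_BUFFER_OFFSET);
	return offset + size > MAX_BUFFER_OFFSET && offset && !head;
}

int main(void)
{
	/* 200 bytes at offset 4000 overflow the buffer, so a non-head
	 * copy should start a new one */
	return start_new_rx_buffer(4000, 200, 0) ? 0 : 1;
}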
@@ -511,6 +511,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
 		RING_IDX max_slots_needed;
+		RING_IDX old_req_cons;
+		RING_IDX ring_slots_used;
 		int i;
 
 		/* We need a cheap worse case estimate for the number of
@@ -522,9 +524,28 @@ static void xenvif_rx_action(struct xenvif *vif)
 						PAGE_SIZE);
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			unsigned int size;
+			unsigned int offset;
+
 			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+			offset = skb_shinfo(skb)->frags[i].page_offset;
+
+			/* For a worse-case estimate we need to factor in
+			 * the fragment page offset as this will affect the
+			 * number of times xenvif_gop_frag_copy() will
+			 * call start_new_rx_buffer().
+			 */
+			max_slots_needed += DIV_ROUND_UP(offset + size,
+							 PAGE_SIZE);
 		}
+
+		/* To avoid the estimate becoming too pessimal for some
+		 * frontends that limit posted rx requests, cap the estimate
+		 * at MAX_SKB_FRAGS.
+		 */
+		if (max_slots_needed > MAX_SKB_FRAGS)
+			max_slots_needed = MAX_SKB_FRAGS;
+
+		/* We may need one more slot for GSO metadata */
 		if (skb_is_gso(skb) &&
 		    (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
 		     skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
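A quick worked example of why the page offset matters: a fragment smaller than a page can still straddle a page boundary, and each page touched can cost a ring slot. Under the usual 4096-byte PAGE_SIZE assumption:

#include <stdio.h>

#define PAGE_SIZE 4096			/* assumed page size */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* 200 bytes starting 4000 bytes into a page spill onto a
	 * second page, so two slots may be needed even though the
	 * size alone rounds up to one.
	 */
	unsigned int size = 200, offset = 4000;

	printf("old estimate: %u\n", DIV_ROUND_UP(size, PAGE_SIZE));		/* 1 */
	printf("new estimate: %u\n", DIV_ROUND_UP(offset + size, PAGE_SIZE));	/* 2 */
	return 0;
}

The MAX_SKB_FRAGS cap added above then bounds how pessimistic this per-frag rounding can make the total.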
@@ -539,8 +560,11 @@ static void xenvif_rx_action(struct xenvif *vif)
 		} else
 			vif->rx_last_skb_slots = 0;
 
+		old_req_cons = vif->rx.req_cons;
 		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
-		BUG_ON(XENVIF_RX_CB(skb)->meta_slots_used > max_slots_needed);
+		ring_slots_used = vif->rx.req_cons - old_req_cons;
+		BUG_ON(ring_slots_used > max_slots_needed);
 
 		__skb_queue_tail(&rxq, skb);
 	}
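The reworked BUG_ON tightens what is being checked: xenvif_gop_skb() may consume more ring requests than the meta slots it reports, so the overflow check now snapshots rx.req_cons around the call and compares the actual delta against the estimate. A runnable userspace sketch of that snapshot-and-delta pattern (all names illustrative, not driver code):

#include <assert.h>
#include <stdio.h>

/* Ring whose consumer index advances as requests are used. */
struct ring {
	unsigned int req_cons;
};

/* Consumes two ring requests but reports only one meta slot --
 * the kind of mismatch the reworked check is designed to catch.
 */
static int produce_response(struct ring *r)
{
	r->req_cons += 2;
	return 1;
}

int main(void)
{
	struct ring rx = { .req_cons = 0 };
	unsigned int max_slots_needed = 2;

	unsigned int old_req_cons = rx.req_cons;
	int meta_slots_used = produce_response(&rx);
	unsigned int ring_slots_used = rx.req_cons - old_req_cons;

	/* Had the ring consumed more slots than estimated, a check on
	 * meta_slots_used could still pass while this one trips.
	 */
	assert(ring_slots_used <= max_slots_needed);
	printf("meta=%d ring=%u\n", meta_slots_used, ring_slots_used);
	return 0;
}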