Commit c7dab99b authored by Brice Goglin, committed by Jeff Garzik

[PATCH] myri10ge: switch to page-based skb

Switch to physical-page-based skbs by calling the new page-based
allocation routines and using myri10ge_page_rx_done().
Signed-off-by: Brice Goglin <brice@myri.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
parent dd50f336
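The core idea, for readers new to the driver: instead of allocating one skb per receive buffer, the driver now allocates large physical pages, carves them into fixed-size chunks for the small and big receive rings, and tracks ring occupancy with cnt/fill_cnt counters so the rings can be restocked whenever they run low. Below is a minimal userspace sketch of that accounting, not driver code: ALLOC_SIZE, RING_ENTRIES, FILL_THRESH, and alloc_rx_chunks() are invented stand-ins for MYRI10GE_ALLOC_SIZE, the ring size, myri10ge_fill_thresh, and myri10ge_alloc_rx_pages(), and malloc() stands in for the kernel page allocator.

#include <stdio.h>
#include <stdlib.h>

#define ALLOC_SIZE   4096	/* stand-in for MYRI10GE_ALLOC_SIZE */
#define RING_ENTRIES 8		/* ring size, power of two */
#define FILL_THRESH  4		/* stand-in for myri10ge_fill_thresh */

struct rx_ring {
	void *page;		/* current backing allocation */
	int page_offset;	/* next free byte within it */
	unsigned int cnt;	/* buffers consumed by the "NIC" */
	unsigned int fill_cnt;	/* buffers handed to the "NIC" */
	void *entry[RING_ENTRIES];
};

/* Hand out chunks, grabbing a fresh "page" whenever the current one
 * is exhausted; mirrors the page_offset rollover in the driver.
 * (Page refcounting and freeing are omitted for brevity.) */
static void alloc_rx_chunks(struct rx_ring *rx, int bytes)
{
	while (rx->fill_cnt - rx->cnt < RING_ENTRIES) {
		if (rx->page == NULL || rx->page_offset + bytes > ALLOC_SIZE) {
			rx->page = malloc(ALLOC_SIZE);	/* alloc_page() stand-in */
			if (rx->page == NULL)
				return;	/* leave ring short; retry later */
			rx->page_offset = 0;
		}
		rx->entry[rx->fill_cnt & (RING_ENTRIES - 1)] =
		    (char *)rx->page + rx->page_offset;
		rx->page_offset += bytes;
		rx->fill_cnt++;
	}
}

int main(void)
{
	/* page_offset = ALLOC_SIZE forces a fresh page on first fill,
	 * as myri10ge_allocate_rings() does below */
	struct rx_ring rx = { .page = NULL, .page_offset = ALLOC_SIZE };

	alloc_rx_chunks(&rx, 512);		/* initial fill */
	rx.cnt += 6;				/* pretend 6 buffers were consumed */
	if (rx.fill_cnt - rx.cnt < FILL_THRESH)	/* restock check */
		alloc_rx_chunks(&rx, 512);
	printf("filled %u, consumed %u\n", rx.fill_cnt, rx.cnt);
	return 0;
}

The same counters drive the three refill paths in the patch: the fast path in myri10ge_clean_rx_done(), the initial fill in myri10ge_allocate_rings(), and the fallback in myri10ge_watchdog_timer().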
@@ -1264,13 +1264,13 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit)
 		rx_done->entry[idx].length = 0;
 		checksum = csum_unfold(rx_done->entry[idx].checksum);
 		if (length <= mgp->small_bytes)
-			rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small,
-						 mgp->small_bytes,
-						 length, checksum);
+			rx_ok = myri10ge_page_rx_done(mgp, &mgp->rx_small,
+						      mgp->small_bytes,
+						      length, checksum);
 		else
-			rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big,
-						 mgp->dev->mtu + ETH_HLEN,
-						 length, checksum);
+			rx_ok = myri10ge_page_rx_done(mgp, &mgp->rx_big,
+						      mgp->big_bytes,
+						      length, checksum);
 		rx_packets += rx_ok;
 		rx_bytes += rx_ok * (unsigned long)length;
 		cnt++;
@@ -1284,6 +1284,14 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit)
 	rx_done->cnt = cnt;
 	mgp->stats.rx_packets += rx_packets;
 	mgp->stats.rx_bytes += rx_bytes;
+
+	/* restock receive rings if needed */
+	if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh)
+		myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+					mgp->small_bytes + MXGEFW_PAD, 0);
+	if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh)
+		myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
+
 }

 static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
@@ -1674,56 +1682,48 @@ static int myri10ge_allocate_rings(struct net_device *dev)
 		goto abort_with_rx_small_info;

 	/* Fill the receive rings */
+	mgp->rx_big.cnt = 0;
+	mgp->rx_small.cnt = 0;
+	mgp->rx_big.fill_cnt = 0;
+	mgp->rx_small.fill_cnt = 0;
+	mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
+	mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
+	mgp->rx_small.watchdog_needed = 0;
+	mgp->rx_big.watchdog_needed = 0;
+	myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+				mgp->small_bytes + MXGEFW_PAD, 0);

-	for (i = 0; i <= mgp->rx_small.mask; i++) {
-		status = myri10ge_getbuf(&mgp->rx_small, mgp,
-					 mgp->small_bytes, i);
-		if (status) {
-			printk(KERN_ERR
-			       "myri10ge: %s: alloced only %d small bufs\n",
-			       dev->name, i);
-			goto abort_with_rx_small_ring;
-		}
+	if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) {
+		printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n",
+		       dev->name, mgp->rx_small.fill_cnt);
+		goto abort_with_rx_small_ring;
 	}

-	for (i = 0; i <= mgp->rx_big.mask; i++) {
-		status =
-		    myri10ge_getbuf(&mgp->rx_big, mgp, dev->mtu + ETH_HLEN, i);
-		if (status) {
-			printk(KERN_ERR
-			       "myri10ge: %s: alloced only %d big bufs\n",
-			       dev->name, i);
-			goto abort_with_rx_big_ring;
-		}
+	myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
+	if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) {
+		printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n",
+		       dev->name, mgp->rx_big.fill_cnt);
+		goto abort_with_rx_big_ring;
 	}

 	return 0;

 abort_with_rx_big_ring:
-	for (i = 0; i <= mgp->rx_big.mask; i++) {
-		if (mgp->rx_big.info[i].skb != NULL)
-			dev_kfree_skb_any(mgp->rx_big.info[i].skb);
-		if (pci_unmap_len(&mgp->rx_big.info[i], len))
-			pci_unmap_single(mgp->pdev,
-					 pci_unmap_addr(&mgp->rx_big.info[i],
-							bus),
-					 pci_unmap_len(&mgp->rx_big.info[i],
-						       len),
-					 PCI_DMA_FROMDEVICE);
+	for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
+		int idx = i & mgp->rx_big.mask;
+		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
+				       mgp->big_bytes);
+		put_page(mgp->rx_big.info[idx].page);
 	}

 abort_with_rx_small_ring:
-	for (i = 0; i <= mgp->rx_small.mask; i++) {
-		if (mgp->rx_small.info[i].skb != NULL)
-			dev_kfree_skb_any(mgp->rx_small.info[i].skb);
-		if (pci_unmap_len(&mgp->rx_small.info[i], len))
-			pci_unmap_single(mgp->pdev,
-					 pci_unmap_addr(&mgp->rx_small.info[i],
-							bus),
-					 pci_unmap_len(&mgp->rx_small.info[i],
-						       len),
-					 PCI_DMA_FROMDEVICE);
+	for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
+		int idx = i & mgp->rx_small.mask;
+		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
+				       mgp->small_bytes + MXGEFW_PAD);
+		put_page(mgp->rx_small.info[idx].page);
 	}

 	kfree(mgp->rx_big.info);

 abort_with_rx_small_info:
@@ -1756,30 +1756,24 @@ static void myri10ge_free_rings(struct net_device *dev)
 	mgp = netdev_priv(dev);

-	for (i = 0; i <= mgp->rx_big.mask; i++) {
-		if (mgp->rx_big.info[i].skb != NULL)
-			dev_kfree_skb_any(mgp->rx_big.info[i].skb);
-		if (pci_unmap_len(&mgp->rx_big.info[i], len))
-			pci_unmap_single(mgp->pdev,
-					 pci_unmap_addr(&mgp->rx_big.info[i],
-							bus),
-					 pci_unmap_len(&mgp->rx_big.info[i],
-						       len),
-					 PCI_DMA_FROMDEVICE);
-	}
-
-	for (i = 0; i <= mgp->rx_small.mask; i++) {
-		if (mgp->rx_small.info[i].skb != NULL)
-			dev_kfree_skb_any(mgp->rx_small.info[i].skb);
-		if (pci_unmap_len(&mgp->rx_small.info[i], len))
-			pci_unmap_single(mgp->pdev,
-					 pci_unmap_addr(&mgp->rx_small.info[i],
-							bus),
-					 pci_unmap_len(&mgp->rx_small.info[i],
-						       len),
-					 PCI_DMA_FROMDEVICE);
+	for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
+		idx = i & mgp->rx_big.mask;
+		if (i == mgp->rx_big.fill_cnt - 1)
+			mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
+		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
+				       mgp->big_bytes);
+		put_page(mgp->rx_big.info[idx].page);
+	}
+
+	for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
+		idx = i & mgp->rx_small.mask;
+		if (i == mgp->rx_small.fill_cnt - 1)
+			mgp->rx_small.info[idx].page_offset =
+			    MYRI10GE_ALLOC_SIZE;
+		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
+				       mgp->small_bytes + MXGEFW_PAD);
+		put_page(mgp->rx_small.info[idx].page);
 	}

 	tx = &mgp->tx;
 	while (tx->done != tx->req) {
 		idx = tx->done & tx->mask;
@@ -1847,19 +1841,18 @@ static int myri10ge_open(struct net_device *dev)
 	 */
 	if (dev->mtu <= ETH_DATA_LEN)
-		mgp->small_bytes = 128;	/* enough for a TCP header */
+		/* enough for a TCP header */
+		mgp->small_bytes = (128 > SMP_CACHE_BYTES)
+		    ? (128 - MXGEFW_PAD)
+		    : (SMP_CACHE_BYTES - MXGEFW_PAD);
 	else
-		mgp->small_bytes = ETH_FRAME_LEN;	/* enough for an ETH_DATA_LEN frame */
+		/* enough for an ETH_DATA_LEN frame */
+		mgp->small_bytes = ETH_FRAME_LEN;

 	/* Override the small buffer size? */
 	if (myri10ge_small_bytes > 0)
 		mgp->small_bytes = myri10ge_small_bytes;

-	/* If the user sets an obscenely small MTU, adjust the small
-	 * bytes down to nearly nothing */
-	if (mgp->small_bytes >= (dev->mtu + ETH_HLEN))
-		mgp->small_bytes = 64;
-
 	/* get the lanai pointers to the send and receive rings */

 	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
@@ -1895,17 +1888,23 @@ static int myri10ge_open(struct net_device *dev)
 		mgp->rx_big.wc_fifo = NULL;
 	}

-	status = myri10ge_allocate_rings(dev);
-	if (status != 0)
-		goto abort_with_nothing;
-
 	/* Firmware needs the big buff size as a power of 2.  Lie and
 	 * tell him the buffer is larger, because we only use 1
 	 * buffer/pkt, and the mtu will prevent overruns.
 	 */
 	big_pow2 = dev->mtu + ETH_HLEN + MXGEFW_PAD;
-	while ((big_pow2 & (big_pow2 - 1)) != 0)
-		big_pow2++;
+	if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
+		while ((big_pow2 & (big_pow2 - 1)) != 0)
+			big_pow2++;
+		mgp->big_bytes = dev->mtu + ETH_HLEN + MXGEFW_PAD;
+	} else {
+		big_pow2 = MYRI10GE_ALLOC_SIZE;
+		mgp->big_bytes = big_pow2;
+	}
+
+	status = myri10ge_allocate_rings(dev);
+	if (status != 0)
+		goto abort_with_nothing;

 	/* now give firmware buffers sizes, and MTU */
 	cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
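An aside on the rounding loop kept in the hunk above: incrementing big_pow2 until (big_pow2 & (big_pow2 - 1)) == 0 stops at the next power of two, because x & (x - 1) clears the lowest set bit and is therefore zero exactly when a single bit is set. A standalone illustration follows; the 2-byte pad value is an assumption taken from the firmware header's MXGEFW_PAD, and round_up_pow2() is a name invented here, not a driver function.

#include <stdio.h>

/* Same rounding technique as the driver loop: bump x until it has a
 * single bit set.  Fine for the small values involved; a production
 * helper would shift rather than increment. */
static unsigned int round_up_pow2(unsigned int x)
{
	while ((x & (x - 1)) != 0)
		x++;
	return x;
}

int main(void)
{
	/* 1500-byte MTU + 14 (Ethernet header) + 2 (firmware pad) */
	printf("%u -> %u\n", 1516u, round_up_pow2(1516u));	/* prints 2048 */
	return 0;
}

So with a standard 1500-byte MTU the firmware is told 2048-byte buffers, while mgp->big_bytes keeps the true 1516-byte size used for mapping and copying.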
@@ -2888,6 +2887,21 @@ static void myri10ge_watchdog_timer(unsigned long arg)
 	struct myri10ge_priv *mgp;

 	mgp = (struct myri10ge_priv *)arg;
+	if (mgp->rx_small.watchdog_needed) {
+		myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+					mgp->small_bytes + MXGEFW_PAD, 1);
+		if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt >=
+		    myri10ge_fill_thresh)
+			mgp->rx_small.watchdog_needed = 0;
+	}
+	if (mgp->rx_big.watchdog_needed) {
+		myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 1);
+		if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt >=
+		    myri10ge_fill_thresh)
+			mgp->rx_big.watchdog_needed = 0;
+	}
+
 	if (mgp->tx.req != mgp->tx.done &&
 	    mgp->tx.done == mgp->watchdog_tx_done &&
 	    mgp->watchdog_tx_req != mgp->watchdog_tx_done)