Commit 3255eeec authored by François Romieu, committed by Jeff Garzik

[PATCH] via-velocity: Rx copybreak

Handle copybreak.
- velocity_rx_refill() is modified to allow the processing of an Rx desc
  ring wherein the empty skb slots are not necessarily contiguous. Given
  the preceding changes, rx_copybreak should not need anything else;
- the driver does not rely on rd_info->skb_dma set to NULL any more;
- pci_dma_sync_single_for_{cpu/device} changes as a bonus;
- more function documentation.

Some inspiration borrowed from similar r8169 code.
parent 0accc7c5
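
For readers new to the technique: copybreak trades one memcpy() for keeping a large, DMA-mapped receive buffer in the ring. Small frames are duplicated into a right-sized skb and the original buffer is recycled in place. A minimal, driver-agnostic sketch of the idea (names are illustrative, not taken from this driver):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Return the skb to hand to the stack; small frames are copied so
 * that ring_skb can stay in the Rx ring and be reused as-is. */
static struct sk_buff *copybreak_example(struct sk_buff *ring_skb,
					 int pkt_size, int copybreak)
{
	struct sk_buff *skb = ring_skb;

	if (pkt_size < copybreak) {
		struct sk_buff *copy = dev_alloc_skb(pkt_size + 2);

		if (copy) {
			skb_reserve(copy, 2);	/* keep the IP header aligned */
			memcpy(copy->data, ring_skb->data, pkt_size);
			skb = copy;		/* ring_skb is not consumed */
		}
	}
	return skb;
}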
@@ -226,6 +226,10 @@ VELOCITY_PARAM(wol_opts, "Wake On Lan options");
 VELOCITY_PARAM(int_works, "Number of packets per interrupt services");
 
+static int rx_copybreak = 200;
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
+
 static int velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info);
 static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev);
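
With the parameter declared this way (MODULE_PARM() being the interface of this kernel generation), the threshold should be settable at load time, e.g. modprobe via-velocity rx_copybreak=256; the 200-byte default limits the copy to genuinely small frames.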
@@ -1007,13 +1011,22 @@ static int velocity_rx_refill(struct velocity_info *vptr)
 {
 	int dirty = vptr->rd_dirty, done = 0, ret = 0;
 
-	while (!vptr->rd_info[dirty].skb) {
-		ret = velocity_alloc_rx_buf(vptr, dirty);
-		if (ret < 0)
+	do {
+		struct rx_desc *rd = vptr->rd_ring + dirty;
+
+		/* Fine for an all zero Rx desc at init time as well */
+		if (rd->rdesc0.owner == cpu_to_le32(OWNED_BY_NIC))
 			break;
 
+		if (!vptr->rd_info[dirty].skb) {
+			ret = velocity_alloc_rx_buf(vptr, dirty);
+			if (ret < 0)
+				break;
+		}
 		done++;
 		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
-	}
+	} while (dirty != vptr->rd_curr);
 
 	if (done) {
 		vptr->rd_dirty = dirty;
 		vptr->rd_filled += done;
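
This rework is what makes non-contiguous empty slots harmless: a descriptor still owned by the NIC ends the walk, a slot whose skb was left in place by the copybreak path is skipped instead of re-allocated, and the do/while runs from rd_dirty until it catches up with rd_curr rather than stopping at the first occupied slot.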
@@ -1072,7 +1085,7 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 	for (i = 0; i < vptr->options.numrx; i++) {
 		struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
 
-		if (!rd_info->skb_dma)
+		if (!rd_info->skb)
 			continue;
 		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
 				 PCI_DMA_FROMDEVICE);
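
The test changes because the receive path below no longer clears skb_dma: a slot whose skb went up the stack keeps a stale mapping address, so rd_info->skb is now the authoritative marker of an occupied slot.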
@@ -1208,7 +1221,6 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 		/*
 		 * Don't drop CE or RL error frame although RXOK is off
-		 * FIXME: need to handle copybreak
 		 */
 		if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
 			if (velocity_receive_frame(vptr, rd_curr) < 0)
@@ -1267,6 +1279,43 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
 	}
 }
 
+/**
+ *	velocity_rx_copy	-	in place Rx copy for small packets
+ *	@rx_skb: network layer packet buffer candidate
+ *	@pkt_size: received data size
+ *	@vptr: velocity adapter we are handling
+ *
+ *	Replace the current skb that is scheduled for Rx processing with a
+ *	shorter, immediately allocated skb, if the received packet is small
+ *	enough. This function returns a negative value if the received
+ *	packet is too big or if memory is exhausted.
+ */
+static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
+				   struct velocity_info *vptr)
+{
+	int ret = -1;
+
+	if (pkt_size < rx_copybreak) {
+		struct sk_buff *new_skb;
+
+		new_skb = dev_alloc_skb(pkt_size + 2);
+		if (new_skb) {
+			new_skb->dev = vptr->dev;
+			new_skb->ip_summed = rx_skb[0]->ip_summed;
+
+			if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
+				skb_reserve(new_skb, 2);
+			memcpy(new_skb->data, rx_skb[0]->tail, pkt_size);
+			*rx_skb = new_skb;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
 /**
  *	velocity_iph_realign	-	IP header alignment
  *	@vptr: velocity we are handling
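
Two details of the helper: dev_alloc_skb(pkt_size + 2) plus the conditional skb_reserve(new_skb, 2) is the usual two-byte trick that offsets the 14-byte Ethernet header so the IP header lands on a four-byte boundary; and since the helper runs before skb_put(), the ring skb's received data still starts at ->tail, which is why the memcpy() reads from rx_skb[0]->tail.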
@@ -1300,6 +1349,7 @@ static inline void velocity_iph_realign(struct velocity_info *vptr,
 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 {
+	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
 	struct net_device_stats *stats = &vptr->stats;
 	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
 	struct rx_desc *rd = &(vptr->rd_ring[idx]);
@@ -1318,15 +1368,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 	skb = rd_info->skb;
 	skb->dev = vptr->dev;
 
-	pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
-			 PCI_DMA_FROMDEVICE);
-	rd_info->skb_dma = (dma_addr_t) NULL;
-	rd_info->skb = NULL;
-
-	velocity_iph_realign(vptr, skb, pkt_len);
-	skb_put(skb, pkt_len - 4);
-	skb->protocol = eth_type_trans(skb, skb->dev);
+	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
+				    vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
 	 * Drop frame not meeting IEEE 802.3
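
Swapping the early pci_unmap_single() for pci_dma_sync_single_for_cpu() gives the CPU a coherent view of the frame (for the length check, checksum and possible copy) without yet deciding whether the buffer stays in the ring; the matching unmap or sync back to the device is deferred to the hunk below.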
@@ -1339,11 +1382,21 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 		}
 	}
 
+	pci_action = pci_dma_sync_single_for_device;
+
 	velocity_rx_csum(rd, skb);
 
-	/*
-	 * FIXME: need rx_copybreak handling
-	 */
+	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
+		velocity_iph_realign(vptr, skb, pkt_len);
+		pci_action = pci_unmap_single;
+		rd_info->skb = NULL;
+	}
+
+	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+		   PCI_DMA_FROMDEVICE);
+
+	skb_put(skb, pkt_len - 4);
+	skb->protocol = eth_type_trans(skb, skb->dev);
 
 	stats->rx_bytes += pkt_len;
 	netif_rx(skb);
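
The pci_action indirection works because pci_dma_sync_single_for_device() and pci_unmap_single() share a signature: when velocity_rx_copy() succeeds, the ring buffer is only handed back to the device; otherwise the skb itself goes up the stack and its mapping is torn down for good. The same pattern in a standalone sketch (illustrative names, not driver code):

#include <linux/pci.h>

static void rx_dma_finish(struct pci_dev *pdev, dma_addr_t mapping,
			  size_t size, int copied)
{
	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);

	/* copied: buffer stays in the ring, sync it back to the device;
	 * otherwise the buffer leaves the ring and must be unmapped. */
	pci_action = copied ? pci_dma_sync_single_for_device : pci_unmap_single;

	pci_action(pdev, mapping, size, PCI_DMA_FROMDEVICE);
}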