Commit 5fda1dbb authored by Jeff Garzik

Merge pobox.com:/spare/repo/linux-2.6

into pobox.com:/spare/repo/netdev-2.6/epic100
parents 004a3668 05102b62
@@ -80,8 +80,6 @@
 	These may be modified when a driver module is loaded.*/
 
 static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-static int max_interrupt_work = 32;
 
 /* Used to pass the full-duplex flag, etc. */
 #define MAX_UNITS 8		/* More are supported, limit only on options */
@@ -99,9 +97,9 @@ static int rx_copybreak;
 	Making the Tx ring too large decreases the effectiveness of channel
 	bonding and packet priority.
 	There are no ill effects from too-large receive rings. */
-#define TX_RING_SIZE	16
-#define TX_QUEUE_LEN	10	/* Limit ring entries actually used.  */
-#define RX_RING_SIZE	32
+#define TX_RING_SIZE	256
+#define TX_QUEUE_LEN	240	/* Limit ring entries actually used.  */
+#define RX_RING_SIZE	256
 #define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct epic_tx_desc)
 #define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct epic_rx_desc)
@@ -152,12 +150,10 @@ MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
 MODULE_LICENSE("GPL");
 
 MODULE_PARM(debug, "i");
-MODULE_PARM(max_interrupt_work, "i");
 MODULE_PARM(rx_copybreak, "i");
 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
-MODULE_PARM_DESC(max_interrupt_work, "EPIC/100 maximum events handled per interrupt");
 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
 MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
@@ -289,6 +285,12 @@ enum CommandBits {
 	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
 };
 
+#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */
+
+#define EpicNapiEvent	(TxEmpty | TxDone | \
+			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
+#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)
+
 static u16 media2miictl[16] = {
 	0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0 };
@@ -327,9 +329,12 @@ struct epic_private {
 
 	/* Ring pointers. */
 	spinlock_t lock;			/* Group with Tx control cache line. */
+	spinlock_t napi_lock;
+	unsigned int reschedule_in_poll;
 	unsigned int cur_tx, dirty_tx;
 
 	unsigned int cur_rx, dirty_rx;
+	u32 irq_mask;
 	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
 
 	struct pci_dev *pci_dev;		/* PCI bus location. */
@@ -356,7 +361,8 @@ static void epic_timer(unsigned long data);
 static void epic_tx_timeout(struct net_device *dev);
 static void epic_init_ring(struct net_device *dev);
 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static int epic_rx(struct net_device *dev);
+static int epic_rx(struct net_device *dev, int budget);
+static int epic_poll(struct net_device *dev, int *budget);
 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static struct ethtool_ops netdev_ethtool_ops;
@@ -375,7 +381,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 	int irq;
 	struct net_device *dev;
 	struct epic_private *ep;
-	int i, option = 0, duplex = 0;
+	int i, ret, option = 0, duplex = 0;
 	void *ring_space;
 	dma_addr_t ring_dma;
@@ -389,29 +395,33 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 	card_idx++;
 
-	i = pci_enable_device(pdev);
-	if (i)
-		return i;
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto out;
 	irq = pdev->irq;
 
 	if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
 		printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_out_disable;
 	}
 
 	pci_set_master(pdev);
 
+	ret = pci_request_regions(pdev, DRV_NAME);
+	if (ret < 0)
+		goto err_out_disable;
+
+	ret = -ENOMEM;
+
 	dev = alloc_etherdev(sizeof (*ep));
 	if (!dev) {
 		printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
-		return -ENOMEM;
+		goto err_out_free_res;
 	}
 	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	if (pci_request_regions(pdev, DRV_NAME))
-		goto err_out_free_netdev;
-
 #ifdef USE_IO_OPS
 	ioaddr = pci_resource_start (pdev, 0);
 #else
@@ -419,7 +429,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 	ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
 	if (!ioaddr) {
 		printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
-		goto err_out_free_res;
+		goto err_out_free_netdev;
 	}
 #endif
@@ -456,7 +466,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 	dev->base_addr = ioaddr;
 	dev->irq = irq;
 
-	spin_lock_init (&ep->lock);
+	spin_lock_init(&ep->lock);
+	spin_lock_init(&ep->napi_lock);
+	ep->reschedule_in_poll = 0;
 
 	/* Bring the chip out of low-power mode. */
 	outl(0x4200, ioaddr + GENCTL);
@@ -486,6 +498,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 	ep->pci_dev = pdev;
 	ep->chip_id = chip_idx;
 	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
+	ep->irq_mask =
+		(ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
+		| CntFull | TxUnderrun | EpicNapiEvent;
 
 	/* Find the connected MII xcvrs.
 	   Doing this in open() would allow detecting external xcvrs later, but
@@ -540,10 +555,12 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 	dev->ethtool_ops = &netdev_ethtool_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 	dev->tx_timeout = &epic_tx_timeout;
+	dev->poll = epic_poll;
+	dev->weight = 64;
 
-	i = register_netdev(dev);
-	if (i)
-		goto err_out_unmap_tx;
+	ret = register_netdev(dev);
+	if (ret < 0)
+		goto err_out_unmap_rx;
 
 	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
 		   dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
@@ -551,19 +568,24 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 		printk("%2.2x:", dev->dev_addr[i]);
 	printk("%2.2x.\n", dev->dev_addr[i]);
 
-	return 0;
+out:
+	return ret;
 
+err_out_unmap_rx:
+	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
 err_out_unmap_tx:
 	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
 err_out_iounmap:
 #ifndef USE_IO_OPS
 	iounmap(ioaddr);
-err_out_free_res:
-#endif
-	pci_release_regions(pdev);
 err_out_free_netdev:
+#endif
 	free_netdev(dev);
-	return -ENODEV;
+err_out_free_res:
+	pci_release_regions(pdev);
+err_out_disable:
+	pci_disable_device(pdev);
+	goto out;
 }
 
 /* Serial EEPROM section. */
@@ -589,6 +611,38 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 #define EE_READ256_CMD	(6 << 8)
 #define EE_ERASE_CMD	(7 << 6)
 
+static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
+{
+	long ioaddr = dev->base_addr;
+
+	outl(0x00000000, ioaddr + INTMASK);
+}
+
+static inline void __epic_pci_commit(long ioaddr)
+{
+#ifndef USE_IO_OPS
+	inl(ioaddr + INTMASK);
+#endif
+}
+
+static inline void epic_napi_irq_off(struct net_device *dev,
+				     struct epic_private *ep)
+{
+	long ioaddr = dev->base_addr;
+
+	outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
+	__epic_pci_commit(ioaddr);
+}
+
+static inline void epic_napi_irq_on(struct net_device *dev,
+				    struct epic_private *ep)
+{
+	long ioaddr = dev->base_addr;
+
+	/* No need to commit possible posted write */
+	outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
+}
+
 static int __devinit read_eeprom(long ioaddr, int location)
 {
 	int i;
@@ -749,9 +803,8 @@ static int epic_open(struct net_device *dev)
 	/* Enable interrupts by setting the interrupt mask. */
 	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
-		| CntFull | TxUnderrun | TxDone | TxEmpty
-		| RxError | RxOverflow | RxFull | RxHeader | RxDone,
-		ioaddr + INTMASK);
+		| CntFull | TxUnderrun
+		| RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
 
 	if (debug > 1)
 		printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
@@ -792,7 +845,7 @@ static void epic_pause(struct net_device *dev)
 	}
 
 	/* Remove the packets on the Rx queue. */
-	epic_rx(dev);
+	epic_rx(dev, RX_RING_SIZE);
 }
 
 static void epic_restart(struct net_device *dev)
@@ -838,9 +891,9 @@ static void epic_restart(struct net_device *dev)
 
 	/* Enable interrupts by setting the interrupt mask. */
 	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
-		| CntFull | TxUnderrun | TxDone | TxEmpty
-		| RxError | RxOverflow | RxFull | RxHeader | RxDone,
-		ioaddr + INTMASK);
+		| CntFull | TxUnderrun
+		| RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
+
 	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
 		   " interrupt %4.4x.\n",
 		   dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
@@ -926,7 +979,6 @@ static void epic_init_ring(struct net_device *dev)
 	int i;
 
 	ep->tx_full = 0;
-	ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
 	ep->dirty_tx = ep->cur_tx = 0;
 	ep->cur_rx = ep->dirty_rx = 0;
 	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
@@ -1026,6 +1078,76 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
+static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
+			  int status)
+{
+	struct net_device_stats *stats = &ep->stats;
+
+#ifndef final_version
+	/* There was an major error, log it. */
+	if (debug > 1)
+		printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+		       dev->name, status);
+#endif
+	stats->tx_errors++;
+	if (status & 0x1050)
+		stats->tx_aborted_errors++;
+	if (status & 0x0008)
+		stats->tx_carrier_errors++;
+	if (status & 0x0040)
+		stats->tx_window_errors++;
+	if (status & 0x0010)
+		stats->tx_fifo_errors++;
+}
+
+static void epic_tx(struct net_device *dev, struct epic_private *ep)
+{
+	unsigned int dirty_tx, cur_tx;
+
+	/*
+	 * Note: if this lock becomes a problem we can narrow the locked
+	 * region at the cost of occasionally grabbing the lock more times.
+	 */
+	cur_tx = ep->cur_tx;
+	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
+		struct sk_buff *skb;
+		int entry = dirty_tx % TX_RING_SIZE;
+		int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
+
+		if (txstatus & DescOwn)
+			break;	/* It still hasn't been Txed */
+
+		if (likely(txstatus & 0x0001)) {
+			ep->stats.collisions += (txstatus >> 8) & 15;
+			ep->stats.tx_packets++;
+			ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
+		} else
+			epic_tx_error(dev, ep, txstatus);
+
+		/* Free the original skb. */
+		skb = ep->tx_skbuff[entry];
+		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
+				 skb->len, PCI_DMA_TODEVICE);
+		dev_kfree_skb_irq(skb);
+		ep->tx_skbuff[entry] = 0;
+	}
+
+#ifndef final_version
+	if (cur_tx - dirty_tx > TX_RING_SIZE) {
+		printk(KERN_WARNING
+		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+		       dev->name, dirty_tx, cur_tx, ep->tx_full);
+		dirty_tx += TX_RING_SIZE;
+	}
+#endif
+	ep->dirty_tx = dirty_tx;
+	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
+		/* The ring is no longer full, allow new TX entries. */
+		ep->tx_full = 0;
+		netif_wake_queue(dev);
+	}
+}
+
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
@@ -1033,135 +1155,71 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
 	struct net_device *dev = dev_instance;
 	struct epic_private *ep = dev->priv;
 	long ioaddr = dev->base_addr;
-	int status, boguscnt = max_interrupt_work;
 	unsigned int handled = 0;
+	int status;
 
-	do {
-		status = inl(ioaddr + INTSTAT);
-		/* Acknowledge all of the current interrupt sources ASAP. */
-		outl(status & 0x00007fff, ioaddr + INTSTAT);
+	status = inl(ioaddr + INTSTAT);
+	/* Acknowledge all of the current interrupt sources ASAP. */
+	outl(status & EpicNormalEvent, ioaddr + INTSTAT);
 
-		if (debug > 4)
-			printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
-				   "intstat=%#8.8x.\n",
-				   dev->name, status, (int)inl(ioaddr + INTSTAT));
+	if (debug > 4) {
+		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
+				  "intstat=%#8.8x.\n", dev->name, status,
+			(int)inl(ioaddr + INTSTAT));
+	}
 
-		if ((status & IntrSummary) == 0)
-			break;
-		handled = 1;
+	if ((status & IntrSummary) == 0)
+		goto out;
 
-		if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
-			epic_rx(dev);
+	handled = 1;
 
-		if (status & (TxEmpty | TxDone)) {
-			unsigned int dirty_tx, cur_tx;
-
-			/* Note: if this lock becomes a problem we can narrow the locked
-			   region at the cost of occasionally grabbing the lock more
-			   times. */
-			spin_lock(&ep->lock);
-			cur_tx = ep->cur_tx;
-			dirty_tx = ep->dirty_tx;
-			for (; cur_tx - dirty_tx > 0; dirty_tx++) {
-				struct sk_buff *skb;
-				int entry = dirty_tx % TX_RING_SIZE;
-				int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
-
-				if (txstatus & DescOwn)
-					break;	/* It still hasn't been Txed */
-
-				if ( ! (txstatus & 0x0001)) {
-					/* There was an major error, log it. */
-#ifndef final_version
-					if (debug > 1)
-						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
-							   dev->name, txstatus);
-#endif
-					ep->stats.tx_errors++;
-					if (txstatus & 0x1050) ep->stats.tx_aborted_errors++;
-					if (txstatus & 0x0008) ep->stats.tx_carrier_errors++;
-					if (txstatus & 0x0040) ep->stats.tx_window_errors++;
-					if (txstatus & 0x0010) ep->stats.tx_fifo_errors++;
-				} else {
-					ep->stats.collisions += (txstatus >> 8) & 15;
-					ep->stats.tx_packets++;
-					ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
-				}
-
-				/* Free the original skb. */
-				skb = ep->tx_skbuff[entry];
-				pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
-						 skb->len, PCI_DMA_TODEVICE);
-				dev_kfree_skb_irq(skb);
-				ep->tx_skbuff[entry] = 0;
-			}
-
-#ifndef final_version
-			if (cur_tx - dirty_tx > TX_RING_SIZE) {
-				printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
-					   dev->name, dirty_tx, cur_tx, ep->tx_full);
-				dirty_tx += TX_RING_SIZE;
-			}
-#endif
-			ep->dirty_tx = dirty_tx;
-			if (ep->tx_full
-				&& cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
-				/* The ring is no longer full, allow new TX entries. */
-				ep->tx_full = 0;
-				spin_unlock(&ep->lock);
-				netif_wake_queue(dev);
-			} else
-				spin_unlock(&ep->lock);
-		}
-
-		/* Check uncommon events all at once. */
-		if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
-					  PCIBusErr170 | PCIBusErr175)) {
-			if (status == 0xffffffff) /* Chip failed or removed (CardBus). */
-				break;
-			/* Always update the error counts to avoid overhead later. */
-			ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
-			ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
-			ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
-
-			if (status & TxUnderrun) { /* Tx FIFO underflow. */
-				ep->stats.tx_fifo_errors++;
-				outl(ep->tx_threshold += 128, ioaddr + TxThresh);
-				/* Restart the transmit process. */
-				outl(RestartTx, ioaddr + COMMAND);
-			}
-			if (status & RxOverflow) {	/* Missed a Rx frame. */
-				ep->stats.rx_errors++;
-			}
-			if (status & (RxOverflow | RxFull))
-				outw(RxQueued, ioaddr + COMMAND);
-			if (status & PCIBusErr170) {
-				printk(KERN_ERR "%s: PCI Bus Error! EPIC status %4.4x.\n",
-					   dev->name, status);
-				epic_pause(dev);
-				epic_restart(dev);
-			}
-			/* Clear all error sources. */
-			outl(status & 0x7f18, ioaddr + INTSTAT);
-		}
-		if (--boguscnt < 0) {
-			printk(KERN_ERR "%s: Too much work at interrupt, "
-				   "IntrStatus=0x%8.8x.\n",
-				   dev->name, status);
-			/* Clear all interrupt sources. */
-			outl(0x0001ffff, ioaddr + INTSTAT);
-			break;
-		}
-	} while (1);
+	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
+		spin_lock(&ep->napi_lock);
+		if (netif_rx_schedule_prep(dev)) {
+			epic_napi_irq_off(dev, ep);
+			__netif_rx_schedule(dev);
+		} else
+			ep->reschedule_in_poll++;
+		spin_unlock(&ep->napi_lock);
+	}
+	status &= ~EpicNapiEvent;
 
-	if (debug > 3)
-		printk(KERN_DEBUG "%s: exiting interrupt, intr_status=%#4.4x.\n",
-			   dev->name, status);
+	/* Check uncommon events all at once. */
+	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
+		if (status == EpicRemoved)
+			goto out;
+
+		/* Always update the error counts to avoid overhead later. */
+		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+
+		if (status & TxUnderrun) { /* Tx FIFO underflow. */
+			ep->stats.tx_fifo_errors++;
+			outl(ep->tx_threshold += 128, ioaddr + TxThresh);
+			/* Restart the transmit process. */
+			outl(RestartTx, ioaddr + COMMAND);
+		}
+		if (status & PCIBusErr170) {
+			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
+			       dev->name, status);
+			epic_pause(dev);
+			epic_restart(dev);
+		}
+		/* Clear all error sources. */
+		outl(status & 0x7f18, ioaddr + INTSTAT);
+	}
+
+out:
+	if (debug > 3) {
+		printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
+		       dev->name, status);
+	}
 
 	return IRQ_RETVAL(handled);
 }
 
-static int epic_rx(struct net_device *dev)
+static int epic_rx(struct net_device *dev, int budget)
 {
 	struct epic_private *ep = dev->priv;
 	int entry = ep->cur_rx % RX_RING_SIZE;
@@ -1171,6 +1229,10 @@ static int epic_rx(struct net_device *dev)
 	if (debug > 4)
 		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
 			   ep->rx_ring[entry].rxstatus);
+
+	if (rx_work_limit > budget)
+		rx_work_limit = budget;
+
 	/* If we own the next entry, it's a new packet. Send it up. */
 	while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
 		int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
@@ -1226,7 +1288,7 @@ static int epic_rx(struct net_device *dev)
 			ep->rx_skbuff[entry] = NULL;
 		}
 		skb->protocol = eth_type_trans(skb, dev);
-		netif_rx(skb);
+		netif_receive_skb(skb);
 		dev->last_rx = jiffies;
 		ep->stats.rx_packets++;
 		ep->stats.rx_bytes += pkt_len;
@@ -1254,6 +1316,65 @@ static int epic_rx(struct net_device *dev)
 	return work_done;
 }
 
+static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
+{
+	long ioaddr = dev->base_addr;
+	int status;
+
+	status = inl(ioaddr + INTSTAT);
+
+	if (status == EpicRemoved)
+		return;
+	if (status & RxOverflow) 	/* Missed a Rx frame. */
+		ep->stats.rx_errors++;
+	if (status & (RxOverflow | RxFull))
+		outw(RxQueued, ioaddr + COMMAND);
+}
+
+static int epic_poll(struct net_device *dev, int *budget)
+{
+	struct epic_private *ep = dev->priv;
+	int work_done, orig_budget;
+	long ioaddr = dev->base_addr;
+
+	orig_budget = (*budget > dev->quota) ? dev->quota : *budget;
+
+rx_action:
+
+	epic_tx(dev, ep);
+
+	work_done = epic_rx(dev, *budget);
+
+	epic_rx_err(dev, ep);
+
+	*budget -= work_done;
+	dev->quota -= work_done;
+
+	if (netif_running(dev) && (work_done < orig_budget)) {
+		unsigned long flags;
+		int more;
+
+		/* A bit baroque but it avoids a (space hungry) spin_unlock */
+
+		spin_lock_irqsave(&ep->napi_lock, flags);
+
+		more = ep->reschedule_in_poll;
+		if (!more) {
+			__netif_rx_complete(dev);
+			outl(EpicNapiEvent, ioaddr + INTSTAT);
+			epic_napi_irq_on(dev, ep);
+		} else
+			ep->reschedule_in_poll--;
+
+		spin_unlock_irqrestore(&ep->napi_lock, flags);
+
+		if (more)
+			goto rx_action;
+	}
+
+	return (work_done >= orig_budget);
+}
+
 static int epic_close(struct net_device *dev)
 {
 	long ioaddr = dev->base_addr;
@@ -1268,9 +1389,13 @@ static int epic_close(struct net_device *dev)
 		dev->name, (int)inl(ioaddr + INTSTAT));
 
 	del_timer_sync(&ep->timer);
-	epic_pause(dev);
+
+	epic_disable_int(dev, ep);
+
 	free_irq(dev->irq, dev);
 
+	epic_pause(dev);
+
 	/* Free all the skbuffs in the Rx queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		skb = ep->rx_skbuff[i];
@@ -1491,6 +1616,7 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
 #endif
 	pci_release_regions(pdev);
 	free_netdev(dev);
+	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 	/* pci_power_off(pdev, -1); */
 }
...