Commit 3af5b745 authored by Jeff Garzik

Merge pobox.com:/garz/repo/netdev-2.6/via-rhine

into pobox.com:/garz/repo/net-drivers-2.6
parents 25d477da 182181d0
@@ -177,6 +177,7 @@ static const int multicast_filter_limit = 32;
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
@@ -208,27 +209,15 @@ KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#else
#undef readb
#undef readw
#undef readl
#undef writeb
#undef writew
#undef writel
#define readb inb
#define readw inw
#define readl inl
#define writeb outb
#define writew outw
#define writel outl
#endif
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
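(Aside, not part of the patch: module_param() takes the variable, its C type and a sysfs permission mask; a mask of 0, as used above, keeps the parameter out of /sys/module/via_rhine/parameters/. A minimal sketch with a purely hypothetical parameter name:
	static int example_level = 1;                  /* hypothetical parameter, not in the driver */
	module_param(example_level, int, 0444);        /* non-zero mask: world-readable via sysfs */
	MODULE_PARM_DESC(example_level, "Example knob, illustration only");
Values can still be passed at load time, e.g. "modprobe via-rhine debug=3 rx_copybreak=200".)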
@@ -363,7 +352,7 @@ enum rhine_quirks {
*/
/* Beware of PCI posted writes */
#define IOSYNC do { readb(dev->base_addr + StationAddr); } while (0)
#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
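(Aside, not part of the patch: the dummy read forces any posted PCI writes ahead of it to actually reach the chip before the driver continues, and because ioread8() works on both ioport- and MMIO-mapped BARs the same macro now serves either access method. A minimal sketch of the idiom, using register names from this driver:
	iowrite16(CmdStop, ioaddr + ChipCmd);     /* an MMIO write may be posted by the bridge */
	ioread8(ioaddr + StationAddr);            /* read-back flushes the posted write */
)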
static struct pci_device_id rhine_pci_tbl[] =
{
@@ -500,6 +489,7 @@ struct rhine_private {
u8 tx_thresh, rx_thresh;
struct mii_if_info mii_if;
void __iomem *base;
};
static int mdio_read(struct net_device *dev, int phy_id, int location);
@@ -529,14 +519,14 @@ static void rhine_shutdown (struct device *gdev);
static inline u32 get_intr_status(struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
u32 intr_status;
intr_status = readw(ioaddr + IntrStatus);
intr_status = ioread16(ioaddr + IntrStatus);
/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
if (rp->quirks & rqStatusWBRace)
intr_status |= readb(ioaddr + IntrStatus2) << 16;
intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
return intr_status;
}
@@ -546,32 +536,32 @@ static inline u32 get_intr_status(struct net_device *dev)
*/
static void rhine_power_init(struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
u16 wolstat;
if (rp->quirks & rqWOL) {
/* Make sure chip is in power state D0 */
writeb(readb(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
/* Disable "force PME-enable" */
writeb(0x80, ioaddr + WOLcgClr);
iowrite8(0x80, ioaddr + WOLcgClr);
/* Clear power-event config bits (WOL) */
writeb(0xFF, ioaddr + WOLcrClr);
iowrite8(0xFF, ioaddr + WOLcrClr);
/* More recent cards can manage two additional patterns */
if (rp->quirks & rq6patterns)
writeb(0x03, ioaddr + WOLcrClr1);
iowrite8(0x03, ioaddr + WOLcrClr1);
/* Save power-event status bits */
wolstat = readb(ioaddr + PwrcsrSet);
wolstat = ioread8(ioaddr + PwrcsrSet);
if (rp->quirks & rq6patterns)
wolstat |= (readb(ioaddr + PwrcsrSet1) & 0x03) << 8;
wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
/* Clear power-event status bits */
writeb(0xFF, ioaddr + PwrcsrClr);
iowrite8(0xFF, ioaddr + PwrcsrClr);
if (rp->quirks & rq6patterns)
writeb(0x03, ioaddr + PwrcsrClr1);
iowrite8(0x03, ioaddr + PwrcsrClr1);
if (wolstat) {
char *reason;
@@ -602,27 +592,27 @@ static void rhine_power_init(struct net_device *dev)
static void rhine_chip_reset(struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
writeb(Cmd1Reset, ioaddr + ChipCmd1);
iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
IOSYNC;
if (readb(ioaddr + ChipCmd1) & Cmd1Reset) {
if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
printk(KERN_INFO "%s: Reset not complete yet. "
"Trying harder.\n", DRV_NAME);
/* Force reset */
if (rp->quirks & rqForceReset)
writeb(0x40, ioaddr + MiscCmd);
iowrite8(0x40, ioaddr + MiscCmd);
/* Reset can take somewhat longer (rare) */
RHINE_WAIT_FOR(!(readb(ioaddr + ChipCmd1) & Cmd1Reset));
RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
}
if (debug > 1)
printk(KERN_INFO "%s: Reset %s.\n", dev->name,
(readb(ioaddr + ChipCmd1) & Cmd1Reset) ?
(ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
"failed" : "succeeded");
}
@@ -647,8 +637,8 @@ static void enable_mmio(long pioaddr, u32 quirks)
*/
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
outb(0x20, pioaddr + MACRegEEcsr);
RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
@@ -664,7 +654,7 @@ static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
/* Turn off EEPROM-controlled wake-up (magic packet) */
if (rp->quirks & rqWOL)
writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}
@@ -702,9 +692,14 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
u32 quirks;
long pioaddr;
long memaddr;
long ioaddr;
void __iomem *ioaddr;
int io_size, phy_id;
const char *name;
#ifdef USE_MMIO
int bar = 1;
#else
int bar = 0;
#endif
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -783,10 +778,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
if (rc)
goto err_out_free_netdev;
#ifdef USE_MMIO
enable_mmio(pioaddr, quirks);
ioaddr = (long) ioremap(memaddr, io_size);
ioaddr = pci_iomap(pdev, bar, io_size);
if (!ioaddr) {
rc = -EIO;
printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
@@ -794,6 +786,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
goto err_out_free_res;
}
#ifdef USE_MMIO
enable_mmio(pioaddr, quirks);
/* Check that selected MMIO registers match the PIO ones */
i = 0;
while (mmio_verify_registers[i]) {
@@ -807,18 +802,17 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
goto err_out_unmap;
}
}
#else
ioaddr = pioaddr;
#endif /* USE_MMIO */
dev->base_addr = ioaddr;
dev->base_addr = (unsigned long)ioaddr;
rp->base = ioaddr;
/* Get chip registers into a sane state */
rhine_power_init(dev);
rhine_hw_init(dev, pioaddr);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
if (!is_valid_ether_addr(dev->dev_addr)) {
rc = -EIO;
@@ -828,7 +822,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
/* For Rhine-I/II, phy_id is loaded from EEPROM */
if (!phy_id)
phy_id = readb(ioaddr + 0x6C);
phy_id = ioread8(ioaddr + 0x6C);
dev->irq = pdev->irq;
@@ -901,10 +895,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
return 0;
err_out_unmap:
#ifdef USE_MMIO
iounmap((void *)ioaddr);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
#endif
pci_release_regions(pdev);
err_out_free_netdev:
free_netdev(dev);
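(Aside, not part of the patch: the probe and error-path changes above follow the generic PCI iomap pattern. A minimal sketch, with placeholder names rather than the driver's own:
	void __iomem *base;

	base = pci_iomap(pdev, bar, region_len);   /* maps an I/O-port or MMIO BAR alike */
	if (!base)
		return -EIO;

	iowrite8(0x01, base + SOME_REG);           /* replaces writeb()/outb() */
	status = ioread16(base + SOME_STATUS);     /* replaces readw()/inw() */

	pci_iounmap(pdev, base);                   /* single teardown call for either mapping */
Here bar, region_len, SOME_REG and SOME_STATUS are illustrative placeholders only.)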
@@ -947,7 +939,7 @@ static int alloc_ring(struct net_device* dev)
return 0;
}
void free_ring(struct net_device* dev)
static void free_ring(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1071,102 +1063,102 @@ static void free_tbufs(struct net_device* dev)
static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
mii_check_media(&rp->mii_if, debug, init_media);
if (rp->mii_if.full_duplex)
writeb(readb(ioaddr + ChipCmd1) | Cmd1FDuplex,
iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
ioaddr + ChipCmd1);
else
writeb(readb(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
ioaddr + ChipCmd1);
}
static void init_registers(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
int i;
for (i = 0; i < 6; i++)
writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
/* Initialize other registers. */
writew(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
/* Configure initial FIFO thresholds. */
writeb(0x20, ioaddr + TxConfig);
iowrite8(0x20, ioaddr + TxConfig);
rp->tx_thresh = 0x20;
rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
writel(rp->rx_ring_dma, ioaddr + RxRingPtr);
writel(rp->tx_ring_dma, ioaddr + TxRingPtr);
iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
rhine_set_rx_mode(dev);
/* Enable interrupts by setting the interrupt mask. */
writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
IntrTxDone | IntrTxError | IntrTxUnderrun |
IntrPCIErr | IntrStatsMax | IntrLinkChange,
ioaddr + IntrEnable);
writew(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
ioaddr + ChipCmd);
rhine_check_media(dev, 1);
}
/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(long ioaddr)
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
writeb(0, ioaddr + MIICmd);
writeb(MII_BMSR, ioaddr + MIIRegAddr);
writeb(0x80, ioaddr + MIICmd);
iowrite8(0, ioaddr + MIICmd);
iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
iowrite8(0x80, ioaddr + MIICmd);
RHINE_WAIT_FOR((readb(ioaddr + MIIRegAddr) & 0x20));
RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
writeb(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(long ioaddr, u32 quirks)
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
writeb(0, ioaddr + MIICmd);
iowrite8(0, ioaddr + MIICmd);
if (quirks & rqRhineI) {
writeb(0x01, ioaddr + MIIRegAddr); // MII_BMSR
iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
/* Can be called from ISR. Evil. */
mdelay(1);
/* 0x80 must be set immediately before turning it off */
writeb(0x80, ioaddr + MIICmd);
iowrite8(0x80, ioaddr + MIICmd);
RHINE_WAIT_FOR(readb(ioaddr + MIIRegAddr) & 0x20);
RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
/* Heh. Now clear 0x80 again. */
writeb(0, ioaddr + MIICmd);
iowrite8(0, ioaddr + MIICmd);
}
else
RHINE_WAIT_FOR(readb(ioaddr + MIIRegAddr) & 0x80);
RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}
/* Read and write over the MII Management Data I/O (MDIO) interface. */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
int result;
rhine_disable_linkmon(ioaddr, rp->quirks);
/* rhine_disable_linkmon already cleared MIICmd */
writeb(phy_id, ioaddr + MIIPhyAddr);
writeb(regnum, ioaddr + MIIRegAddr);
writeb(0x40, ioaddr + MIICmd); /* Trigger read */
RHINE_WAIT_FOR(!(readb(ioaddr + MIICmd) & 0x40));
result = readw(ioaddr + MIIData);
iowrite8(phy_id, ioaddr + MIIPhyAddr);
iowrite8(regnum, ioaddr + MIIRegAddr);
iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
result = ioread16(ioaddr + MIIData);
rhine_enable_linkmon(ioaddr);
return result;
@@ -1175,16 +1167,16 @@ static int mdio_read(struct net_device *dev, int phy_id, int regnum)
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
rhine_disable_linkmon(ioaddr, rp->quirks);
/* rhine_disable_linkmon already cleared MIICmd */
writeb(phy_id, ioaddr + MIIPhyAddr);
writeb(regnum, ioaddr + MIIRegAddr);
writew(value, ioaddr + MIIData);
writeb(0x20, ioaddr + MIICmd); /* Trigger write */
RHINE_WAIT_FOR(!(readb(ioaddr + MIICmd) & 0x20));
iowrite8(phy_id, ioaddr + MIIPhyAddr);
iowrite8(regnum, ioaddr + MIIRegAddr);
iowrite16(value, ioaddr + MIIData);
iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
rhine_enable_linkmon(ioaddr);
}
@@ -1192,7 +1184,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value
static int rhine_open(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
int rc;
rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
@@ -1214,7 +1206,7 @@ static int rhine_open(struct net_device *dev)
if (debug > 2)
printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
"MII status: %4.4x.\n",
dev->name, readw(ioaddr + ChipCmd),
dev->name, ioread16(ioaddr + ChipCmd),
mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
netif_start_queue(dev);
@@ -1225,11 +1217,11 @@ static int rhine_open(struct net_device *dev)
static void rhine_tx_timeout(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
"%4.4x, resetting...\n",
dev->name, readw(ioaddr + IntrStatus),
dev->name, ioread16(ioaddr + IntrStatus),
mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
/* protect against concurrent rx interrupts */
@@ -1258,7 +1250,7 @@ static void rhine_tx_timeout(struct net_device *dev)
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
unsigned entry;
/* Caution: the write order is important here, set the field
@@ -1276,7 +1268,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
rp->tx_skbuff[entry] = skb;
if ((rp->quirks & rqRhineI) &&
(((long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
(((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
/* Must use alignment buffer. */
if (skb->len > PKT_BUF_SZ) {
/* packet too long, drop it */
@@ -1311,7 +1303,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
/* Non-x86 Todo: explicitly flush cache lines here. */
/* Wake the potentially-idle transmit channel */
writeb(readb(ioaddr + ChipCmd1) | Cmd1TxDemand,
iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
ioaddr + ChipCmd1);
IOSYNC;
@@ -1334,20 +1326,19 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
struct net_device *dev = dev_instance;
long ioaddr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
u32 intr_status;
int boguscnt = max_interrupt_work;
int handled = 0;
ioaddr = dev->base_addr;
while ((intr_status = get_intr_status(dev))) {
handled = 1;
/* Acknowledge all of the current interrupt sources ASAP. */
if (intr_status & IntrTxDescRace)
writeb(0x08, ioaddr + IntrStatus2);
writew(intr_status & 0xffff, ioaddr + IntrStatus);
iowrite8(0x08, ioaddr + IntrStatus2);
iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
IOSYNC;
if (debug > 4)
@@ -1361,9 +1352,9 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *
if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
if (intr_status & IntrTxErrSummary) {
/* Avoid scavenging before Tx engine turned off */
RHINE_WAIT_FOR(!(readb(ioaddr+ChipCmd) & CmdTxOn));
RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
if (debug > 2 &&
readb(ioaddr+ChipCmd) & CmdTxOn)
ioread8(ioaddr+ChipCmd) & CmdTxOn)
printk(KERN_WARNING "%s: "
"rhine_interrupt() Tx engine"
"still on.\n", dev->name);
@@ -1387,7 +1378,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *
if (debug > 3)
printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
dev->name, readw(ioaddr + IntrStatus));
dev->name, ioread16(ioaddr + IntrStatus));
return IRQ_RETVAL(handled);
}
@@ -1582,16 +1573,16 @@ static void rhine_rx(struct net_device *dev)
* these, for others the counters are set to 1 when written to and
* instead cleared when read. So we clear them both ways ...
*/
static inline void clear_tally_counters(const long ioaddr)
static inline void clear_tally_counters(void __iomem *ioaddr)
{
writel(0, ioaddr + RxMissed);
readw(ioaddr + RxCRCErrs);
readw(ioaddr + RxMissed);
iowrite32(0, ioaddr + RxMissed);
ioread16(ioaddr + RxCRCErrs);
ioread16(ioaddr + RxMissed);
}
static void rhine_restart_tx(struct net_device *dev) {
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
int entry = rp->dirty_tx % TX_RING_SIZE;
u32 intr_status;
@@ -1604,12 +1595,12 @@ static void rhine_restart_tx(struct net_device *dev) {
if ((intr_status & IntrTxErrSummary) == 0) {
/* We know better than the chip where it should continue. */
writel(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
ioaddr + TxRingPtr);
writeb(readb(ioaddr + ChipCmd) | CmdTxOn,
iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
ioaddr + ChipCmd);
writeb(readb(ioaddr + ChipCmd1) | Cmd1TxDemand,
iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
ioaddr + ChipCmd1);
IOSYNC;
}
@@ -1626,15 +1617,15 @@ static void rhine_restart_tx(struct net_device *dev) {
static void rhine_error(struct net_device *dev, int intr_status)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
spin_lock(&rp->lock);
if (intr_status & IntrLinkChange)
rhine_check_media(dev, 0);
if (intr_status & IntrStatsMax) {
rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
rp->stats.rx_missed_errors += readw(ioaddr + RxMissed);
rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
clear_tally_counters(ioaddr);
}
if (intr_status & IntrTxAborted) {
@@ -1644,7 +1635,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
}
if (intr_status & IntrTxUnderrun) {
if (rp->tx_thresh < 0xE0)
writeb(rp->tx_thresh += 0x20, ioaddr + TxConfig);
iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
if (debug > 1)
printk(KERN_INFO "%s: Transmitter underrun, Tx "
"threshold now %2.2x.\n",
@@ -1659,7 +1650,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
(intr_status & (IntrTxAborted |
IntrTxUnderrun | IntrTxDescRace)) == 0) {
if (rp->tx_thresh < 0xE0) {
writeb(rp->tx_thresh += 0x20, ioaddr + TxConfig);
iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
}
if (debug > 1)
printk(KERN_INFO "%s: Unspecified error. Tx "
@@ -1684,12 +1675,12 @@ static void rhine_error(struct net_device *dev, int intr_status)
static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
unsigned long flags;
spin_lock_irqsave(&rp->lock, flags);
rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
rp->stats.rx_missed_errors += readw(ioaddr + RxMissed);
rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
clear_tally_counters(ioaddr);
spin_unlock_irqrestore(&rp->lock, flags);
@@ -1699,7 +1690,7 @@ static struct net_device_stats *rhine_get_stats(struct net_device *dev)
static void rhine_set_rx_mode(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
u32 mc_filter[2]; /* Multicast hash filter */
u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
@@ -1708,13 +1699,13 @@ static void rhine_set_rx_mode(struct net_device *dev)
printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
dev->name);
rx_mode = 0x1C;
writel(0xffffffff, ioaddr + MulticastFilter0);
writel(0xffffffff, ioaddr + MulticastFilter1);
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
} else if ((dev->mc_count > multicast_filter_limit)
|| (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
writel(0xffffffff, ioaddr + MulticastFilter0);
writel(0xffffffff, ioaddr + MulticastFilter1);
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
rx_mode = 0x0C;
} else {
struct dev_mc_list *mclist;
@@ -1726,11 +1717,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
writel(mc_filter[0], ioaddr + MulticastFilter0);
writel(mc_filter[1], ioaddr + MulticastFilter1);
iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
rx_mode = 0x0C;
}
writeb(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1854,8 +1845,8 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int rhine_close(struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
spin_lock_irq(&rp->lock);
@@ -1864,16 +1855,16 @@ static int rhine_close(struct net_device *dev)
if (debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard, "
"status was %4.4x.\n",
dev->name, readw(ioaddr + ChipCmd));
dev->name, ioread16(ioaddr + ChipCmd));
/* Switch to loopback mode to avoid hardware races. */
writeb(rp->tx_thresh | 0x02, ioaddr + TxConfig);
iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
/* Disable interrupts by clearing the interrupt mask. */
writew(0x0000, ioaddr + IntrEnable);
iowrite16(0x0000, ioaddr + IntrEnable);
/* Stop the chip's Tx and Rx processes. */
writew(CmdStop, ioaddr + ChipCmd);
iowrite16(CmdStop, ioaddr + ChipCmd);
spin_unlock_irq(&rp->lock);
@@ -1889,15 +1880,13 @@ static int rhine_close(struct net_device *dev)
static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
unregister_netdev(dev);
pci_iounmap(pdev, rp->base);
pci_release_regions(pdev);
#ifdef USE_MMIO
iounmap((char *)(dev->base_addr));
#endif
free_netdev(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -1908,33 +1897,40 @@ static void rhine_shutdown (struct device *gendev)
struct pci_dev *pdev = to_pci_dev(gendev);
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
rhine_power_init(dev);
/* Make sure we use pattern 0, 1 and not 4, 5 */
if (rp->quirks & rq6patterns)
writeb(0x04, ioaddr + 0xA7);
iowrite8(0x04, ioaddr + 0xA7);
if (rp->wolopts & WAKE_MAGIC)
writeb(WOLmagic, ioaddr + WOLcrSet);
if (rp->wolopts & WAKE_MAGIC) {
iowrite8(WOLmagic, ioaddr + WOLcrSet);
/*
* Turn EEPROM-controlled wake-up back on -- some hardware may
* not cooperate otherwise.
*/
iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
}
if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
writeb(WOLbmcast, ioaddr + WOLcgSet);
iowrite8(WOLbmcast, ioaddr + WOLcgSet);
if (rp->wolopts & WAKE_PHY)
writeb(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
if (rp->wolopts & WAKE_UCAST)
writeb(WOLucast, ioaddr + WOLcrSet);
iowrite8(WOLucast, ioaddr + WOLcrSet);
if (rp->wolopts) {
/* Enable legacy WOL (for old motherboards) */
writeb(0x01, ioaddr + PwcfgSet);
writeb(readb(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
iowrite8(0x01, ioaddr + PwcfgSet);
iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
}
/* Hit power state D3 (sleep) */
writeb(readb(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
/* TODO: Check use of pci_enable_wake() */