Commit 23139076 authored by Linus Torvalds's avatar Linus Torvalds

Merge bk://gkernel.bkbits.net/net-drivers-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 2d7f7aaf 3af5b745
......@@ -1979,6 +1979,15 @@ config R8169_NAPI
If in doubt, say N.
config R8169_VLAN
bool "VLAN support"
depends on R8169 && VLAN_8021Q
---help---
Say Y here for the r8169 driver to support the functions required
by the kernel 802.1Q code.
If in doubt, say Y.
config SK98LIN
tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support"
depends on PCI
......@@ -2191,6 +2200,17 @@ config S2IO_NAPI
If in doubt, say N.
config 2BUFF_MODE
bool "Use 2 Buffer Mode on Rx side."
depends on S2IO
---help---
On enabling the 2 buffer mode, the received frame will be
split into 2 parts before being DMA'ed to the hosts memory.
The parts are the ethernet header and ethernet payload.
This is useful on systems where DMA'ing to unaligned
physical memory locations comes with a heavy price.
If not sure please say N.
endmenu
source "drivers/net/tokenring/Kconfig"
......
......@@ -6,6 +6,7 @@
History:
Feb 4 2002 - created initially by ShuChen <shuchen@realtek.com.tw>.
May 20 2002 - Add link status force-mode and TBI mode support.
2004 - Massive updates. See kernel SCM system for details.
=========================================================================
1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
Command: 'insmod r8169 media = SET_MEDIA'
......@@ -33,22 +34,34 @@ VERSION 1.2 <2002/11/30>
- Copy mc_filter setup code from 8139cp
(includes an optimization, and avoids set_bit use)
VERSION 1.6LK <2004/04/14>
- Merge of Realtek's version 1.6
- Conversion to DMA API
- Suspend/resume
- Endianness
- Misc Rx/Tx bugs
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#define RTL8169_VERSION "1.2"
#define RTL8169_VERSION "1.6LK"
#define MODULENAME "r8169"
#define RTL8169_DRIVER_NAME MODULENAME " Gigabit Ethernet driver " RTL8169_VERSION
#define PFX MODULENAME ": "
......@@ -65,17 +78,23 @@ VERSION 1.2 <2002/11/30>
#define dprintk(fmt, args...) do {} while (0)
#endif /* RTL8169_DEBUG */
#define TX_BUFFS_AVAIL(tp) \
(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
#ifdef CONFIG_R8169_NAPI
#define rtl8169_rx_skb netif_receive_skb
#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
#define rtl8169_rx_quota(count, quota) min(count, quota)
#else
#define rtl8169_rx_skb netif_rx
#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
#define rtl8169_rx_quota(count, quota) count
#endif
/* media options */
#define MAX_UNITS 8
static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int num_media = 0;
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
......@@ -87,9 +106,6 @@ static int multicast_filter_limit = 32;
/* MAC address length*/
#define MAC_ADDR_LEN 6
/* max supported gigabit ethernet frame size -- must be at least (dev->mtu+14+4).*/
#define MAX_ETH_FRAME_SIZE 1536
#define TX_FIFO_THRESH 256 /* In bytes */
#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
......@@ -99,6 +115,7 @@ static int multicast_filter_limit = 32;
#define RxPacketMaxSize 0x0800 /* Maximum size supported is 16K-1 */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
#define R8169_REGS_SIZE 256
#define R8169_NAPI_WEIGHT 64
#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
......@@ -106,7 +123,6 @@ static int multicast_filter_limit = 32;
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
#define RTL_MIN_IO_SIZE 0x80
#define RTL8169_TX_TIMEOUT (6*HZ)
#define RTL8169_PHY_TIMEOUT (10*HZ)
......@@ -122,7 +138,8 @@ enum mac_version {
RTL_GIGA_MAC_VER_B = 0x00,
/* RTL_GIGA_MAC_VER_C = 0x03, */
RTL_GIGA_MAC_VER_D = 0x01,
RTL_GIGA_MAC_VER_E = 0x02
RTL_GIGA_MAC_VER_E = 0x02,
RTL_GIGA_MAC_VER_X = 0x04 /* Greater than RTL_GIGA_MAC_VER_E */
};
enum phy_version {
......@@ -305,28 +322,57 @@ enum RTL8169_register_content {
};
enum _DescStatusBit {
OWNbit = 0x80000000,
EORbit = 0x40000000,
FSbit = 0x20000000,
LSbit = 0x10000000,
DescOwn = (1 << 31), /* Descriptor is owned by NIC */
RingEnd = (1 << 30), /* End of descriptor ring */
FirstFrag = (1 << 29), /* First segment of a packet */
LastFrag = (1 << 28), /* Final segment of a packet */
/* Tx private */
LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
MSSShift = 16, /* MSS value position */
MSSMask = 0xfff, /* MSS value + LargeSend bit: 12 bits */
IPCS = (1 << 18), /* Calculate IP checksum */
UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
TxVlanTag = (1 << 17), /* Add VLAN tag */
/* Rx private */
PID1 = (1 << 18), /* Protocol ID bit 1/2 */
PID0 = (1 << 17), /* Protocol ID bit 2/2 */
#define RxProtoUDP (PID1)
#define RxProtoTCP (PID0)
#define RxProtoIP (PID1 | PID0)
#define RxProtoMask RxProtoIP
IPFail = (1 << 16), /* IP checksum failed */
UDPFail = (1 << 15), /* UDP/IP checksum failed */
TCPFail = (1 << 14), /* TCP/IP checksum failed */
RxVlanTag = (1 << 16), /* VLAN tag available */
};
#define RsvdMask 0x3fffc000
struct TxDesc {
u32 status;
u32 vlan_tag;
u32 opts1;
u32 opts2;
u64 addr;
};
struct RxDesc {
u32 status;
u32 vlan_tag;
u32 opts1;
u32 opts2;
u64 addr;
};
/*
 * Per-Tx-descriptor bookkeeping. One entry per hardware Tx descriptor:
 * holds the DMA-mapped length of that descriptor's buffer and, on the
 * descriptor carrying the last fragment of a packet, the skb to free
 * once transmission completes.
 */
struct ring_info {
struct sk_buff *skb;	/* skb to release when the packet has been sent */
u32 len;	/* mapped buffer length; 0 means the slot is unused */
u8 __pad[sizeof(void *) - sizeof(u32)];	/* pads entry to pointer alignment — TODO confirm intent */
};
struct rtl8169_private {
void *mmio_addr; /* memory map physical address */
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev; /* Index of PCI device */
struct net_device_stats stats; /* statistics of net device */
spinlock_t lock; /* spin lock flag */
......@@ -342,25 +388,29 @@ struct rtl8169_private {
dma_addr_t TxPhyAddr;
dma_addr_t RxPhyAddr;
struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
struct sk_buff *Tx_skbuff[NUM_TX_DESC]; /* Tx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
unsigned rx_buf_sz;
struct timer_list timer;
u16 cp_cmd;
u16 intr_mask;
int phy_auto_nego_reg;
int phy_1000_ctrl_reg;
#ifdef CONFIG_R8169_VLAN
struct vlan_group *vlgrp;
#endif
int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
void (*get_settings)(struct net_device *, struct ethtool_cmd *);
void (*phy_reset_enable)(void *);
unsigned int (*phy_reset_pending)(void *);
unsigned int (*link_ok)(void *);
void (*phy_reset_enable)(void __iomem *);
unsigned int (*phy_reset_pending)(void __iomem *);
unsigned int (*link_ok)(void __iomem *);
struct work_struct task;
};
MODULE_AUTHOR("Realtek");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
MODULE_PARM(media, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(use_dac, "i");
module_param_array(media, int, &num_media, 0);
module_param(rx_copybreak, int, 0);
module_param(use_dac, int, 0);
MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
MODULE_LICENSE("GPL");
......@@ -374,6 +424,8 @@ static int rtl8169_close(struct net_device *dev);
static void rtl8169_set_rx_mode(struct net_device *dev);
static void rtl8169_tx_timeout(struct net_device *dev);
static struct net_device_stats *rtl8169_get_stats(struct net_device *netdev);
static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
void __iomem *);
#ifdef CONFIG_R8169_NAPI
static int rtl8169_poll(struct net_device *dev, int *budget);
#endif
......@@ -390,7 +442,7 @@ static const unsigned int rtl8169_rx_config =
#define PHY_Cap_100_Half_Or_Less PHY_Cap_100_Half | PHY_Cap_10_Full_Or_Less
#define PHY_Cap_100_Full_Or_Less PHY_Cap_100_Full | PHY_Cap_100_Half_Or_Less
static void mdio_write(void *ioaddr, int RegAddr, int value)
static void mdio_write(void __iomem *ioaddr, int RegAddr, int value)
{
int i;
......@@ -405,7 +457,7 @@ static void mdio_write(void *ioaddr, int RegAddr, int value)
}
}
static int mdio_read(void *ioaddr, int RegAddr)
static int mdio_read(void __iomem *ioaddr, int RegAddr)
{
int i, value = -1;
......@@ -423,32 +475,32 @@ static int mdio_read(void *ioaddr, int RegAddr)
return value;
}
static unsigned int rtl8169_tbi_reset_pending(void *ioaddr)
static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr)
{
return RTL_R32(TBICSR) & TBIReset;
}
static unsigned int rtl8169_xmii_reset_pending(void *ioaddr)
static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
{
return mdio_read(ioaddr, 0) & 0x8000;
}
static unsigned int rtl8169_tbi_link_ok(void *ioaddr)
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
return RTL_R32(TBICSR) & TBILinkOk;
}
static unsigned int rtl8169_xmii_link_ok(void *ioaddr)
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
return RTL_R8(PHYstatus) & LinkStatus;
}
static void rtl8169_tbi_reset_enable(void *ioaddr)
static void rtl8169_tbi_reset_enable(void __iomem *ioaddr)
{
RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
static void rtl8169_xmii_reset_enable(void *ioaddr)
static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
{
unsigned int val;
......@@ -457,7 +509,7 @@ static void rtl8169_xmii_reset_enable(void *ioaddr)
}
static void rtl8169_check_link_status(struct net_device *dev,
struct rtl8169_private *tp, void *ioaddr)
struct rtl8169_private *tp, void __iomem *ioaddr)
{
unsigned long flags;
......@@ -512,11 +564,16 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
strcpy(info->bus_info, pci_name(tp->pci_dev));
}
/*
 * ethtool hook: report the size (in bytes) of the buffer that
 * rtl8169_get_regs() will fill with a register dump.
 */
static int rtl8169_get_regs_len(struct net_device *dev)
{
return R8169_REGS_SIZE;
}
static int rtl8169_set_speed_tbi(struct net_device *dev,
u8 autoneg, u16 speed, u8 duplex)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
int ret = 0;
u32 reg;
......@@ -540,7 +597,7 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
u8 autoneg, u16 speed, u8 duplex)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
int auto_nego, giga_ctrl;
auto_nego = mdio_read(ioaddr, PHY_AUTO_NEGO_REG);
......@@ -602,10 +659,108 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return ret;
}
/*
 * ethtool hook: return non-zero when hardware Rx checksumming is enabled,
 * i.e. when the RxChkSum bit is set in the cached CPlusCmd value.
 */
static u32 rtl8169_get_rx_csum(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
return tp->cp_cmd & RxChkSum;
}
/*
 * ethtool hook: enable (data != 0) or disable hardware Rx checksumming.
 * Updates the cached CPlusCmd value under the device lock and pushes it
 * to the chip. Always succeeds.
 */
static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u16 cmd;

	spin_lock_irqsave(&tp->lock, flags);

	cmd = tp->cp_cmd;
	if (data)
		cmd |= RxChkSum;
	else
		cmd &= ~RxChkSum;
	tp->cp_cmd = cmd;

	/* Write the new value, then read it back to flush the posted write. */
	RTL_W16(CPlusCmd, cmd);
	RTL_R16(CPlusCmd);

	spin_unlock_irqrestore(&tp->lock, flags);

	return 0;
}
#ifdef CONFIG_R8169_VLAN
/*
 * Build the VLAN bits for a Tx descriptor's opts2 field: TxVlanTag plus the
 * tag value when VLAN acceleration is active and the skb carries a tag,
 * otherwise 0.
 * NOTE(review): the tag is converted with cpu_to_be16 before being ORed in —
 * presumably the chip expects the tag in network byte order; confirm against
 * the Rx path, which reverses this with be16_to_cpu.
 */
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
struct sk_buff *skb)
{
return (tp->vlgrp && vlan_tx_tag_present(skb)) ?
TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb)) : 0x00;
}
/*
 * net_device VLAN hook: record the VLAN group and enable/disable hardware
 * VLAN tag stripping on Rx (the RxVlan bit of CPlusCmd) accordingly.
 * Runs under the device lock since it touches shared cp_cmd state.
 */
static void rtl8169_vlan_rx_register(struct net_device *dev,
struct vlan_group *grp)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
tp->vlgrp = grp;
if (tp->vlgrp)
tp->cp_cmd |= RxVlan;
else
tp->cp_cmd &= ~RxVlan;
/* Push the new setting; the read-back flushes the posted PCI write. */
RTL_W16(CPlusCmd, tp->cp_cmd);
RTL_R16(CPlusCmd);
spin_unlock_irqrestore(&tp->lock, flags);
}
/*
 * net_device VLAN hook: a VLAN id is being unregistered. Clear its slot in
 * the group's device table (under the lock) so no further Rx packets are
 * steered to it. Nothing hardware-side needs to change per-vid.
 */
static void rtl8169_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct rtl8169_private *tp = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
if (tp->vlgrp)
tp->vlgrp->vlan_devices[vid] = NULL;
spin_unlock_irqrestore(&tp->lock, flags);
}
/*
 * Hand a received skb to the VLAN layer when the descriptor carries a
 * hardware-stripped tag and a VLAN group is registered.
 * Returns 0 if the skb was consumed via the VLAN path, -1 if the caller
 * must deliver it through the normal path. opts2 is cleared either way so
 * the descriptor can be recycled.
 */
static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
			       struct sk_buff *skb)
{
	u32 opts2 = desc->opts2;
	int ret = -1;

	if (tp->vlgrp && (opts2 & RxVlanTag)) {
		/* Low 16 bits of opts2 hold the tag in on-wire byte order. */
		rtl8169_rx_hwaccel_skb(skb, tp->vlgrp,
				       be16_to_cpu(opts2 & 0xffff));
		ret = 0;
	}

	desc->opts2 = 0;
	return ret;
}
#else /* !CONFIG_R8169_VLAN */
/* No-VLAN stub: no tag bits are ever added to Tx descriptors. */
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
struct sk_buff *skb)
{
return 0;
}
/* No-VLAN stub: always tell the caller to use the normal Rx delivery path. */
static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
struct sk_buff *skb)
{
return -1;
}
#endif
static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
u32 status;
cmd->supported =
......@@ -624,7 +779,7 @@ static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
u8 status;
cmd->supported = SUPPORTED_10baseT_Half |
......@@ -675,15 +830,38 @@ static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
/*
 * ethtool hook: copy the chip's MMIO register window into the user-supplied
 * buffer p. The requested length is clamped to R8169_REGS_SIZE and the copy
 * is done under the device lock so the dump is internally consistent.
 */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
struct rtl8169_private *tp = netdev_priv(dev);
unsigned long flags;
if (regs->len > R8169_REGS_SIZE)
regs->len = R8169_REGS_SIZE;
spin_lock_irqsave(&tp->lock, flags);
memcpy_fromio(p, tp->mmio_addr, regs->len);
spin_unlock_irqrestore(&tp->lock, flags);
}
/*
 * ethtool operations table. Checksum/SG/TSO getters and setters use the
 * generic ethtool_op_* helpers; everything else is driver-specific.
 */
static struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
.get_link = ethtool_op_get_link,
.get_settings = rtl8169_get_settings,
.set_settings = rtl8169_set_settings,
.get_rx_csum = rtl8169_get_rx_csum,
.set_rx_csum = rtl8169_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
.get_regs = rtl8169_get_regs,
};
static void rtl8169_write_gmii_reg_bit(void *ioaddr, int reg, int bitnum,
static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
int bitval)
{
int val;
......@@ -694,12 +872,13 @@ static void rtl8169_write_gmii_reg_bit(void *ioaddr, int reg, int bitnum,
mdio_write(ioaddr, reg, val & 0xffff);
}
static void rtl8169_get_mac_version(struct rtl8169_private *tp, void *ioaddr)
static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
const struct {
u32 mask;
int mac_version;
} mac_info[] = {
{ 0x1 << 28, RTL_GIGA_MAC_VER_X },
{ 0x1 << 26, RTL_GIGA_MAC_VER_E },
{ 0x1 << 23, RTL_GIGA_MAC_VER_D },
{ 0x00000000, RTL_GIGA_MAC_VER_B } /* Catch-all */
......@@ -734,7 +913,7 @@ static void rtl8169_print_mac_version(struct rtl8169_private *tp)
dprintk("mac_version == Unknown\n");
}
static void rtl8169_get_phy_version(struct rtl8169_private *tp, void *ioaddr)
static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
const struct {
u16 mask;
......@@ -780,7 +959,7 @@ static void rtl8169_print_phy_version(struct rtl8169_private *tp)
static void rtl8169_hw_phy_config(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
struct {
u16 regs[5]; /* Beware of bit-sign propagation */
} phy_magic[5] = { {
......@@ -850,7 +1029,7 @@ static void rtl8169_phy_timer(unsigned long __opaque)
struct net_device *dev = (struct net_device *)__opaque;
struct rtl8169_private *tp = netdev_priv(dev);
struct timer_list *timer = &tp->timer;
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
unsigned long timeout = RTL8169_PHY_TIMEOUT;
assert(tp->mac_version > RTL_GIGA_MAC_VER_B);
......@@ -911,41 +1090,65 @@ static inline void rtl8169_request_timer(struct net_device *dev)
add_timer(timer);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
static void rtl8169_netpoll(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
/* Mask the line, run the handler by hand, then unmask. */
disable_irq(pdev->irq);
rtl8169_interrupt(pdev->irq, dev, NULL);
enable_irq(pdev->irq);
}
#endif
/*
 * Tear down everything rtl8169_init_board() set up, in reverse order:
 * unmap the MMIO window, release the PCI regions, disable the device and
 * free the net_device. Shared by the probe error path and device removal.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
void __iomem *ioaddr)
{
iounmap(ioaddr);
pci_release_regions(pdev);
pci_disable_device(pdev);
free_netdev(dev);
}
static int __devinit
rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
void **ioaddr_out)
void __iomem **ioaddr_out)
{
void *ioaddr = NULL;
void __iomem *ioaddr;
struct net_device *dev;
struct rtl8169_private *tp;
unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
int rc, i, acpi_idle_state = 0, pm_cap;
int rc = -ENOMEM, i, acpi_idle_state = 0, pm_cap;
assert(pdev != NULL);
assert(ioaddr_out != NULL);
*ioaddr_out = NULL;
*dev_out = NULL;
// dev zeroed in alloc_etherdev
dev = alloc_etherdev(sizeof (*tp));
if (dev == NULL) {
printk(KERN_ERR PFX "unable to alloc new ethernet\n");
return -ENOMEM;
goto err_out;
}
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
tp = dev->priv;
tp = netdev_priv(dev);
// enable device (incl. PCI PM wakeup and hotplug setup)
rc = pci_enable_device(pdev);
if (rc) {
printk(KERN_ERR PFX "%s: enable failure\n", pdev->slot_name);
goto err_out;
goto err_out_free_dev;
}
rc = pci_set_mwi(pdev);
if (rc < 0)
goto err_out_disable;
/* save power state before pci_enable_device overwrites it */
pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pm_cap) {
......@@ -956,41 +1159,37 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
} else {
printk(KERN_ERR PFX
"Cannot find PowerManagement capability, aborting.\n");
goto err_out_free_res;
goto err_out_mwi;
}
mmio_start = pci_resource_start(pdev, 1);
mmio_end = pci_resource_end(pdev, 1);
mmio_flags = pci_resource_flags(pdev, 1);
mmio_len = pci_resource_len(pdev, 1);
// make sure PCI base addr 1 is MMIO
if (!(mmio_flags & IORESOURCE_MEM)) {
if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
printk(KERN_ERR PFX
"region #1 not an MMIO resource, aborting\n");
rc = -ENODEV;
goto err_out_disable;
goto err_out_mwi;
}
// check for weird/broken PCI region reporting
if (mmio_len < RTL_MIN_IO_SIZE) {
if (pci_resource_len(pdev, 1) < R8169_REGS_SIZE) {
printk(KERN_ERR PFX "Invalid PCI region size(s), aborting\n");
rc = -ENODEV;
goto err_out_disable;
goto err_out_mwi;
}
rc = pci_request_regions(pdev, MODULENAME);
if (rc) {
printk(KERN_ERR PFX "%s: could not request regions.\n",
pdev->slot_name);
goto err_out_disable;
goto err_out_mwi;
}
tp->cp_cmd = PCIMulRW | RxChkSum;
if ((sizeof(dma_addr_t) > 4) &&
!pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac)
!pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) {
tp->cp_cmd |= PCIDAC;
else {
dev->features |= NETIF_F_HIGHDMA;
} else {
rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (rc < 0) {
printk(KERN_ERR PFX "DMA configuration failed.\n");
......@@ -998,12 +1197,10 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
}
}
// enable PCI bus-mastering
pci_set_master(pdev);
// ioremap MMIO region
ioaddr = ioremap(mmio_start, mmio_len);
ioaddr = ioremap(pci_resource_start(pdev, 1), R8169_REGS_SIZE);
if (ioaddr == NULL) {
printk(KERN_ERR PFX "cannot remap MMIO, aborting\n");
rc = -EIO;
......@@ -1040,27 +1237,36 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
}
tp->chipset = i;
tp->rx_buf_sz = RX_BUF_SIZE;
*ioaddr_out = ioaddr;
*dev_out = dev;
return 0;
out:
return rc;
err_out_free_res:
pci_release_regions(pdev);
err_out_mwi:
pci_clear_mwi(pdev);
err_out_disable:
pci_disable_device(pdev);
err_out:
err_out_free_dev:
free_netdev(dev);
return rc;
err_out:
*ioaddr_out = NULL;
*dev_out = NULL;
goto out;
}
static int __devinit
rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev = NULL;
struct rtl8169_private *tp = NULL;
void *ioaddr = NULL;
struct rtl8169_private *tp;
void __iomem *ioaddr = NULL;
static int board_idx = -1;
static int printed_version = 0;
u8 autoneg, duplex;
......@@ -1081,10 +1287,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
tp = dev->priv;
tp = netdev_priv(dev);
assert(ioaddr != NULL);
assert(dev != NULL);
assert(tp != NULL);
if (RTL_R8(PHYstatus) & TBI_Enable) {
tp->set_speed = rtl8169_set_speed_tbi;
......@@ -1109,18 +1313,30 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->open = rtl8169_open;
dev->hard_start_xmit = rtl8169_start_xmit;
dev->get_stats = rtl8169_get_stats;
dev->ethtool_ops = &rtl8169_ethtool_ops;
SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
dev->stop = rtl8169_close;
dev->tx_timeout = rtl8169_tx_timeout;
dev->set_multicast_list = rtl8169_set_rx_mode;
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
dev->irq = pdev->irq;
dev->base_addr = (unsigned long) ioaddr;
#ifdef CONFIG_R8169_NAPI
dev->poll = rtl8169_poll;
dev->weight = R8169_NAPI_WEIGHT;
printk(KERN_INFO PFX "NAPI enabled\n");
#endif
#ifdef CONFIG_R8169_VLAN
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->vlan_rx_register = rtl8169_vlan_rx_register;
dev->vlan_rx_kill_vid = rtl8169_vlan_rx_kill_vid;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = rtl8169_netpoll;
#endif
tp->intr_mask = 0xffff;
tp->pci_dev = pdev;
tp->mmio_addr = ioaddr;
......@@ -1129,10 +1345,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = register_netdev(dev);
if (rc) {
iounmap(ioaddr);
pci_release_regions(pdev);
pci_disable_device(pdev);
free_netdev(dev);
rtl8169_release_board(pdev, dev, ioaddr);
return rc;
}
......@@ -1188,11 +1401,7 @@ rtl8169_remove_one(struct pci_dev *pdev)
assert(tp != NULL);
unregister_netdev(dev);
iounmap(tp->mmio_addr);
pci_release_regions(pdev);
pci_disable_device(pdev);
free_netdev(dev);
rtl8169_release_board(pdev, dev, tp->mmio_addr);
pci_set_drvdata(pdev, NULL);
}
......@@ -1202,7 +1411,7 @@ static int rtl8169_suspend(struct pci_dev *pdev, u32 state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
if (!netif_running(dev))
......@@ -1271,6 +1480,8 @@ rtl8169_open(struct net_device *dev)
if (retval < 0)
goto err_free_rx;
INIT_WORK(&tp->task, NULL, dev);
rtl8169_hw_start(dev);
rtl8169_request_timer(dev);
......@@ -1290,11 +1501,23 @@ rtl8169_open(struct net_device *dev)
goto out;
}
/*
 * Quiesce the chip: mask all interrupt sources, then issue a software
 * reset. The trailing register read forces the posted PCI write out.
 */
static void rtl8169_hw_reset(void __iomem *ioaddr)
{
/* Disable interrupts */
RTL_W16(IntrMask, 0x0000);
/* Reset the chipset */
RTL_W8(ChipCmd, CmdReset);
/* PCI commit */
RTL_R8(ChipCmd);
}
static void
rtl8169_hw_start(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
u32 i;
/* Soft reset the chip. */
......@@ -1333,8 +1556,6 @@ rtl8169_hw_start(struct net_device *dev)
RTL_W16(CPlusCmd, tp->cp_cmd);
}
tp->cur_rx = 0;
RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32));
RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
......@@ -1358,49 +1579,51 @@ rtl8169_hw_start(struct net_device *dev)
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
desc->addr = 0x0badbadbadbadbadull;
desc->status &= ~cpu_to_le32(OWNbit | RsvdMask);
desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
static void rtl8169_free_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
struct RxDesc *desc)
static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
struct sk_buff **sk_buff, struct RxDesc *desc)
{
pci_unmap_single(pdev, le64_to_cpu(desc->addr), RX_BUF_SIZE,
struct pci_dev *pdev = tp->pci_dev;
pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(*sk_buff);
*sk_buff = NULL;
rtl8169_make_unusable_by_asic(desc);
}
static inline void rtl8169_return_to_asic(struct RxDesc *desc)
static inline void rtl8169_return_to_asic(struct RxDesc *desc, int rx_buf_sz)
{
desc->status |= cpu_to_le32(OWNbit + RX_BUF_SIZE);
desc->opts1 |= cpu_to_le32(DescOwn + rx_buf_sz);
}
static inline void rtl8169_give_to_asic(struct RxDesc *desc, dma_addr_t mapping)
static inline void rtl8169_give_to_asic(struct RxDesc *desc, dma_addr_t mapping,
int rx_buf_sz)
{
desc->addr = cpu_to_le64(mapping);
desc->status |= cpu_to_le32(OWNbit + RX_BUF_SIZE);
desc->opts1 |= cpu_to_le32(DescOwn + rx_buf_sz);
}
static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct net_device *dev,
struct sk_buff **sk_buff, struct RxDesc *desc)
static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
struct RxDesc *desc, int rx_buf_sz)
{
struct sk_buff *skb;
dma_addr_t mapping;
int ret = 0;
skb = dev_alloc_skb(RX_BUF_SIZE);
skb = dev_alloc_skb(rx_buf_sz);
if (!skb)
goto err_out;
skb->dev = dev;
skb_reserve(skb, 2);
*sk_buff = skb;
mapping = pci_map_single(pdev, skb->tail, RX_BUF_SIZE,
mapping = pci_map_single(pdev, skb->tail, rx_buf_sz,
PCI_DMA_FROMDEVICE);
rtl8169_give_to_asic(desc, mapping);
rtl8169_give_to_asic(desc, mapping, rx_buf_sz);
out:
return ret;
......@@ -1417,7 +1640,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
for (i = 0; i < NUM_RX_DESC; i++) {
if (tp->Rx_skbuff[i]) {
rtl8169_free_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i,
tp->RxDescArray + i);
}
}
......@@ -1434,8 +1657,8 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
if (tp->Rx_skbuff[i])
continue;
ret = rtl8169_alloc_rx_skb(tp->pci_dev, dev, tp->Rx_skbuff + i,
tp->RxDescArray + i);
ret = rtl8169_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
tp->RxDescArray + i, tp->rx_buf_sz);
if (ret < 0)
break;
}
......@@ -1444,19 +1667,21 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
desc->status |= cpu_to_le32(EORbit);
desc->opts1 |= cpu_to_le32(RingEnd);
}
/* Reset all Tx/Rx ring producer and consumer indexes to the start. */
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}
static int rtl8169_init_ring(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
tp->cur_rx = tp->dirty_rx = 0;
tp->cur_tx = tp->dirty_tx = 0;
memset(tp->TxDescArray, 0x0, NUM_TX_DESC * sizeof (struct TxDesc));
memset(tp->RxDescArray, 0x0, NUM_RX_DESC * sizeof (struct RxDesc));
rtl8169_init_ring_indexes(tp);
memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
......@@ -1471,123 +1696,293 @@ static int rtl8169_init_ring(struct net_device *dev)
return -ENOMEM;
}
static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
struct TxDesc *desc)
{
u32 len = sk_buff[0]->len;
unsigned int len = tx_skb->len;
pci_unmap_single(pdev, le64_to_cpu(desc->addr),
len < ETH_ZLEN ? ETH_ZLEN : len, PCI_DMA_TODEVICE);
pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
desc->opts1 = 0x00;
desc->opts2 = 0x00;
desc->addr = 0x00;
*sk_buff = NULL;
tx_skb->len = 0;
}
static void
rtl8169_tx_clear(struct rtl8169_private *tp)
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
int i;
unsigned int i;
tp->cur_tx = 0;
for (i = 0; i < NUM_TX_DESC; i++) {
struct sk_buff *skb = tp->Tx_skbuff[i];
for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) {
unsigned int entry = i % NUM_TX_DESC;
struct ring_info *tx_skb = tp->tx_skb + entry;
unsigned int len = tx_skb->len;
if (skb) {
rtl8169_unmap_tx_skb(tp->pci_dev, tp->Tx_skbuff + i,
tp->TxDescArray + i);
dev_kfree_skb(skb);
if (len) {
struct sk_buff *skb = tx_skb->skb;
rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb,
tp->TxDescArray + entry);
if (skb) {
dev_kfree_skb(skb);
tx_skb->skb = NULL;
}
tp->stats.tx_dropped++;
}
}
tp->cur_tx = tp->dirty_tx = 0;
}
static void
rtl8169_tx_timeout(struct net_device *dev)
static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *))
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u8 tmp8;
printk(KERN_INFO "%s: TX Timeout\n", dev->name);
/* disable Tx, if not already */
tmp8 = RTL_R8(ChipCmd);
if (tmp8 & CmdTxEnb)
RTL_W8(ChipCmd, tmp8 & ~CmdTxEnb);
PREPARE_WORK(&tp->task, task, dev);
schedule_delayed_work(&tp->task, 4);
}
/* Disable interrupts by clearing the interrupt mask. */
RTL_W16(IntrMask, 0x0000);
static void rtl8169_wait_for_quiescence(struct net_device *dev)
{
synchronize_irq(dev->irq);
/* Stop a shared interrupt from scavenging while we are. */
spin_lock_irq(&tp->lock);
rtl8169_tx_clear(tp);
spin_unlock_irq(&tp->lock);
/* Wait for any pending NAPI task to complete */
netif_poll_disable(dev);
}
/* ...and finally, reset everything */
rtl8169_hw_start(dev);
static void rtl8169_reinit_task(void *_data)
{
struct net_device *dev = _data;
int ret;
netif_wake_queue(dev);
if (netif_running(dev)) {
rtl8169_wait_for_quiescence(dev);
rtl8169_close(dev);
}
ret = rtl8169_open(dev);
if (unlikely(ret < 0)) {
if (net_ratelimit()) {
printk(PFX KERN_ERR "%s: reinit failure (status = %d)."
" Rescheduling.\n", dev->name, ret);
}
rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
}
static int
rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void rtl8169_reset_task(void *_data)
{
struct net_device *dev = _data;
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned int entry = tp->cur_tx % NUM_TX_DESC;
u32 len = skb->len;
if (unlikely(skb->len < ETH_ZLEN)) {
skb = skb_padto(skb, ETH_ZLEN);
if (!skb)
goto err_update_stats;
len = ETH_ZLEN;
if (!netif_running(dev))
return;
rtl8169_wait_for_quiescence(dev);
rtl8169_rx_interrupt(dev, tp, tp->mmio_addr);
rtl8169_tx_clear(tp);
if (tp->dirty_rx == tp->cur_rx) {
rtl8169_init_ring_indexes(tp);
rtl8169_hw_start(dev);
netif_wake_queue(dev);
} else {
if (net_ratelimit()) {
printk(PFX KERN_EMERG "%s: Rx buffers shortage\n",
dev->name);
}
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
if (!(le32_to_cpu(tp->TxDescArray[entry].status) & OWNbit)) {
}
/*
 * net_device watchdog hook: the Tx queue stalled. Immediately quiesce the
 * chip (mask interrupts + soft reset), then defer the ring cleanup and
 * restart to rtl8169_reset_task via the shared work item.
 */
static void rtl8169_tx_timeout(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
rtl8169_hw_reset(tp->mmio_addr);
/* Let's wait a bit while any (async) irq lands on */
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
u32 opts1)
{
struct skb_shared_info *info = skb_shinfo(skb);
unsigned int cur_frag, entry;
struct TxDesc *txd;
entry = tp->cur_tx;
for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
skb_frag_t *frag = info->frags + cur_frag;
dma_addr_t mapping;
u32 status;
u32 status, len;
void *addr;
mapping = pci_map_single(tp->pci_dev, skb->data, len,
PCI_DMA_TODEVICE);
entry = (entry + 1) % NUM_TX_DESC;
tp->Tx_skbuff[entry] = skb;
tp->TxDescArray[entry].addr = cpu_to_le64(mapping);
txd = tp->TxDescArray + entry;
len = frag->size;
addr = ((void *) page_address(frag->page)) + frag->page_offset;
mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);
/* anti gcc 2.95.3 bugware */
status = OWNbit | FSbit | LSbit | len |
(EORbit * !((entry + 1) % NUM_TX_DESC));
tp->TxDescArray[entry].status = cpu_to_le32(status);
RTL_W8(TxPoll, 0x40); //set polling bit
/* anti gcc 2.95.3 bugware (sic) */
status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
dev->trans_start = jiffies;
txd->opts1 = cpu_to_le32(status);
txd->addr = cpu_to_le64(mapping);
tp->cur_tx++;
smp_wmb();
} else
goto err_drop;
tp->tx_skb[entry].len = len;
}
if (cur_frag) {
tp->tx_skb[entry].skb = skb;
txd->opts1 |= cpu_to_le32(LastFrag);
}
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) {
u32 dirty = tp->dirty_tx;
return cur_frag;
}
/*
 * rtl8169_tso_csum - compute the offload bits for a Tx descriptor.
 *
 * Returns LargeSend plus the encoded MSS when TSO applies, the
 * IP/TCP or IP/UDP checksum-offload bits when the stack asked for
 * hardware checksumming, and 0 otherwise.
 */
static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
{
	u32 mss;

	if (dev->features & NETIF_F_TSO) {
		mss = skb_shinfo(skb)->tso_size;
		if (mss)
			return LargeSend | ((mss & MSSMask) << MSSShift);
	}

	if (skb->ip_summed != CHECKSUM_HW)
		return 0;

	switch (skb->nh.iph->protocol) {
	case IPPROTO_TCP:
		return IPCS | TCPCS;
	case IPPROTO_UDP:
		return IPCS | UDPCS;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}

	return 0;
}
/*
 * rtl8169_start_xmit - hard_start_xmit handler.
 *
 * Maps the linear part of @skb (padding short frames to ETH_ZLEN),
 * hands any paged fragments to rtl8169_xmit_frags(), publishes the
 * descriptors to the chip and kicks Tx polling.  Returns 0 on success
 * (including silent drop of an unpaddable runt) and 1 with the queue
 * stopped when no descriptor is available.
 *
 * NOTE(review): the diff view of this block was merge-mangled (use of
 * an undefined local `dirty`, an unreachable `return ret` after
 * `return 0`, and a dead `err_drop` label); rewritten to the coherent
 * post-merge form with a single exit path.
 */
static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts1;
	int ret = 0;

	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		goto err_stop;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop;

	opts1 = DescOwn | rtl8169_tso_csum(skb, dev);

	frags = rtl8169_xmit_frags(tp, skb, opts1);
	if (frags) {
		len = skb_headlen(skb);
		opts1 |= FirstFrag;
	} else {
		len = skb->len;

		if (unlikely(len < ETH_ZLEN)) {
			skb = skb_padto(skb, ETH_ZLEN);
			if (!skb)
				goto err_update_stats;
			len = ETH_ZLEN;
		}

		opts1 |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);
	txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));

	/* Descriptor payload must be visible before ownership transfer. */
	wmb();

	/* anti gcc 2.95.3 bugware (sic) */
	status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	dev->trans_start = jiffies;

	tp->cur_tx += frags + 1;

	smp_wmb();

	RTL_W8(TxPoll, 0x40);	/* set polling bit */

	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		smp_rmb();
		/* Re-check: the Tx completion path may have freed slots
		 * between the availability test and the queue stop. */
		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
			netif_wake_queue(dev);
	}
out:
	return ret;

err_stop:
	netif_stop_queue(dev);
	ret = 1;
err_update_stats:
	tp->stats.tx_dropped++;
	goto out;
}
/*
 * rtl8169_pcierr_interrupt - handle a SYSErr (PCI error) interrupt.
 *
 * Logs the PCI command/status registers, clears the latched error
 * bits, and resets the chip.  If the error struck before any Rx
 * traffic was processed while 64-bit DMA addressing was enabled, the
 * controller is switched back to 32-bit DMA and reinitialized.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	printk(KERN_ERR PFX "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
	       dev->name, pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done.
	 *
	 * Feel free to adjust to your needs.
	 */
	pci_write_config_word(pdev, PCI_COMMAND,
			      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);

	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	/*
	 * Fix: the original test read "tp->dirty_rx == tp->cur_rx == 0",
	 * which C parses as "(tp->dirty_rx == tp->cur_rx) == 0", i.e.
	 * "dirty_rx != cur_rx" -- not "both counters still zero" as the
	 * boot-time comment intends.
	 */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		printk(KERN_INFO PFX "%s: disabling PCI DAC.\n", dev->name);
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
		rtl8169_schedule_work(dev, rtl8169_reinit_task);
	}

	rtl8169_hw_reset(ioaddr);
}
static void
rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
void *ioaddr)
void __iomem *ioaddr)
{
unsigned int dirty_tx, tx_left;
......@@ -1601,22 +1996,24 @@ rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
while (tx_left > 0) {
unsigned int entry = dirty_tx % NUM_TX_DESC;
struct sk_buff *skb = tp->Tx_skbuff[entry];
struct ring_info *tx_skb = tp->tx_skb + entry;
u32 len = tx_skb->len;
u32 status;
rmb();
status = le32_to_cpu(tp->TxDescArray[entry].status);
if (status & OWNbit)
status = le32_to_cpu(tp->TxDescArray[entry].opts1);
if (status & DescOwn)
break;
/* FIXME: is it really accurate for TxErr ? */
tp->stats.tx_bytes += skb->len >= ETH_ZLEN ?
skb->len : ETH_ZLEN;
tp->stats.tx_bytes += len;
tp->stats.tx_packets++;
rtl8169_unmap_tx_skb(tp->pci_dev, tp->Tx_skbuff + entry,
tp->TxDescArray + entry);
dev_kfree_skb_irq(skb);
tp->Tx_skbuff[entry] = NULL;
rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);
if (status & LastFrag) {
dev_kfree_skb_irq(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;
tx_left--;
}
......@@ -1624,14 +2021,28 @@ rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
if (tp->dirty_tx != dirty_tx) {
tp->dirty_tx = dirty_tx;
smp_wmb();
if (netif_queue_stopped(dev))
if (netif_queue_stopped(dev) &&
(TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
netif_wake_queue(dev);
}
}
}
/*
 * rtl8169_rx_csum - propagate the chip's Rx checksum verdict to @skb.
 *
 * Marks the skb CHECKSUM_UNNECESSARY when the hardware validated the
 * protocol checksum for the detected protocol, CHECKSUM_NONE otherwise.
 *
 * Fix: descriptor fields are stored little-endian (written elsewhere
 * with cpu_to_le32 and read via le32_to_cpu in the interrupt handlers),
 * so opts1 must be converted here as well or the protocol/fail bits are
 * misread on big-endian hosts.
 */
static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
{
	u32 opts1 = le32_to_cpu(desc->opts1);
	u32 status = opts1 & RxProtoMask;

	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
	    ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
	    ((status == RxProtoIP) && !(opts1 & IPFail)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}
static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
struct RxDesc *desc,
struct net_device *dev)
struct RxDesc *desc, int rx_buf_sz)
{
int ret = -1;
......@@ -1640,11 +2051,10 @@ static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
skb = dev_alloc_skb(pkt_size + 2);
if (skb) {
skb->dev = dev;
skb_reserve(skb, 2);
eth_copy_and_sum(skb, sk_buff[0]->tail, pkt_size, 0);
*sk_buff = skb;
rtl8169_return_to_asic(desc);
rtl8169_return_to_asic(desc, rx_buf_sz);
ret = 0;
}
}
......@@ -1653,7 +2063,7 @@ static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
static int
rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
void *ioaddr)
void __iomem *ioaddr)
{
unsigned int cur_rx, rx_left, count;
int delta;
......@@ -1671,9 +2081,9 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
u32 status;
rmb();
status = le32_to_cpu(tp->RxDescArray[entry].status);
status = le32_to_cpu(tp->RxDescArray[entry].opts1);
if (status & OWNbit)
if (status & DescOwn)
break;
if (status & RxRES) {
printk(KERN_INFO "%s: Rx ERROR!!!\n", dev->name);
......@@ -1689,22 +2099,27 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
void (*pci_action)(struct pci_dev *, dma_addr_t,
size_t, int) = pci_dma_sync_single_for_device;
rtl8169_rx_csum(skb, desc);
pci_dma_sync_single_for_cpu(tp->pci_dev,
le64_to_cpu(desc->addr), RX_BUF_SIZE,
le64_to_cpu(desc->addr), tp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
if (rtl8169_try_rx_copy(&skb, pkt_size, desc, dev)) {
if (rtl8169_try_rx_copy(&skb, pkt_size, desc,
tp->rx_buf_sz)) {
pci_action = pci_unmap_single;
tp->Rx_skbuff[entry] = NULL;
}
pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb->dev = dev;
skb_put(skb, pkt_size);
skb->protocol = eth_type_trans(skb, dev);
rtl8169_rx_skb(skb);
if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0)
rtl8169_rx_skb(skb);
dev->last_rx = jiffies;
tp->stats.rx_bytes += pkt_size;
......@@ -1745,10 +2160,13 @@ rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
struct net_device *dev = (struct net_device *) dev_instance;
struct rtl8169_private *tp = netdev_priv(dev);
int boguscnt = max_interrupt_work;
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
int status = 0;
int handled = 0;
if (unlikely(!netif_running(dev)))
goto out;
do {
status = RTL_R16(IntrStatus);
......@@ -1766,11 +2184,7 @@ rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
break;
if (unlikely(status & SYSErr)) {
printk(KERN_ERR PFX "%s: PCI error (status: 0x%04x)."
" Device disabled.\n", dev->name, status);
RTL_W8(ChipCmd, 0x00);
RTL_W16(IntrMask, 0x0000);
RTL_R16(IntrMask);
rtl8169_pcierr_interrupt(dev);
break;
}
......@@ -1784,7 +2198,7 @@ rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
if (likely(netif_rx_schedule_prep(dev)))
__netif_rx_schedule(dev);
else {
printk(KERN_INFO "%s: interrupt %x taken in poll\n",
printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
dev->name, status);
}
break;
......@@ -1807,6 +2221,7 @@ rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
/* Clear all interrupt sources. */
RTL_W16(IntrStatus, 0xffff);
}
out:
return IRQ_RETVAL(handled);
}
......@@ -1815,7 +2230,7 @@ static int rtl8169_poll(struct net_device *dev, int *budget)
{
unsigned int work_done, work_to_do = min(*budget, dev->quota);
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
work_done = rtl8169_rx_interrupt(dev, tp, ioaddr);
rtl8169_tx_interrupt(dev, tp, ioaddr);
......@@ -1845,10 +2260,12 @@ rtl8169_close(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
netif_stop_queue(dev);
flush_scheduled_work();
rtl8169_delete_timer(dev);
spin_lock_irq(&tp->lock);
......@@ -1865,9 +2282,10 @@ rtl8169_close(struct net_device *dev)
spin_unlock_irq(&tp->lock);
synchronize_irq(dev->irq);
free_irq(dev->irq, dev);
netif_poll_disable(dev);
rtl8169_tx_clear(tp);
rtl8169_rx_clear(tp);
......@@ -1886,7 +2304,7 @@ static void
rtl8169_set_rx_mode(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
u32 mc_filter[2]; /* Multicast hash filter */
int i, rx_mode;
......@@ -1938,7 +2356,7 @@ rtl8169_set_rx_mode(struct net_device *dev)
static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
if (netif_running(dev)) {
......
......@@ -289,6 +289,8 @@ typedef struct _XENA_dev_config {
u64 tda_err_alarm;
u64 pcc_err_reg;
#define PCC_FB_ECC_DB_ERR vBIT(0xFF, 16, 8)
u64 pcc_err_mask;
u64 pcc_err_alarm;
......@@ -512,6 +514,7 @@ typedef struct _XENA_dev_config {
#define RX_PA_CFG_IGNORE_FRM_ERR BIT(1)
#define RX_PA_CFG_IGNORE_SNAP_OUI BIT(2)
#define RX_PA_CFG_IGNORE_LLC_CTRL BIT(3)
#define RX_PA_CFG_IGNORE_L2_ERR BIT(6)
u8 unused12[0x700 - 0x1D8];
......
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -16,6 +16,7 @@
#define TBD 0
#define BIT(loc) (0x8000000000000000ULL >> (loc))
#define vBIT(val, loc, sz) (((u64)val) << (64-loc-sz))
#define INV(d) ((d&0xff)<<24) | (((d>>8)&0xff)<<16) | (((d>>16)&0xff)<<8)| ((d>>24)&0xff)
#ifndef BOOL
#define BOOL int
......@@ -52,8 +53,6 @@ typedef enum xena_max_outstanding_splits {
/*
* Debug related variables.
*/
#define DEBUG_ON TRUE
/* different debug levels. */
#define ERR_DBG 0
#define INIT_DBG 1
......@@ -312,7 +311,7 @@ typedef struct stat_block {
/* Maintains Per FIFO related information. */
typedef struct tx_fifo_config {
#define MAX_AVAILABLE_TXDS 8192
u32 FifoLen; /* specifies len of FIFO upto 8192, ie no of TxDLs */
u32 fifo_len; /* specifies len of FIFO upto 8192, ie no of TxDLs */
/* Priority definition */
#define TX_FIFO_PRI_0 0 /*Highest */
#define TX_FIFO_PRI_1 1
......@@ -322,9 +321,9 @@ typedef struct tx_fifo_config {
#define TX_FIFO_PRI_5 5
#define TX_FIFO_PRI_6 6
#define TX_FIFO_PRI_7 7 /*lowest */
u8 FifoPriority; /* specifies pointer level for FIFO */
u8 fifo_priority; /* specifies pointer level for FIFO */
/* user should not set twos fifos with same pri */
u8 fNoSnoop;
u8 f_no_snoop;
#define NO_SNOOP_TXD 0x01
#define NO_SNOOP_TXD_BUFFER 0x02
} tx_fifo_config_t;
......@@ -332,7 +331,7 @@ typedef struct tx_fifo_config {
/* Maintains per Ring related information */
typedef struct rx_ring_config {
u32 NumRxd; /*No of RxDs per Rx Ring */
u32 num_rxd; /*No of RxDs per Rx Ring */
#define RX_RING_PRI_0 0 /* highest */
#define RX_RING_PRI_1 1
#define RX_RING_PRI_2 2
......@@ -342,70 +341,37 @@ typedef struct rx_ring_config {
#define RX_RING_PRI_6 6
#define RX_RING_PRI_7 7 /* lowest */
u8 RingPriority; /*Specifies service priority of ring */
u8 ring_priority; /*Specifies service priority of ring */
/* OSM should not set any two rings with same priority */
u8 RingOrg; /*Organization of ring */
#define RING_ORG_BUFF1 0x01
#define RX_RING_ORG_BUFF3 0x03
#define RX_RING_ORG_BUFF5 0x05
/* In case of 3 buffer recv. mode, size of three buffers is expected as.. */
#define BUFF_SZ_1 22 /* ethernet header */
#define BUFF_SZ_2 (64+64) /* max. IP+TCP header size */
#define BUFF_SZ_3 (1500-20-20) /* TCP payload */
#define BUFF_SZ_3_JUMBO (9600-20-20) /* Jumbo TCP payload */
u32 RxdThresh; /*No of used Rxds NIC can store before transfer to host */
#define DEFAULT_RXD_THRESHOLD 0x1 /* TODO */
u8 fNoSnoop;
u8 ring_org; /*Organization of ring */
#define RING_ORG_BUFF1 0x01
#define RX_RING_ORG_BUFF3 0x03
#define RX_RING_ORG_BUFF5 0x05
u8 f_no_snoop;
#define NO_SNOOP_RXD 0x01
#define NO_SNOOP_RXD_BUFFER 0x02
u32 RxD_BackOff_Interval;
#define RXD_BACKOFF_INTERVAL_DEF 0x0
#define RXD_BACKOFF_INTERVAL_MIN 0x0
#define RXD_BACKOFF_INTERVAL_MAX 0x0
} rx_ring_config_t;
/* This structure provides contains values of the tunable parameters
* of the H/W
*/
struct config_param {
/* Tx Side */
u32 TxFIFONum; /*Number of Tx FIFOs */
u32 tx_fifo_num; /*Number of Tx FIFOs */
#define MAX_TX_FIFOS 8
tx_fifo_config_t TxCfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
u32 MaxTxDs; /*Max no. of Tx buffer descriptor per TxDL */
BOOL TxVLANEnable; /*TRUE: Insert VLAN ID, FALSE: Don't insert */
#define TX_REQ_TIMEOUT_DEFAULT 0x0
#define TX_REQ_TIMEOUT_MIN 0x0
#define TX_REQ_TIMEOUT_MAX 0x0
u32 TxReqTimeOut;
BOOL TxFlow; /*Tx flow control enable */
BOOL RxFlow;
BOOL OverrideTxServiceState; /* TRUE: Overide, FALSE: Do not override
Use the new priority information
of service state. It is not recommended
to change but OSM can opt to do so */
#define MAX_SERVICE_STATES 36
u8 TxServiceState[MAX_SERVICE_STATES];
/* Array element represent 'priority'
* and array index represents
* 'Service state' e.g.
* TxServiceState[3]=7; it means
* Service state 3 is associated
* with priority 7 of a Tx FIFO */
u64 TxIntrType; /* Specifies if Tx Intr is UTILZ or PER_LIST type. */
tx_fifo_config_t tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
u64 tx_intr_type;
/* Specifies if Tx Intr is UTILZ or PER_LIST type. */
/* Rx Side */
u32 RxRingNum; /*Number of receive rings */
u32 rx_ring_num; /*Number of receive rings */
#define MAX_RX_RINGS 8
#define MAX_RX_BLOCKS_PER_RING 150
rx_ring_config_t RxCfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
BOOL RxVLANEnable; /*TRUE: Strip off VLAN tag from the frame,
FALSE: Don't strip off VLAN tag */
rx_ring_config_t rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
#define HEADER_ETHERNET_II_802_3_SIZE 14
#define HEADER_802_2_SIZE 3
......@@ -419,23 +385,6 @@ struct config_param {
#define MAX_PYLD_JUMBO 9600
#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18)
#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22)
u32 MTU; /*Maximum Payload */
BOOL JumboEnable; /*Enable Jumbo frames recv/send */
BOOL OverrideRxServiceState; /* TRUE: Overide, FALSE: Do not override
Use the new priority information
of service state. It is not recommended
to change but OSM can opt to do so */
#define MAX_SERVICE_STATES 36
u8 RxServiceState[MAX_SERVICE_STATES];
/* Array element represent 'priority'
* and array index represents
* 'Service state'e.g.
* RxServiceState[3]=7; it means
* Service state 3 is associated
* with priority 7 of a Rx FIFO */
BOOL StatAutoRefresh; /* When true, StatRefreshTime have valid value */
u32 StatRefreshTime; /*Time for refreshing statistics */
#define STAT_TRSF_PER_1_SECOND 0x208D5
};
/* Structure representing MAC Addrs */
......@@ -491,6 +440,12 @@ typedef struct _TxD {
u64 Host_Control; /* reserved for host */
} TxD_t;
/* Structure to hold the phy and virt addr of every TxDL. */
typedef struct list_info_hold {
dma_addr_t list_phy_addr;
void *list_virt_addr;
} list_info_hold_t;
/* Rx descriptor structure */
typedef struct _RxD_t {
u64 Host_Control; /* reserved for host */
......@@ -507,36 +462,80 @@ typedef struct _RxD_t {
#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF)
u64 Control_2;
#ifndef CONFIG_2BUFF_MODE
#define MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16)
#define SET_BUFFER0_SIZE(val) vBIT(val,0,16)
#else
#define MASK_BUFFER0_SIZE vBIT(0xFF,0,16)
#define MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
#define MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
#define SET_BUFFER0_SIZE(val) vBIT(val,8,8)
#define SET_BUFFER1_SIZE(val) vBIT(val,16,16)
#define SET_BUFFER2_SIZE(val) vBIT(val,32,16)
#endif
#define MASK_VLAN_TAG vBIT(0xFFFF,48,16)
#define SET_VLAN_TAG(val) vBIT(val,48,16)
#define SET_NUM_TAG(val) vBIT(val,16,32)
#ifndef CONFIG_2BUFF_MODE
#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0xFFFF,0,16)))
/*
#define TXD_GET_BUFFER1_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER1_SIZE) >> (63-31))
#define TXD_GET_BUFFER2_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER2_SIZE) >> (63-47))
*/
#else
#define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \
>> 48)
#define RXD_GET_BUFFER1_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER1_SIZE) \
>> 32)
#define RXD_GET_BUFFER2_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER2_SIZE) \
>> 16)
#define BUF0_LEN 40
#define BUF1_LEN 1
#endif
u64 Buffer0_ptr;
#ifdef CONFIG_2BUFF_MODE
u64 Buffer1_ptr;
u64 Buffer2_ptr;
#endif
} RxD_t;
/* Structure that represents the Rx descriptor block which contains
* 128 Rx descriptors.
*/
#ifndef CONFIG_2BUFF_MODE
typedef struct _RxD_block {
#define MAX_RXDS_PER_BLOCK 127
RxD_t rxd[MAX_RXDS_PER_BLOCK];
u64 reserved_0;
#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd in this blk */
u64 reserved_2_pNext_RxD_block; /*@ Logical ptr to next */
u64 pNext_RxD_Blk_physical; /* Buff0_ptr.
In a 32 bit arch the upper 32 bits
should be 0 */
u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last
* Rxd in this blk */
u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */
u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch
* the upper 32 bits should
* be 0 */
} RxD_block_t;
#else
typedef struct _RxD_block {
#define MAX_RXDS_PER_BLOCK 85
RxD_t rxd[MAX_RXDS_PER_BLOCK];
#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd
* in this blk */
u64 pNext_RxD_Blk_physical; /* Phy ponter to next blk. */
} RxD_block_t;
#define SIZE_OF_BLOCK 4096
/* Structure to hold virtual addresses of Buf0 and Buf1 in
* 2buf mode. */
typedef struct bufAdd {
void *ba_0_org;
void *ba_1_org;
void *ba_0;
void *ba_1;
} buffAdd_t;
#endif
/* Structure which stores all the MAC control parameters */
......@@ -568,10 +567,6 @@ typedef tx_curr_get_info_t tx_curr_put_info_t;
*/
typedef struct mac_info {
/* rx side stuff */
u32 rxd_ring_mem_sz;
RxD_t *RxRing[MAX_RX_RINGS]; /* Logical Rx ring pointers */
dma_addr_t RxRing_Phy[MAX_RX_RINGS];
/* Put pointer info which indictes which RxD has to be replenished
* with a new buffer.
*/
......@@ -583,41 +578,21 @@ typedef struct mac_info {
rx_curr_get_info_t rx_curr_get_info[MAX_RX_RINGS];
u16 rmac_pause_time;
/* this will be used in receive function, this decides which ring would
be processed first. eg: ring with priority value 0 (highest) should
be processed first.
first 3 LSB bits represent ring number which should be processed
first, similarly next 3 bits represent next ring to be processed.
eg: value of _rx_ring_pri_map = 0x0000 003A means
ring #2 would be processed first and #7 would be processed next
*/
u32 _rx_ring_pri_map;
u16 mc_pause_threshold_q0q3;
u16 mc_pause_threshold_q4q7;
/* tx side stuff */
void *txd_list_mem; /* orignal pointer to allocated mem */
dma_addr_t txd_list_mem_phy;
u32 txd_list_mem_sz;
/* logical pointer of start of each Tx FIFO */
TxFIFO_element_t *tx_FIFO_start[MAX_TX_FIFOS];
/* logical pointer of start of TxDL which corresponds to each Tx FIFO */
TxD_t *txdl_start[MAX_TX_FIFOS];
/* Same as txdl_start but phy addr */
dma_addr_t txdl_start_phy[MAX_TX_FIFOS];
/* Current offset within tx_FIFO_start, where driver would write new Tx frame*/
tx_curr_put_info_t tx_curr_put_info[MAX_TX_FIFOS];
tx_curr_get_info_t tx_curr_get_info[MAX_TX_FIFOS];
u16 txdl_len; /* length of a TxDL, same for all */
void *stats_mem; /* orignal pointer to allocated mem */
dma_addr_t stats_mem_phy; /* Physical address of the stat block */
u32 stats_mem_sz;
StatInfo_t *StatsInfo; /* Logical address of the stat block */
StatInfo_t *stats_info; /* Logical address of the stat block */
} mac_info_t;
/* structure representing the user defined MAC addresses */
......@@ -632,13 +607,20 @@ typedef struct rx_block_info {
dma_addr_t block_dma_addr;
} rx_block_info_t;
/* Default Tunable parameters of the NIC. */
#define DEFAULT_FIFO_LEN 4096
#define SMALL_RXD_CNT 30 * (MAX_RXDS_PER_BLOCK+1)
#define LARGE_RXD_CNT 100 * (MAX_RXDS_PER_BLOCK+1)
#define SMALL_BLK_CNT 30
#define LARGE_BLK_CNT 100
/* Structure representing one instance of the NIC */
typedef struct s2io_nic {
#define MAX_MAC_SUPPORTED 16
#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
macaddr_t defMacAddr[MAX_MAC_SUPPORTED];
macaddr_t preMacAddr[MAX_MAC_SUPPORTED];
macaddr_t def_mac_addr[MAX_MAC_SUPPORTED];
macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED];
struct net_device_stats stats;
caddr_t bar0;
......@@ -651,7 +633,7 @@ typedef struct s2io_nic {
char name[32];
struct tasklet_struct task;
atomic_t tasklet_status;
volatile unsigned long tasklet_status;
struct timer_list timer;
struct net_device *dev;
struct pci_dev *pdev;
......@@ -670,8 +652,10 @@ typedef struct s2io_nic {
u32 irq;
atomic_t rx_bufs_left[MAX_RX_RINGS];
spinlock_t isr_lock;
spinlock_t tx_lock;
#ifndef CONFIG_S2IO_NAPI
spinlock_t put_lock;
#endif
#define PROMISC 1
#define ALL_MULTI 2
......@@ -690,23 +674,22 @@ typedef struct s2io_nic {
u16 tx_err_count;
u16 rx_err_count;
#if DEBUG_ON
u64 rxpkt_bytes;
u64 txpkt_bytes;
int int_cnt;
int rxint_cnt;
int txint_cnt;
u64 rxpkt_cnt;
#ifndef CONFIG_S2IO_NAPI
/* Index to the absolute position of the put pointer of Rx ring. */
int put_pos[MAX_RX_RINGS];
#endif
/* Place holders for the virtual and physical addresses of
/*
* Place holders for the virtual and physical addresses of
* all the Rx Blocks
*/
struct rx_block_info
rx_blocks[MAX_RX_RINGS][MAX_RX_BLOCKS_PER_RING];
rx_block_info_t rx_blocks[MAX_RX_RINGS][MAX_RX_BLOCKS_PER_RING];
int block_count[MAX_RX_RINGS];
int pkt_cnt[MAX_RX_RINGS];
/* Place holder of all the TX List's Phy and Virt addresses. */
list_info_hold_t *list_info[MAX_TX_FIFOS];
/* Id timer, used to blink NIC to physically identify NIC. */
struct timer_list id_timer;
......@@ -736,24 +719,29 @@ typedef struct s2io_nic {
u16 last_link_state;
#define LINK_DOWN 1
#define LINK_UP 2
#ifdef CONFIG_2BUFF_MODE
/* Buffer Address store. */
buffAdd_t **ba[MAX_RX_RINGS];
#endif
int task_flag;
#define CARD_DOWN 1
#define CARD_UP 2
atomic_t card_state;
volatile unsigned long link_state;
} nic_t;
#define RESET_ERROR 1;
#define CMD_ERROR 2;
/* Default Tunable parameters of the NIC. */
#define DEFAULT_FIFO_LEN 4096
#define SMALL_RXD_CNT 40 * (MAX_RXDS_PER_BLOCK+1)
#define LARGE_RXD_CNT 100 * (MAX_RXDS_PER_BLOCK+1)
/* OS related system calls */
#ifndef readq
/*
 * readq - emulate a 64-bit MMIO read as two 32-bit reads for platforms
 * that do not provide a native readq.  Upper dword first, then the
 * lower dword is OR-ed in.
 *
 * Fix: the diff residue duplicated the shift/OR pair and used the
 * cast-as-lvalue form "(u64) ret <<= 32", a deprecated GCC extension
 * that is invalid ISO C; plain assignments are equivalent since ret is
 * already u64.
 */
static inline u64 readq(void *addr)
{
	u64 ret = 0;

	ret = readl(addr + 4);
	ret <<= 32;
	ret |= readl(addr);

	return ret;
}
......@@ -765,6 +753,27 @@ static inline void writeq(u64 val, void *addr)
writel((u32) (val), addr);
writel((u32) (val >> 32), (addr + 4));
}
/* In 32 bit modes, some registers have to be written in a
* particular order to expect correct hardware operation. The
* macro SPECIAL_REG_WRITE is used to perform such ordered
* writes. Defines UF (Upper First) and LF (Lower First) will
* be used to specify the required write order.
*/
#define UF 1
#define LF 2
/*
 * SPECIAL_REG_WRITE - 64-bit register write as two ordered 32-bit
 * writes.  @order selects which half reaches the bus first: LF writes
 * the lower dword before the upper one; anything else (UF) writes the
 * upper dword first.
 */
static inline void SPECIAL_REG_WRITE(u64 val, void *addr, int order)
{
	u32 lo = (u32) (val);
	u32 hi = (u32) (val >> 32);

	if (order != LF) {
		writel(hi, (addr + 4));
		writel(lo, addr);
	} else {
		writel(lo, addr);
		writel(hi, (addr + 4));
	}
}
#else
#define SPECIAL_REG_WRITE(val, addr, dummy) writeq(val, addr)
#endif
/* Interrupt related values of Xena */
......@@ -815,30 +824,41 @@ static inline void writeq(u64 val, void *addr)
/* DMA level Inressupts */
#define TXDMA_PFC_INT_M BIT(0)
/* PFC block interrupts */
#define TXDMA_PCC_INT_M BIT(2)
/* PFC block interrupts */
#define PFC_MISC_ERR_1 BIT(0) /* Interrupt to indicate FIFO full */
/* PCC block interrupts. */
#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate
PCC_FB_ECC Error. */
/*
* Prototype declaration.
*/
static int __devinit s2io_init_nic(struct pci_dev *pdev,
const struct pci_device_id *pre);
static void __devexit s2io_rem_nic(struct pci_dev *pdev);
static int initSharedMem(struct s2io_nic *sp);
static void freeSharedMem(struct s2io_nic *sp);
static int initNic(struct s2io_nic *nic);
static int init_shared_mem(struct s2io_nic *sp);
static void free_shared_mem(struct s2io_nic *sp);
static int init_nic(struct s2io_nic *nic);
#ifndef CONFIG_S2IO_NAPI
static void rxIntrHandler(struct s2io_nic *sp);
static void rx_intr_handler(struct s2io_nic *sp);
#endif
static void txIntrHandler(struct s2io_nic *sp);
static void alarmIntrHandler(struct s2io_nic *sp);
static void tx_intr_handler(struct s2io_nic *sp);
static void alarm_intr_handler(struct s2io_nic *sp);
static int s2io_starter(void);
void s2io_closer(void);
static void s2io_tx_watchdog(struct net_device *dev);
static void s2io_tasklet(unsigned long dev_addr);
static void s2io_set_multicast(struct net_device *dev);
static int rxOsmHandler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no);
#ifndef CONFIG_2BUFF_MODE
static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no);
#else
static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
buffAdd_t * ba);
#endif
void s2io_link(nic_t * sp, int link);
void s2io_reset(nic_t * sp);
#ifdef CONFIG_S2IO_NAPI
......@@ -849,5 +869,8 @@ int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs);
static int verify_xena_quiescence(u64 val64, int flag);
static struct ethtool_ops netdev_ethtool_ops;
static void s2io_set_link(unsigned long data);
static void s2io_card_down(nic_t * nic);
static int s2io_card_up(nic_t * nic);
#endif /* _S2IO_H */
......@@ -177,6 +177,7 @@ static const int multicast_filter_limit = 32;
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
......@@ -208,27 +209,15 @@ KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#else
#undef readb
#undef readw
#undef readl
#undef writeb
#undef writew
#undef writel
#define readb inb
#define readw inw
#define readl inl
#define writeb outb
#define writew outw
#define writel outl
#endif
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
......@@ -363,7 +352,7 @@ enum rhine_quirks {
*/
/* Beware of PCI posted writes */
#define IOSYNC do { readb(dev->base_addr + StationAddr); } while (0)
#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
static struct pci_device_id rhine_pci_tbl[] =
{
......@@ -500,6 +489,7 @@ struct rhine_private {
u8 tx_thresh, rx_thresh;
struct mii_if_info mii_if;
void __iomem *base;
};
static int mdio_read(struct net_device *dev, int phy_id, int location);
......@@ -529,14 +519,14 @@ static void rhine_shutdown (struct device *gdev);
static inline u32 get_intr_status(struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
u32 intr_status;
intr_status = readw(ioaddr + IntrStatus);
intr_status = ioread16(ioaddr + IntrStatus);
/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
if (rp->quirks & rqStatusWBRace)
intr_status |= readb(ioaddr + IntrStatus2) << 16;
intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
return intr_status;
}
......@@ -546,32 +536,32 @@ static inline u32 get_intr_status(struct net_device *dev)
*/
static void rhine_power_init(struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
u16 wolstat;
if (rp->quirks & rqWOL) {
/* Make sure chip is in power state D0 */
writeb(readb(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
/* Disable "force PME-enable" */
writeb(0x80, ioaddr + WOLcgClr);
iowrite8(0x80, ioaddr + WOLcgClr);
/* Clear power-event config bits (WOL) */
writeb(0xFF, ioaddr + WOLcrClr);
iowrite8(0xFF, ioaddr + WOLcrClr);
/* More recent cards can manage two additional patterns */
if (rp->quirks & rq6patterns)
writeb(0x03, ioaddr + WOLcrClr1);
iowrite8(0x03, ioaddr + WOLcrClr1);
/* Save power-event status bits */
wolstat = readb(ioaddr + PwrcsrSet);
wolstat = ioread8(ioaddr + PwrcsrSet);
if (rp->quirks & rq6patterns)
wolstat |= (readb(ioaddr + PwrcsrSet1) & 0x03) << 8;
wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
/* Clear power-event status bits */
writeb(0xFF, ioaddr + PwrcsrClr);
iowrite8(0xFF, ioaddr + PwrcsrClr);
if (rp->quirks & rq6patterns)
writeb(0x03, ioaddr + PwrcsrClr1);
iowrite8(0x03, ioaddr + PwrcsrClr1);
if (wolstat) {
char *reason;
......@@ -602,27 +592,27 @@ static void rhine_power_init(struct net_device *dev)
static void rhine_chip_reset(struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
writeb(Cmd1Reset, ioaddr + ChipCmd1);
iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
IOSYNC;
if (readb(ioaddr + ChipCmd1) & Cmd1Reset) {
if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
printk(KERN_INFO "%s: Reset not complete yet. "
"Trying harder.\n", DRV_NAME);
/* Force reset */
if (rp->quirks & rqForceReset)
writeb(0x40, ioaddr + MiscCmd);
iowrite8(0x40, ioaddr + MiscCmd);
/* Reset can take somewhat longer (rare) */
RHINE_WAIT_FOR(!(readb(ioaddr + ChipCmd1) & Cmd1Reset));
RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
}
if (debug > 1)
printk(KERN_INFO "%s: Reset %s.\n", dev->name,
(readb(ioaddr + ChipCmd1) & Cmd1Reset) ?
(ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
"failed" : "succeeded");
}
......@@ -647,8 +637,8 @@ static void enable_mmio(long pioaddr, u32 quirks)
*/
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
outb(0x20, pioaddr + MACRegEEcsr);
RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
......@@ -664,7 +654,7 @@ static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
/* Turn off EEPROM-controlled wake-up (magic packet) */
if (rp->quirks & rqWOL)
writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}
......@@ -702,9 +692,14 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
u32 quirks;
long pioaddr;
long memaddr;
long ioaddr;
void __iomem *ioaddr;
int io_size, phy_id;
const char *name;
#ifdef USE_MMIO
int bar = 1;
#else
int bar = 0;
#endif
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
......@@ -783,10 +778,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
if (rc)
goto err_out_free_netdev;
#ifdef USE_MMIO
enable_mmio(pioaddr, quirks);
ioaddr = (long) ioremap(memaddr, io_size);
ioaddr = pci_iomap(pdev, bar, io_size);
if (!ioaddr) {
rc = -EIO;
printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
......@@ -794,6 +786,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
goto err_out_free_res;
}
#ifdef USE_MMIO
enable_mmio(pioaddr, quirks);
/* Check that selected MMIO registers match the PIO ones */
i = 0;
while (mmio_verify_registers[i]) {
......@@ -807,18 +802,17 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
goto err_out_unmap;
}
}
#else
ioaddr = pioaddr;
#endif /* USE_MMIO */
dev->base_addr = ioaddr;
dev->base_addr = (unsigned long)ioaddr;
rp->base = ioaddr;
/* Get chip registers into a sane state */
rhine_power_init(dev);
rhine_hw_init(dev, pioaddr);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
if (!is_valid_ether_addr(dev->dev_addr)) {
rc = -EIO;
......@@ -828,7 +822,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
/* For Rhine-I/II, phy_id is loaded from EEPROM */
if (!phy_id)
phy_id = readb(ioaddr + 0x6C);
phy_id = ioread8(ioaddr + 0x6C);
dev->irq = pdev->irq;
......@@ -901,10 +895,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
return 0;
err_out_unmap:
#ifdef USE_MMIO
iounmap((void *)ioaddr);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
#endif
pci_release_regions(pdev);
err_out_free_netdev:
free_netdev(dev);
......@@ -947,7 +939,7 @@ static int alloc_ring(struct net_device* dev)
return 0;
}
void free_ring(struct net_device* dev)
static void free_ring(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
......@@ -1071,102 +1063,102 @@ static void free_tbufs(struct net_device* dev)
static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
mii_check_media(&rp->mii_if, debug, init_media);
if (rp->mii_if.full_duplex)
writeb(readb(ioaddr + ChipCmd1) | Cmd1FDuplex,
iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
ioaddr + ChipCmd1);
else
writeb(readb(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
ioaddr + ChipCmd1);
}
static void init_registers(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
int i;
for (i = 0; i < 6; i++)
writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
/* Initialize other registers. */
writew(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
/* Configure initial FIFO thresholds. */
writeb(0x20, ioaddr + TxConfig);
iowrite8(0x20, ioaddr + TxConfig);
rp->tx_thresh = 0x20;
rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
writel(rp->rx_ring_dma, ioaddr + RxRingPtr);
writel(rp->tx_ring_dma, ioaddr + TxRingPtr);
iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
rhine_set_rx_mode(dev);
/* Enable interrupts by setting the interrupt mask. */
writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
IntrTxDone | IntrTxError | IntrTxUnderrun |
IntrPCIErr | IntrStatsMax | IntrLinkChange,
ioaddr + IntrEnable);
writew(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
ioaddr + ChipCmd);
rhine_check_media(dev, 1);
}
/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(long ioaddr)
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
writeb(0, ioaddr + MIICmd);
writeb(MII_BMSR, ioaddr + MIIRegAddr);
writeb(0x80, ioaddr + MIICmd);
iowrite8(0, ioaddr + MIICmd);
iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
iowrite8(0x80, ioaddr + MIICmd);
RHINE_WAIT_FOR((readb(ioaddr + MIIRegAddr) & 0x20));
RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
writeb(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(long ioaddr, u32 quirks)
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
writeb(0, ioaddr + MIICmd);
iowrite8(0, ioaddr + MIICmd);
if (quirks & rqRhineI) {
writeb(0x01, ioaddr + MIIRegAddr); // MII_BMSR
iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
/* Can be called from ISR. Evil. */
mdelay(1);
/* 0x80 must be set immediately before turning it off */
writeb(0x80, ioaddr + MIICmd);
iowrite8(0x80, ioaddr + MIICmd);
RHINE_WAIT_FOR(readb(ioaddr + MIIRegAddr) & 0x20);
RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
/* Heh. Now clear 0x80 again. */
writeb(0, ioaddr + MIICmd);
iowrite8(0, ioaddr + MIICmd);
}
else
RHINE_WAIT_FOR(readb(ioaddr + MIIRegAddr) & 0x80);
RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}
/* Read and write over the MII Management Data I/O (MDIO) interface. */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
int result;
rhine_disable_linkmon(ioaddr, rp->quirks);
/* rhine_disable_linkmon already cleared MIICmd */
writeb(phy_id, ioaddr + MIIPhyAddr);
writeb(regnum, ioaddr + MIIRegAddr);
writeb(0x40, ioaddr + MIICmd); /* Trigger read */
RHINE_WAIT_FOR(!(readb(ioaddr + MIICmd) & 0x40));
result = readw(ioaddr + MIIData);
iowrite8(phy_id, ioaddr + MIIPhyAddr);
iowrite8(regnum, ioaddr + MIIRegAddr);
iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
result = ioread16(ioaddr + MIIData);
rhine_enable_linkmon(ioaddr);
return result;
......@@ -1175,16 +1167,16 @@ static int mdio_read(struct net_device *dev, int phy_id, int regnum)
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
rhine_disable_linkmon(ioaddr, rp->quirks);
/* rhine_disable_linkmon already cleared MIICmd */
writeb(phy_id, ioaddr + MIIPhyAddr);
writeb(regnum, ioaddr + MIIRegAddr);
writew(value, ioaddr + MIIData);
writeb(0x20, ioaddr + MIICmd); /* Trigger write */
RHINE_WAIT_FOR(!(readb(ioaddr + MIICmd) & 0x20));
iowrite8(phy_id, ioaddr + MIIPhyAddr);
iowrite8(regnum, ioaddr + MIIRegAddr);
iowrite16(value, ioaddr + MIIData);
iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
rhine_enable_linkmon(ioaddr);
}
......@@ -1192,7 +1184,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value
static int rhine_open(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
int rc;
rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
......@@ -1214,7 +1206,7 @@ static int rhine_open(struct net_device *dev)
if (debug > 2)
printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
"MII status: %4.4x.\n",
dev->name, readw(ioaddr + ChipCmd),
dev->name, ioread16(ioaddr + ChipCmd),
mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
netif_start_queue(dev);
......@@ -1225,11 +1217,11 @@ static int rhine_open(struct net_device *dev)
static void rhine_tx_timeout(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
"%4.4x, resetting...\n",
dev->name, readw(ioaddr + IntrStatus),
dev->name, ioread16(ioaddr + IntrStatus),
mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
/* protect against concurrent rx interrupts */
......@@ -1258,7 +1250,7 @@ static void rhine_tx_timeout(struct net_device *dev)
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
unsigned entry;
/* Caution: the write order is important here, set the field
......@@ -1276,7 +1268,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
rp->tx_skbuff[entry] = skb;
if ((rp->quirks & rqRhineI) &&
(((long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
(((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
/* Must use alignment buffer. */
if (skb->len > PKT_BUF_SZ) {
/* packet too long, drop it */
......@@ -1311,7 +1303,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
/* Non-x86 Todo: explicitly flush cache lines here. */
/* Wake the potentially-idle transmit channel */
writeb(readb(ioaddr + ChipCmd1) | Cmd1TxDemand,
iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
ioaddr + ChipCmd1);
IOSYNC;
......@@ -1334,20 +1326,19 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
struct net_device *dev = dev_instance;
long ioaddr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
u32 intr_status;
int boguscnt = max_interrupt_work;
int handled = 0;
ioaddr = dev->base_addr;
while ((intr_status = get_intr_status(dev))) {
handled = 1;
/* Acknowledge all of the current interrupt sources ASAP. */
if (intr_status & IntrTxDescRace)
writeb(0x08, ioaddr + IntrStatus2);
writew(intr_status & 0xffff, ioaddr + IntrStatus);
iowrite8(0x08, ioaddr + IntrStatus2);
iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
IOSYNC;
if (debug > 4)
......@@ -1361,9 +1352,9 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *
if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
if (intr_status & IntrTxErrSummary) {
/* Avoid scavenging before Tx engine turned off */
RHINE_WAIT_FOR(!(readb(ioaddr+ChipCmd) & CmdTxOn));
RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
if (debug > 2 &&
readb(ioaddr+ChipCmd) & CmdTxOn)
ioread8(ioaddr+ChipCmd) & CmdTxOn)
printk(KERN_WARNING "%s: "
"rhine_interrupt() Tx engine"
"still on.\n", dev->name);
......@@ -1387,7 +1378,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *
if (debug > 3)
printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
dev->name, readw(ioaddr + IntrStatus));
dev->name, ioread16(ioaddr + IntrStatus));
return IRQ_RETVAL(handled);
}
......@@ -1582,16 +1573,16 @@ static void rhine_rx(struct net_device *dev)
* these, for others the counters are set to 1 when written to and
* instead cleared when read. So we clear them both ways ...
*/
static inline void clear_tally_counters(const long ioaddr)
static inline void clear_tally_counters(void __iomem *ioaddr)
{
writel(0, ioaddr + RxMissed);
readw(ioaddr + RxCRCErrs);
readw(ioaddr + RxMissed);
iowrite32(0, ioaddr + RxMissed);
ioread16(ioaddr + RxCRCErrs);
ioread16(ioaddr + RxMissed);
}
static void rhine_restart_tx(struct net_device *dev) {
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
int entry = rp->dirty_tx % TX_RING_SIZE;
u32 intr_status;
......@@ -1604,12 +1595,12 @@ static void rhine_restart_tx(struct net_device *dev) {
if ((intr_status & IntrTxErrSummary) == 0) {
/* We know better than the chip where it should continue. */
writel(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
ioaddr + TxRingPtr);
writeb(readb(ioaddr + ChipCmd) | CmdTxOn,
iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
ioaddr + ChipCmd);
writeb(readb(ioaddr + ChipCmd1) | Cmd1TxDemand,
iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
ioaddr + ChipCmd1);
IOSYNC;
}
......@@ -1626,15 +1617,15 @@ static void rhine_restart_tx(struct net_device *dev) {
static void rhine_error(struct net_device *dev, int intr_status)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
spin_lock(&rp->lock);
if (intr_status & IntrLinkChange)
rhine_check_media(dev, 0);
if (intr_status & IntrStatsMax) {
rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
rp->stats.rx_missed_errors += readw(ioaddr + RxMissed);
rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
clear_tally_counters(ioaddr);
}
if (intr_status & IntrTxAborted) {
......@@ -1644,7 +1635,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
}
if (intr_status & IntrTxUnderrun) {
if (rp->tx_thresh < 0xE0)
writeb(rp->tx_thresh += 0x20, ioaddr + TxConfig);
iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
if (debug > 1)
printk(KERN_INFO "%s: Transmitter underrun, Tx "
"threshold now %2.2x.\n",
......@@ -1659,7 +1650,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
(intr_status & (IntrTxAborted |
IntrTxUnderrun | IntrTxDescRace)) == 0) {
if (rp->tx_thresh < 0xE0) {
writeb(rp->tx_thresh += 0x20, ioaddr + TxConfig);
iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
}
if (debug > 1)
printk(KERN_INFO "%s: Unspecified error. Tx "
......@@ -1684,12 +1675,12 @@ static void rhine_error(struct net_device *dev, int intr_status)
static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
unsigned long flags;
spin_lock_irqsave(&rp->lock, flags);
rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
rp->stats.rx_missed_errors += readw(ioaddr + RxMissed);
rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
clear_tally_counters(ioaddr);
spin_unlock_irqrestore(&rp->lock, flags);
......@@ -1699,7 +1690,7 @@ static struct net_device_stats *rhine_get_stats(struct net_device *dev)
static void rhine_set_rx_mode(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
u32 mc_filter[2]; /* Multicast hash filter */
u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
......@@ -1708,13 +1699,13 @@ static void rhine_set_rx_mode(struct net_device *dev)
printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
dev->name);
rx_mode = 0x1C;
writel(0xffffffff, ioaddr + MulticastFilter0);
writel(0xffffffff, ioaddr + MulticastFilter1);
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
} else if ((dev->mc_count > multicast_filter_limit)
|| (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
writel(0xffffffff, ioaddr + MulticastFilter0);
writel(0xffffffff, ioaddr + MulticastFilter1);
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
rx_mode = 0x0C;
} else {
struct dev_mc_list *mclist;
......@@ -1726,11 +1717,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
writel(mc_filter[0], ioaddr + MulticastFilter0);
writel(mc_filter[1], ioaddr + MulticastFilter1);
iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
rx_mode = 0x0C;
}
writeb(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
......@@ -1854,8 +1845,8 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int rhine_close(struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
spin_lock_irq(&rp->lock);
......@@ -1864,16 +1855,16 @@ static int rhine_close(struct net_device *dev)
if (debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard, "
"status was %4.4x.\n",
dev->name, readw(ioaddr + ChipCmd));
dev->name, ioread16(ioaddr + ChipCmd));
/* Switch to loopback mode to avoid hardware races. */
writeb(rp->tx_thresh | 0x02, ioaddr + TxConfig);
iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
/* Disable interrupts by clearing the interrupt mask. */
writew(0x0000, ioaddr + IntrEnable);
iowrite16(0x0000, ioaddr + IntrEnable);
/* Stop the chip's Tx and Rx processes. */
writew(CmdStop, ioaddr + ChipCmd);
iowrite16(CmdStop, ioaddr + ChipCmd);
spin_unlock_irq(&rp->lock);
......@@ -1889,15 +1880,13 @@ static int rhine_close(struct net_device *dev)
static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
unregister_netdev(dev);
pci_iounmap(pdev, rp->base);
pci_release_regions(pdev);
#ifdef USE_MMIO
iounmap((char *)(dev->base_addr));
#endif
free_netdev(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
......@@ -1908,33 +1897,40 @@ static void rhine_shutdown (struct device *gendev)
struct pci_dev *pdev = to_pci_dev(gendev);
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
long ioaddr = dev->base_addr;
void __iomem *ioaddr = rp->base;
rhine_power_init(dev);
/* Make sure we use pattern 0, 1 and not 4, 5 */
if (rp->quirks & rq6patterns)
writeb(0x04, ioaddr + 0xA7);
if (rp->wolopts & WAKE_MAGIC)
writeb(WOLmagic, ioaddr + WOLcrSet);
iowrite8(0x04, ioaddr + 0xA7);
if (rp->wolopts & WAKE_MAGIC) {
iowrite8(WOLmagic, ioaddr + WOLcrSet);
/*
* Turn EEPROM-controlled wake-up back on -- some hardware may
* not cooperate otherwise.
*/
iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
}
if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
writeb(WOLbmcast, ioaddr + WOLcgSet);
iowrite8(WOLbmcast, ioaddr + WOLcgSet);
if (rp->wolopts & WAKE_PHY)
writeb(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
if (rp->wolopts & WAKE_UCAST)
writeb(WOLucast, ioaddr + WOLcrSet);
iowrite8(WOLucast, ioaddr + WOLcrSet);
/* Enable legacy WOL (for old motherboards) */
writeb(0x01, ioaddr + PwcfgSet);
writeb(readb(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
if (rp->wolopts) {
/* Enable legacy WOL (for old motherboards) */
iowrite8(0x01, ioaddr + PwcfgSet);
iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
}
/* Hit power state D3 (sleep) */
writeb(readb(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
/* TODO: Check use of pci_enable_wake() */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment