Commit bde28a59 authored by Jeff Garzik

Merge pobox.com:/spare/repo/netdev-2.6/prism54
into pobox.com:/spare/repo/netdev-2.6/ALL
parents bd0f179f 7334f55d
@@ -306,10 +306,10 @@ CONFIG_EEPRO100=y
# CONFIG_R8169 is not set
# CONFIG_SK98LIN is not set
# CONFIG_TIGON3 is not set
CONFIG_MV64340_ETH=y
CONFIG_MV64340_ETH_0=y
CONFIG_MV64340_ETH_1=y
CONFIG_MV64340_ETH_2=y
CONFIG_MV643XX_ETH=y
CONFIG_MV643XX_ETH_0=y
CONFIG_MV643XX_ETH_1=y
CONFIG_MV643XX_ETH_2=y
#
# Ethernet (10000 Mbit)
@@ -304,7 +304,7 @@ CONFIG_NET_ETHERNET=y
# CONFIG_R8169 is not set
# CONFIG_SK98LIN is not set
# CONFIG_TIGON3 is not set
# CONFIG_MV64340_ETH is not set
# CONFIG_MV643XX_ETH is not set
#
# Ethernet (10000 Mbit)
@@ -40,7 +40,7 @@ const char *get_system_type(void)
return "Momentum Jaguar-ATX";
}
#ifdef CONFIG_MV64340_ETH
#ifdef CONFIG_MV643XX_ETH
extern unsigned char prom_mac_addr_base[6];
static void burn_clocks(void)
@@ -230,7 +230,7 @@ void __init prom_init(void)
mips_machgroup = MACH_GROUP_MOMENCO;
mips_machtype = MACH_MOMENCO_JAGUAR_ATX;
#ifdef CONFIG_MV64340_ETH
#ifdef CONFIG_MV643XX_ETH
/* get the base MAC address for on-board ethernet ports */
get_mac(prom_mac_addr_base);
#endif
@@ -32,7 +32,7 @@ struct callvectors* debug_vectors;
extern unsigned long marvell_base;
extern unsigned long cpu_clock;
#ifdef CONFIG_MV64340_ETH
#ifdef CONFIG_MV643XX_ETH
extern unsigned char prom_mac_addr_base[6];
#endif
@@ -45,7 +45,7 @@ const char *get_system_type(void)
#endif
}
#ifdef CONFIG_MV64340_ETH
#ifdef CONFIG_MV643XX_ETH
static void burn_clocks(void)
{
int i;
@@ -227,7 +227,7 @@ void __init prom_init(void)
mips_machgroup = MACH_GROUP_MOMENCO;
mips_machtype = MACH_MOMENCO_OCELOT_C;
#ifdef CONFIG_MV64340_ETH
#ifdef CONFIG_MV643XX_ETH
/* get the base MAC address for on-board ethernet ports */
get_mac(prom_mac_addr_base);
#endif
@@ -41,6 +41,7 @@
module by all drivers that require it.
Alan Cox : Spinlocking work, added 'BUG_83C690'
Paul Gortmaker : Separate out Tx timeout code from Tx path.
Paul Gortmaker : Remove old unused single Tx buffer code.
Sources:
The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
@@ -289,8 +290,6 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
send_length = ETH_ZLEN < length ? length : ETH_ZLEN;
#ifdef EI_PINGPONG
/*
* We have two Tx slots available for use. Find the first free
* slot, and then perform some sanity checks. With two Tx bufs,
@@ -309,7 +308,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
else if (ei_local->tx2 == 0)
{
output_page = ei_local->tx_start_page + TX_1X_PAGES;
output_page = ei_local->tx_start_page + TX_PAGES/2;
ei_local->tx2 = send_length;
if (ei_debug && ei_local->tx1 > 0)
printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
@@ -366,28 +365,6 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
else
netif_start_queue(dev);
#else /* EI_PINGPONG */
/*
* Only one Tx buffer in use. You need two Tx bufs to come close to
* back-to-back transmits. Expect a 20 -> 25% performance hit on
* reasonable hardware if you only use one Tx buffer.
*/
if (length == send_length)
ei_block_output(dev, length, skb->data, ei_local->tx_start_page);
else {
memset(scratch, 0, ETH_ZLEN);
memcpy(scratch, skb->data, skb->len);
ei_block_output(dev, ETH_ZLEN, scratch, ei_local->tx_start_page);
}
ei_local->txing = 1;
NS8390_trigger_send(dev, send_length, ei_local->tx_start_page);
dev->trans_start = jiffies;
netif_stop_queue(dev);
#endif /* EI_PINGPONG */
/* Turn 8390 interrupts back on. */
ei_local->irqlock = 0;
outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -590,8 +567,6 @@ static void ei_tx_intr(struct net_device *dev)
outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
#ifdef EI_PINGPONG
/*
* There are two Tx buffers; see which one finished, and trigger
* the send of another one if it exists.
@@ -634,13 +609,6 @@ static void ei_tx_intr(struct net_device *dev)
// else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
// dev->name, ei_local->lasttx);
#else /* EI_PINGPONG */
/*
* Single Tx buffer: mark it free so another packet can be loaded.
*/
ei_local->txing = 0;
#endif
/* Minimize Tx latency: update the statistics after we restart TXing. */
if (status & ENTSR_COL)
ei_local->stat.collisions++;
@@ -12,17 +12,7 @@
#include <linux/ioport.h>
#include <linux/skbuff.h>
#define TX_2X_PAGES 12
#define TX_1X_PAGES 6
/* Should always use two Tx slots to get back-to-back transmits. */
#define EI_PINGPONG
#ifdef EI_PINGPONG
#define TX_PAGES TX_2X_PAGES
#else
#define TX_PAGES TX_1X_PAGES
#endif
#define TX_PAGES 12 /* Two Tx slots */
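/* Each 8390 buffer page is 256 bytes, so TX_PAGES/2 = 6 pages gives a
 * 1536-byte slot per transmit buffer, enough for a maximum-size
 * (1514-byte) Ethernet frame. */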
#define ETHER_ADDR_LEN 6
@@ -2131,6 +2131,45 @@ config TIGON3
To compile this driver as a module, choose M here: the module
will be called tg3. This is recommended.
config GIANFAR
tristate "Gianfar Ethernet"
depends on 85xx
help
This driver supports the Gigabit TSEC on the MPC85xx
family of chips, and the FEC on the 8540.
config GFAR_NAPI
bool "NAPI Support"
depends on GIANFAR
config MV643XX_ETH
tristate "MV-643XX Ethernet support"
depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX
help
This driver supports the gigabit Ethernet on the Marvell MV643XX
chipset which is used in the Momenco Ocelot C and Jaguar ATX.
config MV643XX_ETH_0
bool "MV-643XX Port 0"
depends on MV643XX_ETH
help
This enables support for Port 0 of the Marvell MV643XX Gigabit
Ethernet.
config MV643XX_ETH_1
bool "MV-643XX Port 1"
depends on MV643XX_ETH
help
This enables support for Port 1 of the Marvell MV643XX Gigabit
Ethernet.
config MV643XX_ETH_2
bool "MV-643XX Port 2"
depends on MV643XX_ETH
help
This enables support for Port 2 of the Marvell MV643XX Gigabit
Ethernet.
endmenu
#
@@ -10,6 +10,7 @@ obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_IBM_EMAC) += ibm_emac/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_GIANFAR) += gianfar.o gianfar_ethtool.o gianfar_phy.o
#
# link order important here
@@ -95,6 +96,8 @@ obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_FORCEDETH) += forcedeth.o
obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
@@ -87,9 +87,8 @@
* cb_to_use is the next CB to use for queuing a command; cb_to_clean
* is the next CB to check for completion; cb_to_send is the first
* CB to start on in case of a previous failure to resume. CB clean
* up happens in interrupt context in response to a CU interrupt, or
* in dev->poll in the case where NAPI is enabled. cbs_avail keeps
* track of number of free CB resources available.
* up happens in interrupt context in response to a CU interrupt.
* cbs_avail keeps track of number of free CB resources available.
*
* Hardware padding of short packets to minimum packet size is
* enabled. 82557 pads with 7Eh, while the later controllers pad
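/*
 * A minimal sketch of the CB list bookkeeping described above, with
 * hypothetical simplified types; this is illustrative only, not e100's
 * actual structures or functions:
 */
struct cb_sketch {
	struct cb_sketch *next;		/* circularly linked CB ring */
	int complete;			/* set once the CU has executed it */
};

struct cb_ring_sketch {
	struct cb_sketch *cb_to_use;	/* next CB to queue a command on */
	struct cb_sketch *cb_to_clean;	/* next CB to check for completion */
	unsigned int cbs_avail;		/* free CB resources remaining */
};

/* Queuing consumes a free CB and advances cb_to_use. */
static int cb_queue_sketch(struct cb_ring_sketch *r)
{
	if (!r->cbs_avail)
		return -1;		/* ring full; wait for CU cleanup */
	r->cb_to_use->complete = 0;	/* the command would be filled in here */
	r->cb_to_use = r->cb_to_use->next;
	r->cbs_avail--;
	return 0;
}

/* Cleanup (run in response to a CU interrupt) walks completed CBs. */
static void cb_clean_sketch(struct cb_ring_sketch *r)
{
	while (r->cb_to_clean->complete) {
		r->cb_to_clean->complete = 0;
		r->cb_to_clean = r->cb_to_clean->next;
		r->cbs_avail++;
	}
}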
@@ -112,9 +111,8 @@
* replacement RFDs cannot be allocated, or the RU goes non-active,
* the RU must be restarted. Frame arrival generates an interrupt,
* and Rx indication and re-allocation happen in the same context,
* therefore no locking is required. If NAPI is enabled, this work
* happens in dev->poll. A software-generated interrupt is gen-
* erated from the watchdog to recover from a failed allocation
* therefore no locking is required. A software-generated interrupt
* is generated from the watchdog to recover from a failed allocation
scenario where all Rx resources have been indicated and none re-
* placed.
*
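/*
 * A minimal sketch of the Rx indication loop implied above, assuming a
 * hypothetical descriptor with a 'complete' flag; illustrative only,
 * not e100's actual RFD handling:
 */
struct rfd_sketch {
	struct rfd_sketch *next;	/* circularly linked RFD ring */
	int complete;			/* set by the RU when a frame lands */
};

/* Indicate completed frames, re-arm each RFD, and honor a work budget
 * so a poll routine can yield to other tasks. */
static unsigned int rx_clean_sketch(struct rfd_sketch **head,
				    unsigned int budget)
{
	unsigned int work_done = 0;

	while ((*head)->complete && work_done < budget) {
		(*head)->complete = 0;	/* frame handed up, buffer re-armed */
		*head = (*head)->next;
		work_done++;
	}
	return work_done;
}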
@@ -126,8 +124,6 @@
* supported. Tx Scatter/Gather is not supported. Jumbo Frames is
* not supported (hardware limitation).
*
* NAPI support is enabled with CONFIG_E100_NAPI.
*
* MagicPacket(tm) WoL support is enabled/disabled via ethtool.
*
* Thanks to JC (jchapman@katalix.com) for helping with
@@ -158,7 +154,7 @@
#define DRV_NAME "e100"
#define DRV_VERSION "3.0.18"
#define DRV_VERSION "3.0.22-NAPI"
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2004 Intel Corporation"
#define PFX DRV_NAME ": "
@@ -1463,11 +1459,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
nic->net_stats.rx_packets++;
nic->net_stats.rx_bytes += actual_size;
nic->netdev->last_rx = jiffies;
#ifdef CONFIG_E100_NAPI
netif_receive_skb(skb);
#else
netif_rx(skb);
#endif
if(work_done)
(*work_done)++;
}
@@ -1562,20 +1554,12 @@ static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)
if(stat_ack & stat_ack_rnr)
nic->ru_running = 0;
#ifdef CONFIG_E100_NAPI
e100_disable_irq(nic);
netif_rx_schedule(netdev);
#else
if(stat_ack & stat_ack_rx)
e100_rx_clean(nic, NULL, 0);
if(stat_ack & stat_ack_tx)
e100_tx_clean(nic);
#endif
return IRQ_HANDLED;
}
#ifdef CONFIG_E100_NAPI
static int e100_poll(struct net_device *netdev, int *budget)
{
struct nic *nic = netdev_priv(netdev);
@@ -1598,7 +1582,6 @@ static int e100_poll(struct net_device *netdev, int *budget)
return 1;
}
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device *netdev)
@@ -2135,10 +2118,8 @@ static int __devinit e100_probe(struct pci_dev *pdev,
SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
netdev->tx_timeout = e100_tx_timeout;
netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
#ifdef CONFIG_E100_NAPI
netdev->poll = e100_poll;
netdev->weight = E100_NAPI_WEIGHT;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
netdev->poll_controller = e100_netpoll;
#endif
@@ -80,8 +80,6 @@
These may be modified when a driver module is loaded.*/
static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 32;
/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8 /* More are supported, limit only on options */
@@ -99,9 +97,9 @@ static int rx_copybreak;
Making the Tx ring too large decreases the effectiveness of channel
bonding and packet priority.
There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE 16
#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
#define RX_RING_SIZE 32
#define TX_RING_SIZE 256
#define TX_QUEUE_LEN 240 /* Limit ring entries actually used. */
#define RX_RING_SIZE 256
#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
@@ -152,12 +150,10 @@ MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_PARM(debug, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
MODULE_PARM_DESC(max_interrupt_work, "EPIC/100 maximum events handled per interrupt");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
@@ -289,6 +285,12 @@ enum CommandBits {
StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};
#define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */
#define EpicNapiEvent (TxEmpty | TxDone | \
RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
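/* EpicNapiEvent sources are serviced from epic_poll() and acked there on
 * completion; only EpicNormalEvent sources are acked directly in the
 * hard interrupt handler. */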
static u16 media2miictl[16] = {
0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 };
@@ -327,9 +329,12 @@ struct epic_private {
/* Ring pointers. */
spinlock_t lock; /* Group with Tx control cache line. */
spinlock_t napi_lock;
unsigned int reschedule_in_poll;
unsigned int cur_tx, dirty_tx;
unsigned int cur_rx, dirty_rx;
u32 irq_mask;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
struct pci_dev *pci_dev; /* PCI bus location. */
@@ -356,7 +361,8 @@ static void epic_timer(unsigned long data);
static void epic_tx_timeout(struct net_device *dev);
static void epic_init_ring(struct net_device *dev);
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int epic_rx(struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct net_device *dev, int *budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
@@ -375,7 +381,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
int irq;
struct net_device *dev;
struct epic_private *ep;
int i, option = 0, duplex = 0;
int i, ret, option = 0, duplex = 0;
void *ring_space;
dma_addr_t ring_dma;
@@ -389,29 +395,33 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
card_idx++;
i = pci_enable_device(pdev);
if (i)
return i;
ret = pci_enable_device(pdev);
if (ret)
goto out;
irq = pdev->irq;
if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
return -ENODEV;
ret = -ENODEV;
goto err_out_disable;
}
pci_set_master(pdev);
ret = pci_request_regions(pdev, DRV_NAME);
if (ret < 0)
goto err_out_disable;
ret = -ENOMEM;
dev = alloc_etherdev(sizeof (*ep));
if (!dev) {
printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
return -ENOMEM;
goto err_out_free_res;
}
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_request_regions(pdev, DRV_NAME))
goto err_out_free_netdev;
#ifdef USE_IO_OPS
ioaddr = pci_resource_start (pdev, 0);
#else
@@ -419,7 +429,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
if (!ioaddr) {
printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
goto err_out_free_res;
goto err_out_free_netdev;
}
#endif
@@ -456,7 +466,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
dev->base_addr = ioaddr;
dev->irq = irq;
spin_lock_init (&ep->lock);
spin_lock_init(&ep->lock);
spin_lock_init(&ep->napi_lock);
ep->reschedule_in_poll = 0;
/* Bring the chip out of low-power mode. */
outl(0x4200, ioaddr + GENCTL);
@@ -486,6 +498,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
ep->pci_dev = pdev;
ep->chip_id = chip_idx;
ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
ep->irq_mask =
(ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
| CntFull | TxUnderrun | EpicNapiEvent;
/* Find the connected MII xcvrs.
Doing this in open() would allow detecting external xcvrs later, but
@@ -540,10 +555,12 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->tx_timeout = &epic_tx_timeout;
dev->poll = epic_poll;
dev->weight = 64;
i = register_netdev(dev);
if (i)
goto err_out_unmap_tx;
ret = register_netdev(dev);
if (ret < 0)
goto err_out_unmap_rx;
printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
@@ -551,19 +568,24 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
printk("%2.2x:", dev->dev_addr[i]);
printk("%2.2x.\n", dev->dev_addr[i]);
return 0;
out:
return ret;
err_out_unmap_rx:
pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
#ifndef USE_IO_OPS
iounmap(ioaddr);
err_out_free_res:
#endif
pci_release_regions(pdev);
err_out_free_netdev:
#endif
free_netdev(dev);
return -ENODEV;
err_out_free_res:
pci_release_regions(pdev);
err_out_disable:
pci_disable_device(pdev);
goto out;
}
/* Serial EEPROM section. */
@@ -589,6 +611,38 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
#define EE_READ256_CMD (6 << 8)
#define EE_ERASE_CMD (7 << 6)
static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
{
long ioaddr = dev->base_addr;
outl(0x00000000, ioaddr + INTMASK);
}
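/* PCI writes may be posted; reading a device register back forces them
 * to complete before execution continues. */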
static inline void __epic_pci_commit(long ioaddr)
{
#ifndef USE_IO_OPS
inl(ioaddr + INTMASK);
#endif
}
static inline void epic_napi_irq_off(struct net_device *dev,
struct epic_private *ep)
{
long ioaddr = dev->base_addr;
outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
__epic_pci_commit(ioaddr);
}
static inline void epic_napi_irq_on(struct net_device *dev,
struct epic_private *ep)
{
long ioaddr = dev->base_addr;
/* No need to commit possible posted write */
outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
}
static int __devinit read_eeprom(long ioaddr, int location)
{
int i;
@@ -749,9 +803,8 @@ static int epic_open(struct net_device *dev)
/* Enable interrupts by setting the interrupt mask. */
outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
| CntFull | TxUnderrun | TxDone | TxEmpty
| RxError | RxOverflow | RxFull | RxHeader | RxDone,
ioaddr + INTMASK);
| CntFull | TxUnderrun
| RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
if (debug > 1)
printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
@@ -792,7 +845,7 @@ static void epic_pause(struct net_device *dev)
}
/* Remove the packets on the Rx queue. */
epic_rx(dev);
epic_rx(dev, RX_RING_SIZE);
}
static void epic_restart(struct net_device *dev)
@@ -838,9 +891,9 @@ static void epic_restart(struct net_device *dev)
/* Enable interrupts by setting the interrupt mask. */
outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
| CntFull | TxUnderrun | TxDone | TxEmpty
| RxError | RxOverflow | RxFull | RxHeader | RxDone,
ioaddr + INTMASK);
| CntFull | TxUnderrun
| RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
" interrupt %4.4x.\n",
dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
@@ -926,7 +979,6 @@ static void epic_init_ring(struct net_device *dev)
int i;
ep->tx_full = 0;
ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
ep->dirty_tx = ep->cur_tx = 0;
ep->cur_rx = ep->dirty_rx = 0;
ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
@@ -1026,6 +1078,76 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
int status)
{
struct net_device_stats *stats = &ep->stats;
#ifndef final_version
/* There was a major error, log it. */
if (debug > 1)
printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
dev->name, status);
#endif
stats->tx_errors++;
if (status & 0x1050)
stats->tx_aborted_errors++;
if (status & 0x0008)
stats->tx_carrier_errors++;
if (status & 0x0040)
stats->tx_window_errors++;
if (status & 0x0010)
stats->tx_fifo_errors++;
}
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
unsigned int dirty_tx, cur_tx;
/*
* Note: if this lock becomes a problem we can narrow the locked
* region at the cost of occasionally grabbing the lock more times.
*/
cur_tx = ep->cur_tx;
for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
struct sk_buff *skb;
int entry = dirty_tx % TX_RING_SIZE;
int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
if (txstatus & DescOwn)
break; /* It still hasn't been Txed */
if (likely(txstatus & 0x0001)) {
ep->stats.collisions += (txstatus >> 8) & 15;
ep->stats.tx_packets++;
ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
} else
epic_tx_error(dev, ep, txstatus);
/* Free the original skb. */
skb = ep->tx_skbuff[entry];
pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
ep->tx_skbuff[entry] = NULL;
}
#ifndef final_version
if (cur_tx - dirty_tx > TX_RING_SIZE) {
printk(KERN_WARNING
"%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
dev->name, dirty_tx, cur_tx, ep->tx_full);
dirty_tx += TX_RING_SIZE;
}
#endif
ep->dirty_tx = dirty_tx;
if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
/* The ring is no longer full, allow new TX entries. */
ep->tx_full = 0;
netif_wake_queue(dev);
}
}
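/* epic_tx() above is called from epic_poll(), i.e. softirq context, so
 * the skbs are released with dev_kfree_skb_irq(), which defers the
 * actual free to the completion queue instead of freeing in place. */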
/* The interrupt handler schedules NAPI polling; the poll routine does
   the Rx thread work and cleans up after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
@@ -1033,135 +1155,71 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
struct net_device *dev = dev_instance;
struct epic_private *ep = dev->priv;
long ioaddr = dev->base_addr;
int status, boguscnt = max_interrupt_work;
unsigned int handled = 0;
int status;
do {
status = inl(ioaddr + INTSTAT);
/* Acknowledge all of the current interrupt sources ASAP. */
outl(status & 0x00007fff, ioaddr + INTSTAT);
status = inl(ioaddr + INTSTAT);
/* Acknowledge all of the current interrupt sources ASAP. */
outl(status & EpicNormalEvent, ioaddr + INTSTAT);
if (debug > 4)
printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
"intstat=%#8.8x.\n",
dev->name, status, (int)inl(ioaddr + INTSTAT));
if (debug > 4) {
printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
"intstat=%#8.8x.\n", dev->name, status,
(int)inl(ioaddr + INTSTAT));
}
if ((status & IntrSummary) == 0)
break;
handled = 1;
if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
epic_rx(dev);
if (status & (TxEmpty | TxDone)) {
unsigned int dirty_tx, cur_tx;
/* Note: if this lock becomes a problem we can narrow the locked
region at the cost of occasionally grabbing the lock more
times. */
spin_lock(&ep->lock);
cur_tx = ep->cur_tx;
dirty_tx = ep->dirty_tx;
for (; cur_tx - dirty_tx > 0; dirty_tx++) {
struct sk_buff *skb;
int entry = dirty_tx % TX_RING_SIZE;
int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
if (txstatus & DescOwn)
break; /* It still hasn't been Txed */
if ( ! (txstatus & 0x0001)) {
/* There was a major error, log it. */
#ifndef final_version
if (debug > 1)
printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
dev->name, txstatus);
#endif
ep->stats.tx_errors++;
if (txstatus & 0x1050) ep->stats.tx_aborted_errors++;
if (txstatus & 0x0008) ep->stats.tx_carrier_errors++;
if (txstatus & 0x0040) ep->stats.tx_window_errors++;
if (txstatus & 0x0010) ep->stats.tx_fifo_errors++;
} else {
ep->stats.collisions += (txstatus >> 8) & 15;
ep->stats.tx_packets++;
ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
}
/* Free the original skb. */
skb = ep->tx_skbuff[entry];
pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
ep->tx_skbuff[entry] = NULL;
}
if ((status & IntrSummary) == 0)
goto out;
#ifndef final_version
if (cur_tx - dirty_tx > TX_RING_SIZE) {
printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
dev->name, dirty_tx, cur_tx, ep->tx_full);
dirty_tx += TX_RING_SIZE;
}
#endif
ep->dirty_tx = dirty_tx;
if (ep->tx_full
&& cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
/* The ring is no longer full, allow new TX entries. */
ep->tx_full = 0;
spin_unlock(&ep->lock);
netif_wake_queue(dev);
} else
spin_unlock(&ep->lock);
}
handled = 1;
/* Check uncommon events all at once. */
if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
PCIBusErr170 | PCIBusErr175)) {
if (status == 0xffffffff) /* Chip failed or removed (CardBus). */
break;
/* Always update the error counts to avoid overhead later. */
ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
if (status & TxUnderrun) { /* Tx FIFO underflow. */
ep->stats.tx_fifo_errors++;
outl(ep->tx_threshold += 128, ioaddr + TxThresh);
/* Restart the transmit process. */
outl(RestartTx, ioaddr + COMMAND);
}
if (status & RxOverflow) { /* Missed a Rx frame. */
ep->stats.rx_errors++;
}
if (status & (RxOverflow | RxFull))
outw(RxQueued, ioaddr + COMMAND);
if (status & PCIBusErr170) {
printk(KERN_ERR "%s: PCI Bus Error! EPIC status %4.4x.\n",
dev->name, status);
epic_pause(dev);
epic_restart(dev);
}
/* Clear all error sources. */
outl(status & 0x7f18, ioaddr + INTSTAT);
if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
spin_lock(&ep->napi_lock);
if (netif_rx_schedule_prep(dev)) {
epic_napi_irq_off(dev, ep);
__netif_rx_schedule(dev);
} else
ep->reschedule_in_poll++;
spin_unlock(&ep->napi_lock);
}
status &= ~EpicNapiEvent;
/* Check uncommon events all at once. */
if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
if (status == EpicRemoved)
goto out;
/* Always update the error counts to avoid overhead later. */
ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
if (status & TxUnderrun) { /* Tx FIFO underflow. */
ep->stats.tx_fifo_errors++;
outl(ep->tx_threshold += 128, ioaddr + TxThresh);
/* Restart the transmit process. */
outl(RestartTx, ioaddr + COMMAND);
}
if (--boguscnt < 0) {
printk(KERN_ERR "%s: Too much work at interrupt, "
"IntrStatus=0x%8.8x.\n",
dev->name, status);
/* Clear all interrupt sources. */
outl(0x0001ffff, ioaddr + INTSTAT);
break;
if (status & PCIBusErr170) {
printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
dev->name, status);
epic_pause(dev);
epic_restart(dev);
}
} while (1);
/* Clear all error sources. */
outl(status & 0x7f18, ioaddr + INTSTAT);
}
if (debug > 3)
printk(KERN_DEBUG "%s: exiting interrupt, intr_status=%#4.4x.\n",
dev->name, status);
out:
if (debug > 3) {
printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
dev->name, status);
}
return IRQ_RETVAL(handled);
}
static int epic_rx(struct net_device *dev)
static int epic_rx(struct net_device *dev, int budget)
{
struct epic_private *ep = dev->priv;
int entry = ep->cur_rx % RX_RING_SIZE;
@@ -1171,6 +1229,10 @@ static int epic_rx(struct net_device *dev)
if (debug > 4)
printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
ep->rx_ring[entry].rxstatus);
if (rx_work_limit > budget)
rx_work_limit = budget;
/* If we own the next entry, it's a new packet. Send it up. */
while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
@@ -1226,7 +1288,7 @@ static int epic_rx(struct net_device *dev)
ep->rx_skbuff[entry] = NULL;
}
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
netif_receive_skb(skb);
dev->last_rx = jiffies;
ep->stats.rx_packets++;
ep->stats.rx_bytes += pkt_len;
@@ -1254,6 +1316,65 @@ static int epic_rx(struct net_device *dev)
return work_done;
}
static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
{
long ioaddr = dev->base_addr;
int status;
status = inl(ioaddr + INTSTAT);
if (status == EpicRemoved)
return;
if (status & RxOverflow) /* Missed a Rx frame. */
ep->stats.rx_errors++;
if (status & (RxOverflow | RxFull))
outw(RxQueued, ioaddr + COMMAND);
}
static int epic_poll(struct net_device *dev, int *budget)
{
struct epic_private *ep = dev->priv;
int work_done, orig_budget;
long ioaddr = dev->base_addr;
orig_budget = (*budget > dev->quota) ? dev->quota : *budget;
rx_action:
epic_tx(dev, ep);
work_done = epic_rx(dev, *budget);
epic_rx_err(dev, ep);
*budget -= work_done;
dev->quota -= work_done;
if (netif_running(dev) && (work_done < orig_budget)) {
unsigned long flags;
int more;
/* A bit baroque but it avoids a (space hungry) spin_unlock */
spin_lock_irqsave(&ep->napi_lock, flags);
more = ep->reschedule_in_poll;
if (!more) {
__netif_rx_complete(dev);
outl(EpicNapiEvent, ioaddr + INTSTAT);
epic_napi_irq_on(dev, ep);
} else
ep->reschedule_in_poll--;
spin_unlock_irqrestore(&ep->napi_lock, flags);
if (more)
goto rx_action;
}
return (work_done >= orig_budget);
}
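/* Design note: reschedule_in_poll closes the race where the interrupt
 * handler sees a NAPI event while the device is already scheduled.
 * netif_rx_schedule_prep() fails in that case, so the handler bumps the
 * counter instead, and epic_poll() loops back to rx_action rather than
 * completing, guaranteeing the event is not lost. */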
static int epic_close(struct net_device *dev)
{
long ioaddr = dev->base_addr;
@@ -1268,9 +1389,13 @@ static int epic_close(struct net_device *dev)
dev->name, (int)inl(ioaddr + INTSTAT));
del_timer_sync(&ep->timer);
epic_pause(dev);
epic_disable_int(dev, ep);
free_irq(dev->irq, dev);
epic_pause(dev);
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
skb = ep->rx_skbuff[i];
@@ -1491,6 +1616,7 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
#endif
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
/* pci_power_off(pdev, -1); */
}
/*
* drivers/net/gianfar.c
*
* Gianfar Ethernet Driver
* Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
* Based on 8260_io/fcc_enet.c
*
* Author: Andy Fleming
* Maintainer: Kumar Gala (kumar.gala@freescale.com)
*
* Copyright 2004 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Gianfar: AKA Lambda Draconis, "Dragon"
* RA 11 31 24.2
* Dec +69 19 52
* V 3.84
* B-V +1.62
*
* Theory of operation
* This driver is designed for the Triple-speed Ethernet
* controllers on the Freescale 8540/8560 integrated processors,
* as well as the Fast Ethernet Controller on the 8540.
*
* The driver is initialized through OCP. Structures which
* define the configuration needed by the board are defined in a
* board structure in arch/ppc/platforms (though I do not
* discount the possibility that other architectures could one
* day be supported). One assumption the driver currently makes
* is that the PHY is configured in such a way as to advertise all
* capabilities. This is a sensible default, and on certain
* PHYs, changing this default encounters substantial errata
* issues. Future versions may remove this requirement, but for
* now, it is best for the firmware to ensure this is the case.
*
* The Gianfar Ethernet Controller uses a ring of buffer
* descriptors. The beginning is indicated by a register
* pointing to the physical address of the start of the ring.
* The end is determined by a "wrap" bit being set in the
* last descriptor of the ring.
*
* When a packet is received, the RXF bit in the
* IEVENT register is set, triggering an interrupt when the
* corresponding bit in the IMASK register is also set (if
* interrupt coalescing is active, then the interrupt may not
* happen immediately, but will wait until either a set number
* of frames or amount of time has passed). In NAPI, the
* interrupt handler will signal there is work to be done, and
* exit. Without NAPI, the packet(s) will be handled
* immediately. Both methods will start at the last known empty
* descriptor, and process every subsequent descriptor until there
* are none left with data (NAPI will stop after a set number of
* packets to give time to other tasks, but will eventually
* process all the packets). The data arrives inside a
* pre-allocated skb, and so after the skb is passed up to the
* stack, a new skb must be allocated, and the address field in
* the buffer descriptor must be updated to indicate this new
* skb.
*
* When the kernel requests that a packet be transmitted, the
* driver starts where it left off last time, and points the
* descriptor at the buffer which was passed in. The driver
* then informs the DMA engine that there are packets ready to
* be transmitted. Once the controller is finished transmitting
* the packet, an interrupt may be triggered (under the same
* conditions as for reception, but depending on the TXF bit).
* The driver then cleans up the buffer.
*/
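/*
 * A minimal sketch of the descriptor-ring walk described above, using
 * hypothetical names and flag values (the real driver's rxbd8/txbd8
 * layouts live in gianfar.h); illustrative only:
 */
struct bd_sketch {
	unsigned short status;		/* EMPTY, WRAP, ... flags */
	unsigned short length;
	unsigned long bufPtr;		/* physical address of the buffer */
};

#define BD_SKETCH_EMPTY 0x8000		/* hardware still owns this BD */
#define BD_SKETCH_WRAP 0x2000		/* last BD; ring wraps to the base */

/* Process every filled descriptor, following the wrap bit back to the
 * base of the ring, until one is still owned by the hardware. */
static void bd_ring_walk_sketch(struct bd_sketch *base)
{
	struct bd_sketch *bdp = base;

	while (!(bdp->status & BD_SKETCH_EMPTY)) {
		/* hand bdp->bufPtr/bdp->length up, attach a fresh buffer,
		 * then give the descriptor back to the hardware */
		bdp->status |= BD_SKETCH_EMPTY;
		bdp = (bdp->status & BD_SKETCH_WRAP) ? base : bdp + 1;
	}
}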
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include "gianfar.h"
#include "gianfar_phy.h"
#ifdef CONFIG_NET_FASTROUTE
#include <linux/if_arp.h>
#include <net/ip.h>
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,41)
#define irqreturn_t void
#define IRQ_HANDLED
#endif
#define TX_TIMEOUT (1*HZ)
#define SKB_ALLOC_TIMEOUT 1000000
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS
#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif
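/* netif_receive_skb() hands the frame straight to the protocol layers,
 * which is only legal from softirq context such as dev->poll;
 * netif_rx() queues the frame for later processing and is safe from the
 * hard interrupt handler used in the non-NAPI build. */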
#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.0, "
char gfar_driver_name[] = "Gianfar Ethernet";
char gfar_driver_version[] = "1.0";
int startup_gfar(struct net_device *dev);
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
static struct net_device_stats *gfar_get_stats(struct net_device *dev);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void gfar_phy_change(void *data);
static void gfar_phy_timer(unsigned long data);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct ocp_device *ocpdev);
static void gfar_remove(struct ocp_device *ocpdev);
void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget);
#endif
#ifdef CONFIG_NET_FASTROUTE
static int gfar_accept_fastpath(struct net_device *dev, struct dst_entry *dst);
#endif
static inline int try_fastroute(struct sk_buff *skb, struct net_device *dev, int length);
#ifdef CONFIG_GFAR_NAPI
static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
#else
static int gfar_clean_rx_ring(struct net_device *dev);
#endif
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
extern struct ethtool_ops gfar_ethtool_ops;
extern void gfar_gstrings_normon(struct net_device *dev, u32 stringset,
u8 * buf);
extern void gfar_fill_stats_normon(struct net_device *dev,
struct ethtool_stats *dummy, u64 * buf);
extern int gfar_stats_count_normon(struct net_device *dev);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
/* Called by the ocp code to initialize device data structures
* required for bringing up the device.
* Returns 0 on success. */
static int gfar_probe(struct ocp_device *ocpdev)
{
u32 tempval;
struct ocp_device *mdiodev;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
struct ocp_gfar_data *einfo;
int idx;
int err = 0;
struct ethtool_ops *dev_ethtool_ops;
einfo = (struct ocp_gfar_data *) ocpdev->def->additions;
if (einfo == NULL) {
printk(KERN_ERR "gfar %d: Missing additional data!\n",
ocpdev->def->index);
return -ENODEV;
}
/* get a pointer to the register memory which can
* configure the PHYs. If it's different from this set,
* get the device which has those regs */
if ((einfo->phyregidx >= 0) && (einfo->phyregidx != ocpdev->def->index)) {
mdiodev = ocp_find_device(OCP_ANY_ID,
OCP_FUNC_GFAR, einfo->phyregidx);
/* If the device which holds the MDIO regs isn't
* up, wait for it to come up */
if (mdiodev == NULL)
return -EAGAIN;
} else {
mdiodev = ocpdev;
}
/* Create an ethernet device instance */
dev = alloc_etherdev(sizeof (*priv));
if (dev == NULL)
return -ENOMEM;
priv = netdev_priv(dev);
/* Set the info in the priv to the current info */
priv->einfo = einfo;
/* get a pointer to the register memory */
priv->regs = (struct gfar *)
ioremap(ocpdev->def->paddr, sizeof (struct gfar));
if (priv->regs == NULL) {
err = -ENOMEM;
goto regs_fail;
}
/* Set the PHY base address */
priv->phyregs = (struct gfar *)
ioremap(mdiodev->def->paddr, sizeof (struct gfar));
if (priv->phyregs == NULL) {
err = -ENOMEM;
goto phy_regs_fail;
}
ocp_set_drvdata(ocpdev, dev);
/* Stop the DMA engine now, in case it was running before */
/* (The firmware could have used it, and left it running). */
/* To do this, we write Graceful Receive Stop and Graceful */
/* Transmit Stop, and then wait until the corresponding bits */
/* in IEVENT indicate the stops have completed. */
tempval = gfar_read(&priv->regs->dmactrl);
tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&priv->regs->dmactrl, tempval);
tempval = gfar_read(&priv->regs->dmactrl);
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&priv->regs->dmactrl, tempval);
while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
cpu_relax();
/* Reset MAC layer */
gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
gfar_write(&priv->regs->maccfg1, tempval);
/* Initialize MACCFG2. */
gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);
/* Initialize ECNTRL */
gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
/* Copy the station address into the dev structure, */
/* and into the address registers MAC_STNADDR1,2. */
/* Backwards, because little endian MACs are dumb. */
/* Don't set the regs if the firmware already did */
memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);
/* Set the dev->base_addr to the gfar reg region */
dev->base_addr = (unsigned long) (priv->regs);
SET_MODULE_OWNER(dev);
/* Fill in the dev structure */
dev->open = gfar_enet_open;
dev->hard_start_xmit = gfar_start_xmit;
dev->tx_timeout = gfar_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
dev->poll = gfar_poll;
dev->weight = GFAR_DEV_WEIGHT;
#endif
dev->stop = gfar_close;
dev->get_stats = gfar_get_stats;
dev->change_mtu = gfar_change_mtu;
dev->mtu = 1500;
dev->set_multicast_list = gfar_set_multi;
dev->flags |= IFF_MULTICAST;
dev_ethtool_ops =
(struct ethtool_ops *)kmalloc(sizeof(struct ethtool_ops),
GFP_KERNEL);
if(dev_ethtool_ops == NULL) {
err = -ENOMEM;
goto ethtool_fail;
}
memcpy(dev_ethtool_ops, &gfar_ethtool_ops, sizeof(gfar_ethtool_ops));
/* If there is no RMON support in this device, we don't
* want to expose non-existent statistics */
if((priv->einfo->flags & GFAR_HAS_RMON) == 0) {
dev_ethtool_ops->get_strings = gfar_gstrings_normon;
dev_ethtool_ops->get_stats_count = gfar_stats_count_normon;
dev_ethtool_ops->get_ethtool_stats = gfar_fill_stats_normon;
}
if((priv->einfo->flags & GFAR_HAS_COALESCE) == 0) {
dev_ethtool_ops->set_coalesce = NULL;
dev_ethtool_ops->get_coalesce = NULL;
}
dev->ethtool_ops = dev_ethtool_ops;
#ifdef CONFIG_NET_FASTROUTE
dev->accept_fastpath = gfar_accept_fastpath;
#endif
priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
#ifdef CONFIG_GFAR_BUFSTASH
priv->rx_stash_size = STASH_LENGTH;
#endif
priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
/* Initially, coalescing is disabled */
priv->txcoalescing = 0;
priv->txcount = 0;
priv->txtime = 0;
priv->rxcoalescing = 0;
priv->rxcount = 0;
priv->rxtime = 0;
err = register_netdev(dev);
if (err) {
printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
dev->name);
goto register_fail;
}
/* Print out the device info */
printk(DEVICE_NAME, dev->name);
for (idx = 0; idx < 6; idx++)
printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
printk("\n");
/* Even more device info helps when determining which kernel */
/* provided which set of benchmarks. Since this is global for all */
/* devices, we only print it once */
#ifdef CONFIG_GFAR_NAPI
printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
#else
printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
#endif
printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
dev->name, priv->rx_ring_size, priv->tx_ring_size);
return 0;
register_fail:
kfree(dev_ethtool_ops);
ethtool_fail:
iounmap((void *) priv->phyregs);
phy_regs_fail:
iounmap((void *) priv->regs);
regs_fail:
free_netdev(dev);
return err;
}
static void gfar_remove(struct ocp_device *ocpdev)
{
struct net_device *dev = ocp_get_drvdata(ocpdev);
struct gfar_private *priv = netdev_priv(dev);
ocp_set_drvdata(ocpdev, NULL);
kfree(dev->ethtool_ops);
iounmap((void *) priv->regs);
iounmap((void *) priv->phyregs);
free_netdev(dev);
}
/* Configure the PHY for dev.
* Returns 0 on success, -1 on failure.
*/
static int init_phy(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct phy_info *curphy;
priv->link = 1;
priv->oldlink = 0;
priv->oldspeed = 0;
priv->olddplx = -1;
/* get info for this PHY */
curphy = get_phy_info(dev);
if (curphy == NULL) {
printk(KERN_ERR "%s: No PHY found\n", dev->name);
return -1;
}
priv->phyinfo = curphy;
/* Run the commands which configure the PHY */
phy_run_commands(dev, curphy->config);
return 0;
}
static void init_registers(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
/* Clear IEVENT */
gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);
/* Initialize IMASK */
gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
/* Init hash registers to zero */
gfar_write(&priv->regs->iaddr0, 0);
gfar_write(&priv->regs->iaddr1, 0);
gfar_write(&priv->regs->iaddr2, 0);
gfar_write(&priv->regs->iaddr3, 0);
gfar_write(&priv->regs->iaddr4, 0);
gfar_write(&priv->regs->iaddr5, 0);
gfar_write(&priv->regs->iaddr6, 0);
gfar_write(&priv->regs->iaddr7, 0);
gfar_write(&priv->regs->gaddr0, 0);
gfar_write(&priv->regs->gaddr1, 0);
gfar_write(&priv->regs->gaddr2, 0);
gfar_write(&priv->regs->gaddr3, 0);
gfar_write(&priv->regs->gaddr4, 0);
gfar_write(&priv->regs->gaddr5, 0);
gfar_write(&priv->regs->gaddr6, 0);
gfar_write(&priv->regs->gaddr7, 0);
/* Zero out rctrl */
gfar_write(&priv->regs->rctrl, 0x00000000);
/* Zero out the rmon mib registers if it has them */
if (priv->einfo->flags & GFAR_HAS_RMON) {
memset((void *) &(priv->regs->rmon), 0,
sizeof (struct rmon_mib));
/* Mask off the CAM interrupts */
gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
}
/* Initialize the max receive buffer length */
gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
#ifdef CONFIG_GFAR_BUFSTASH
/* If we are stashing buffers, we need to set the
* extraction length to the size of the buffer */
gfar_write(&priv->regs->attreli, priv->rx_stash_size << 16);
#endif
/* Initialize the Minimum Frame Length Register */
gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
/* Setup Attributes so that snooping is on for rx */
gfar_write(&priv->regs->attr, ATTR_INIT_SETTINGS);
gfar_write(&priv->regs->attreli, ATTRELI_INIT_SETTINGS);
/* Assign the TBI an address which won't conflict with the PHYs */
gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}
void stop_gfar(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar *regs = priv->regs;
unsigned long flags;
u32 tempval;
/* Lock it down */
spin_lock_irqsave(&priv->lock, flags);
/* Tell the kernel the link is down */
priv->link = 0;
adjust_link(dev);
/* Mask all interrupts */
gfar_write(&regs->imask, IMASK_INIT_CLEAR);
/* Clear all interrupts */
gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(&priv->regs->dmactrl);
if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
!= (DMACTRL_GRS | DMACTRL_GTS)) {
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&priv->regs->dmactrl, tempval);
while (!(gfar_read(&priv->regs->ievent) &
(IEVENT_GRSC | IEVENT_GTSC)))
cpu_relax();
}
/* Disable Rx and Tx */
tempval = gfar_read(&regs->maccfg1);
tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
gfar_write(&regs->maccfg1, tempval);
if (priv->einfo->flags & GFAR_HAS_PHY_INTR) {
phy_run_commands(dev, priv->phyinfo->shutdown);
}
spin_unlock_irqrestore(&priv->lock, flags);
/* Free the IRQs */
if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) {
free_irq(priv->einfo->interruptError, dev);
free_irq(priv->einfo->interruptTransmit, dev);
free_irq(priv->einfo->interruptReceive, dev);
} else {
free_irq(priv->einfo->interruptTransmit, dev);
}
if (priv->einfo->flags & GFAR_HAS_PHY_INTR) {
free_irq(priv->einfo->interruptPHY, dev);
} else {
del_timer_sync(&priv->phy_info_timer);
}
free_skb_resources(priv);
dma_unmap_single(NULL, gfar_read(&regs->tbase),
sizeof(struct txbd8)*priv->tx_ring_size,
DMA_BIDIRECTIONAL);
dma_unmap_single(NULL, gfar_read(&regs->rbase),
sizeof(struct rxbd8)*priv->rx_ring_size,
DMA_BIDIRECTIONAL);
/* Free the buffer descriptors */
kfree(priv->tx_bd_base);
}
/* If there are any tx skbs or rx skbs still around, free them.
* Then free tx_skbuff and rx_skbuff */
void free_skb_resources(struct gfar_private *priv)
{
struct rxbd8 *rxbdp;
struct txbd8 *txbdp;
int i;
/* Go through all the buffer descriptors and free their data buffers */
txbdp = priv->tx_bd_base;
for (i = 0; i < priv->tx_ring_size; i++) {
if (priv->tx_skbuff[i]) {
dma_unmap_single(NULL, txbdp->bufPtr,
txbdp->length,
DMA_TO_DEVICE);
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
}
}
kfree(priv->tx_skbuff);
rxbdp = priv->rx_bd_base;
/* rx_skbuff is not guaranteed to be allocated, so only
* free it and its contents if it is allocated */
if(priv->rx_skbuff != NULL) {
for (i = 0; i < priv->rx_ring_size; i++) {
if (priv->rx_skbuff[i]) {
dma_unmap_single(NULL, rxbdp->bufPtr,
priv->rx_buffer_size
+ RXBUF_ALIGNMENT,
DMA_FROM_DEVICE);
dev_kfree_skb_any(priv->rx_skbuff[i]);
priv->rx_skbuff[i] = NULL;
}
rxbdp->status = 0;
rxbdp->length = 0;
rxbdp->bufPtr = 0;
rxbdp++;
}
kfree(priv->rx_skbuff);
}
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
struct txbd8 *txbdp;
struct rxbd8 *rxbdp;
unsigned long addr;
int i;
struct gfar_private *priv = netdev_priv(dev);
struct gfar *regs = priv->regs;
u32 tempval;
int err = 0;
gfar_write(&regs->imask, IMASK_INIT_CLEAR);
/* Allocate memory for the buffer descriptors */
addr =
(unsigned int) kmalloc(sizeof (struct txbd8) * priv->tx_ring_size +
sizeof (struct rxbd8) * priv->rx_ring_size,
GFP_KERNEL);
if (addr == 0) {
printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
dev->name);
return -ENOMEM;
}
priv->tx_bd_base = (struct txbd8 *) addr;
/* enet DMA only understands physical addresses */
gfar_write(&regs->tbase,
dma_map_single(NULL, (void *)addr,
sizeof(struct txbd8) * priv->tx_ring_size,
DMA_BIDIRECTIONAL));
/* Start the rx descriptor ring where the tx ring leaves off */
addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
priv->rx_bd_base = (struct rxbd8 *) addr;
gfar_write(&regs->rbase,
dma_map_single(NULL, (void *)addr,
sizeof(struct rxbd8) * priv->rx_ring_size,
DMA_BIDIRECTIONAL));
/* Setup the skbuff rings */
priv->tx_skbuff =
(struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
priv->tx_ring_size, GFP_KERNEL);
if (priv->tx_skbuff == NULL) {
printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
dev->name);
err = -ENOMEM;
goto tx_skb_fail;
}
for (i = 0; i < priv->tx_ring_size; i++)
priv->tx_skbuff[i] = NULL;
priv->rx_skbuff =
(struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
priv->rx_ring_size, GFP_KERNEL);
if (priv->rx_skbuff == NULL) {
printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
dev->name);
err = -ENOMEM;
goto rx_skb_fail;
}
for (i = 0; i < priv->rx_ring_size; i++)
priv->rx_skbuff[i] = NULL;
/* Initialize some variables in our dev structure */
priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
priv->cur_rx = priv->rx_bd_base;
priv->skb_curtx = priv->skb_dirtytx = 0;
priv->skb_currx = 0;
/* Initialize Transmit Descriptor Ring */
txbdp = priv->tx_bd_base;
for (i = 0; i < priv->tx_ring_size; i++) {
txbdp->status = 0;
txbdp->length = 0;
txbdp->bufPtr = 0;
txbdp++;
}
/* Set the last descriptor in the ring to indicate wrap */
txbdp--;
txbdp->status |= TXBD_WRAP;
rxbdp = priv->rx_bd_base;
for (i = 0; i < priv->rx_ring_size; i++) {
struct sk_buff *skb = NULL;
rxbdp->status = 0;
skb = gfar_new_skb(dev, rxbdp);
priv->rx_skbuff[i] = skb;
rxbdp++;
}
/* Set the last descriptor in the ring to wrap */
rxbdp--;
rxbdp->status |= RXBD_WRAP;
/* If the device has multiple interrupts, register for
* them. Otherwise, only register for the one */
if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) {
/* Install our interrupt handlers for Error,
* Transmit, and Receive */
if (request_irq(priv->einfo->interruptError, gfar_error,
0, "enet_error", dev) < 0) {
printk(KERN_ERR "%s: Can't get IRQ %d\n",
dev->name, priv->einfo->interruptError);
err = -1;
goto err_irq_fail;
}
if (request_irq(priv->einfo->interruptTransmit, gfar_transmit,
0, "enet_tx", dev) < 0) {
printk(KERN_ERR "%s: Can't get IRQ %d\n",
dev->name, priv->einfo->interruptTransmit);
err = -1;
goto tx_irq_fail;
}
if (request_irq(priv->einfo->interruptReceive, gfar_receive,
0, "enet_rx", dev) < 0) {
printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
dev->name, priv->einfo->interruptReceive);
err = -1;
goto rx_irq_fail;
}
} else {
if (request_irq(priv->einfo->interruptTransmit, gfar_interrupt,
0, "gfar_interrupt", dev) < 0) {
printk(KERN_ERR "%s: Can't get IRQ %d\n",
dev->name, priv->einfo->interruptTransmit);
err = -1;
goto err_irq_fail;
}
}
/* Grab the PHY interrupt */
if (priv->einfo->flags & GFAR_HAS_PHY_INTR) {
if (request_irq(priv->einfo->interruptPHY, phy_interrupt,
SA_SHIRQ, "phy_interrupt", dev) < 0) {
printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n",
dev->name, priv->einfo->interruptPHY);
err = -1;
if (priv->einfo->flags & GFAR_HAS_MULTI_INTR)
goto phy_irq_fail;
else
goto tx_irq_fail;
}
} else {
init_timer(&priv->phy_info_timer);
priv->phy_info_timer.function = &gfar_phy_timer;
priv->phy_info_timer.data = (unsigned long) dev;
mod_timer(&priv->phy_info_timer, jiffies + 2 * HZ);
}
/* Set up the bottom half queue */
INIT_WORK(&priv->tq, (void (*)(void *))gfar_phy_change, dev);
/* Configure the PHY interrupt */
phy_run_commands(dev, priv->phyinfo->startup);
/* Tell the kernel the link is up, and determine the
* negotiated features (speed, duplex) */
adjust_link(dev);
if (priv->link == 0)
printk(KERN_INFO "%s: No link detected\n", dev->name);
/* Configure the coalescing support */
if (priv->txcoalescing)
gfar_write(&regs->txic,
mk_ic_value(priv->txcount, priv->txtime));
else
gfar_write(&regs->txic, 0);
if (priv->rxcoalescing)
gfar_write(&regs->rxic,
mk_ic_value(priv->rxcount, priv->rxtime));
else
gfar_write(&regs->rxic, 0);
init_waitqueue_head(&priv->rxcleanupq);
/* Enable Rx and Tx in MACCFG1 */
tempval = gfar_read(&regs->maccfg1);
tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
gfar_write(&regs->maccfg1, tempval);
/* Initialize DMACTRL to have WWR and WOP */
tempval = gfar_read(&priv->regs->dmactrl);
tempval |= DMACTRL_INIT_SETTINGS;
gfar_write(&priv->regs->dmactrl, tempval);
/* Clear THLT, so that the DMA starts polling now */
gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
/* Make sure we aren't stopped */
tempval = gfar_read(&priv->regs->dmactrl);
tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&priv->regs->dmactrl, tempval);
/* Unmask the interrupts we look for */
gfar_write(&regs->imask, IMASK_DEFAULT);
return 0;
phy_irq_fail:
free_irq(priv->einfo->interruptReceive, dev);
rx_irq_fail:
free_irq(priv->einfo->interruptTransmit, dev);
tx_irq_fail:
free_irq(priv->einfo->interruptError, dev);
err_irq_fail:
rx_skb_fail:
free_skb_resources(priv);
tx_skb_fail:
kfree(priv->tx_bd_base);
return err;
}
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
int err;
/* Initialize a bunch of registers */
init_registers(dev);
gfar_set_mac_address(dev);
err = init_phy(dev);
if (err)
return err;
err = startup_gfar(dev);
netif_start_queue(dev);
return err;
}
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct txbd8 *txbdp;
/* Update transmit stats */
priv->stats.tx_bytes += skb->len;
/* Lock priv now */
spin_lock_irq(&priv->lock);
/* Point at the first free tx descriptor */
txbdp = priv->cur_tx;
/* Clear all but the WRAP status flags */
txbdp->status &= TXBD_WRAP;
/* Set buffer length and pointer */
txbdp->length = skb->len;
txbdp->bufPtr = dma_map_single(NULL, skb->data,
skb->len, DMA_TO_DEVICE);
/* Save the skb pointer so we can free it later */
priv->tx_skbuff[priv->skb_curtx] = skb;
/* Update the current skb pointer (wrapping if this was the last) */
priv->skb_curtx =
(priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
/* Flag the BD as interrupt-causing */
txbdp->status |= TXBD_INTERRUPT;
/* Flag the BD as ready to go, last in frame, and */
/* in need of CRC */
txbdp->status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);
dev->trans_start = jiffies;
/* If this was the last BD in the ring, the next one */
/* is at the beginning of the ring */
if (txbdp->status & TXBD_WRAP)
txbdp = priv->tx_bd_base;
else
txbdp++;
/* If the next BD still needs to be cleaned up, then the bds
are full. We need to tell the kernel to stop sending us stuff. */
if (txbdp == priv->dirty_tx) {
netif_stop_queue(dev);
priv->stats.tx_fifo_errors++;
}
/* Update the current txbd to the next one */
priv->cur_tx = txbdp;
/* Tell the DMA to go go go */
gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
/* Unlock priv */
spin_unlock_irq(&priv->lock);
return 0;
}
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
stop_gfar(dev);
netif_stop_queue(dev);
return 0;
}
/* returns a net_device_stats structure pointer */
static struct net_device_stats * gfar_get_stats(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
return &(priv->stats);
}
/* Changes the mac address if the controller is not running. */
int gfar_set_mac_address(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
int i;
char tmpbuf[MAC_ADDR_LEN];
u32 tempval;
/* Now copy it into the mac registers backwards, cuz */
/* little endian is silly */
for (i = 0; i < MAC_ADDR_LEN; i++)
tmpbuf[MAC_ADDR_LEN - 1 - i] = dev->dev_addr[i];
gfar_write(&priv->regs->macstnaddr1, *((u32 *) (tmpbuf)));
tempval = *((u32 *) (tmpbuf + 4));
gfar_write(&priv->regs->macstnaddr2, tempval);
return 0;
}
/**********************************************************************
* gfar_accept_fastpath
*
* Used to authenticate to the kernel that a fast path entry can be
* added to device's routing table cache
*
* Input : pointer to ethernet interface network device structure and
* a pointer to the designated entry to be added to the cache.
* Output : zero upon success, negative upon failure
**********************************************************************/
#ifdef CONFIG_NET_FASTROUTE
static int gfar_accept_fastpath(struct net_device *dev, struct dst_entry *dst)
{
struct net_device *odev = dst->dev;
if ((dst->ops->protocol != __constant_htons(ETH_P_IP))
|| (odev->type != ARPHRD_ETHER)
|| (odev->accept_fastpath == NULL)) {
return -1;
}
return 0;
}
#endif
/* try_fastroute() -- Checks the fastroute cache to see if a given packet
* can be routed immediately to another device. If it can, we send it.
* If we used a fastroute, we return 1. Otherwise, we return 0.
* Returns 0 if CONFIG_NET_FASTROUTE is not on
*/
static inline int try_fastroute(struct sk_buff *skb, struct net_device *dev, int length)
{
#ifdef CONFIG_NET_FASTROUTE
struct ethhdr *eth;
struct iphdr *iph;
unsigned int hash;
struct rtable *rt;
struct net_device *odev;
struct gfar_private *priv = netdev_priv(dev);
unsigned int CPU_ID = smp_processor_id();
eth = (struct ethhdr *) (skb->data);
/* Only route ethernet IP packets */
if (eth->h_proto == __constant_htons(ETH_P_IP)) {
iph = (struct iphdr *) (skb->data + ETH_HLEN);
/* Generate the hash value */
hash = ((*(u8 *) &iph->daddr) ^ (*(u8 *) & iph->saddr)) & NETDEV_FASTROUTE_HMASK;
rt = (struct rtable *) (dev->fastpath[hash]);
if (rt != NULL
&& ((*(u32 *) &iph->daddr) == (*(u32 *) &rt->key.dst))
&& ((*(u32 *) &iph->saddr) == (*(u32 *) &rt->key.src))
&& !(rt->u.dst.obsolete)) {
odev = rt->u.dst.dev;
netdev_rx_stat[CPU_ID].fastroute_hit++;
/* Make sure the packet is:
* 1) IPv4
* 2) without any options (header length of 5)
* 3) Not a multicast packet
* 4) going to a valid destination
* 5) Not out of time-to-live
*/
if (iph->version == 4
&& iph->ihl == 5
&& (!(eth->h_dest[0] & 0x01))
&& neigh_is_valid(rt->u.dst.neighbour)
&& iph->ttl > 1) {
/* Fast Route Path: Taken if the outgoing device is ready to transmit the packet now */
if ((!netif_queue_stopped(odev))
&& (!spin_is_locked(odev->xmit_lock))
&& (skb->len <= (odev->mtu + ETH_HLEN + 2 + 4))) {
skb->pkt_type = PACKET_FASTROUTE;
skb->protocol = __constant_htons(ETH_P_IP);
ip_decrease_ttl(iph);
memcpy(eth->h_source, odev->dev_addr, MAC_ADDR_LEN);
memcpy(eth->h_dest, rt->u.dst.neighbour->ha, MAC_ADDR_LEN);
skb->dev = odev;
/* Prep the skb for the packet */
skb_put(skb, length);
if (odev->hard_start_xmit(skb, odev) != 0) {
panic("%s: FastRoute path corrupted", dev->name);
}
netdev_rx_stat[CPU_ID].fastroute_success++;
}
/* Semi Fast Route Path: Mark the packet as needing fast routing, but let the
* stack handle getting it to the device */
else {
skb->pkt_type = PACKET_FASTROUTE;
skb->nh.raw = skb->data + ETH_HLEN;
skb->protocol = __constant_htons(ETH_P_IP);
netdev_rx_stat[CPU_ID].fastroute_defer++;
/* Prep the skb for the packet */
skb_put(skb, length);
if(RECEIVE(skb) == NET_RX_DROP) {
priv->extra_stats.kernel_dropped++;
}
}
return 1;
}
}
}
#endif /* CONFIG_NET_FASTROUTE */
return 0;
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
int tempsize, tempval;
struct gfar_private *priv = netdev_priv(dev);
int oldsize = priv->rx_buffer_size;
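/* The frame must hold the MTU plus the Ethernet header (14 bytes)
* and the FCS (4 bytes), hence the extra 18 */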
int frame_size = new_mtu + 18;
if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
printk(KERN_ERR "%s: Invalid MTU setting\n", dev->name);
return -EINVAL;
}
tempsize =
(frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
INCREMENTAL_BUFFER_SIZE;
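/* For illustration: new_mtu 1500 gives frame_size 1518, which rounds
* up to tempsize 1536, the next multiple of INCREMENTAL_BUFFER_SIZE */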
/* Only stop and restart the controller if the buffer size
* actually changed and the interface is up */
if ((oldsize != tempsize) && (dev->flags & IFF_UP))
stop_gfar(dev);
priv->rx_buffer_size = tempsize;
dev->mtu = new_mtu;
gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
/* If the mtu is larger than the max size for standard
* ethernet frames (ie, a jumbo frame), then set maccfg2
* to allow huge frames, and to check the length */
tempval = gfar_read(&priv->regs->maccfg2);
if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
else
tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
gfar_write(&priv->regs->maccfg2, tempval);
if ((oldsize != tempsize) && (dev->flags & IFF_UP))
startup_gfar(dev);
return 0;
}
/* gfar_timeout gets called when a packet has not been
* transmitted after a set amount of time.
* For now, assume that clearing out all the structures, and
* starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
priv->stats.tx_errors++;
if (dev->flags & IFF_UP) {
stop_gfar(dev);
startup_gfar(dev);
}
if (!netif_queue_stopped(dev))
netif_schedule(dev);
}
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_id;
struct gfar_private *priv = netdev_priv(dev);
struct txbd8 *bdp;
/* Clear IEVENT */
gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
/* Lock priv */
spin_lock(&priv->lock);
bdp = priv->dirty_tx;
while ((bdp->status & TXBD_READY) == 0) {
/* If dirty_tx and cur_tx are the same, the ring is either */
/* completely empty or completely full; the queue is only */
/* stopped when the ring is full, so if it is still running */
/* the ring is empty and we are done. */
if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
break;
priv->stats.tx_packets++;
/* Deferred means some collisions occurred during transmit, */
/* but we eventually sent the packet. */
if (bdp->status & TXBD_DEF)
priv->stats.collisions++;
/* Free the sk buffer associated with this TxBD */
dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
priv->tx_skbuff[priv->skb_dirtytx] = NULL;
priv->skb_dirtytx =
(priv->skb_dirtytx +
1) & TX_RING_MOD_MASK(priv->tx_ring_size);
/* update bdp to point at next bd in the ring (wrapping if necessary) */
if (bdp->status & TXBD_WRAP)
bdp = priv->tx_bd_base;
else
bdp++;
/* Move dirty_tx to be the next bd */
priv->dirty_tx = bdp;
/* We freed a buffer, so now we can restart transmission */
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
} /* while ((bdp->status & TXBD_READY) == 0) */
/* If we are coalescing the interrupts, reset the timer */
/* Otherwise, clear it */
if (priv->txcoalescing)
gfar_write(&priv->regs->txic,
mk_ic_value(priv->txcount, priv->txtime));
else
gfar_write(&priv->regs->txic, 0);
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
unsigned int timeout = SKB_ALLOC_TIMEOUT;
/* We have to allocate the skb, so keep trying till we succeed */
while ((!skb) && timeout--)
skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
if (skb == NULL)
return NULL;
/* We need the data buffer to be aligned properly. We will reserve
* as many bytes as needed to align the data properly
*/
skb_reserve(skb,
RXBUF_ALIGNMENT -
(((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)));
skb->dev = dev;
bdp->bufPtr = dma_map_single(NULL, skb->data,
priv->rx_buffer_size + RXBUF_ALIGNMENT,
DMA_FROM_DEVICE);
bdp->length = 0;
/* Mark the buffer empty */
bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
return skb;
}
static inline void count_errors(unsigned short status, struct gfar_private *priv)
{
struct net_device_stats *stats = &priv->stats;
struct gfar_extra_stats *estats = &priv->extra_stats;
/* If the packet was truncated, none of the other errors
* matter */
if (status & RXBD_TRUNCATED) {
stats->rx_length_errors++;
estats->rx_trunc++;
return;
}
/* Count the errors, if there were any */
if (status & (RXBD_LARGE | RXBD_SHORT)) {
stats->rx_length_errors++;
if (status & RXBD_LARGE)
estats->rx_large++;
else
estats->rx_short++;
}
if (status & RXBD_NONOCTET) {
stats->rx_frame_errors++;
estats->rx_nonoctet++;
}
if (status & RXBD_CRCERR) {
estats->rx_crcerr++;
stats->rx_crc_errors++;
}
if (status & RXBD_OVERRUN) {
estats->rx_overrun++;
stats->rx_over_errors++;
}
}
irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_id;
struct gfar_private *priv = netdev_priv(dev);
#ifdef CONFIG_GFAR_NAPI
u32 tempval;
#endif
/* Clear IEVENT, so rx interrupt isn't called again
* because of this interrupt */
gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
/* support NAPI */
#ifdef CONFIG_GFAR_NAPI
if (netif_rx_schedule_prep(dev)) {
tempval = gfar_read(&priv->regs->imask);
tempval &= IMASK_RX_DISABLED;
gfar_write(&priv->regs->imask, tempval);
__netif_rx_schedule(dev);
} else {
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
dev->name, gfar_read(&priv->regs->ievent),
gfar_read(&priv->regs->imask));
#endif
}
#else
spin_lock(&priv->lock);
gfar_clean_rx_ring(dev);
/* If we are coalescing interrupts, update the timer */
/* Otherwise, clear it */
if (priv->rxcoalescing)
gfar_write(&priv->regs->rxic,
mk_ic_value(priv->rxcount, priv->rxtime));
else
gfar_write(&priv->regs->rxic, 0);
/* Wake the ring param changer, which may be waiting for
* the rx ring to drain in gfar_sringparam() */
priv->rxclean = 1;
wake_up_interruptible(&priv->rxcleanupq);
spin_unlock(&priv->lock);
#endif
return IRQ_HANDLED;
}
/* gfar_process_frame() -- handle one incoming packet if skb
* isn't NULL. Try the fastroute before using the stack */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int length)
{
struct gfar_private *priv = netdev_priv(dev);
if (skb == NULL) {
#ifdef BRIEF_GFAR_ERRORS
printk(KERN_WARNING "%s: Missing skb!!.\n",
dev->name);
#endif
priv->stats.rx_dropped++;
priv->extra_stats.rx_skbmissing++;
} else {
if(try_fastroute(skb, dev, length) == 0) {
/* Prep the skb for the packet */
skb_put(skb, length);
/* Tell the skb what kind of packet this is */
skb->protocol = eth_type_trans(skb, dev);
/* Send the packet up the stack */
if (RECEIVE(skb) == NET_RX_DROP) {
priv->extra_stats.kernel_dropped++;
}
}
}
return 0;
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
* until all are gone (or, in the case of NAPI, the budget/quota
* has been reached). Returns the number of frames handled
*/
#ifdef CONFIG_GFAR_NAPI
static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
#else
static int gfar_clean_rx_ring(struct net_device *dev)
#endif
{
struct rxbd8 *bdp;
struct sk_buff *skb;
u16 pkt_len;
int howmany = 0;
struct gfar_private *priv = netdev_priv(dev);
/* Get the first full descriptor */
bdp = priv->cur_rx;
#ifdef CONFIG_GFAR_NAPI
#define GFAR_RXDONE() ((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))
#else
#define GFAR_RXDONE() (bdp->status & RXBD_EMPTY)
#endif
while (!GFAR_RXDONE()) {
skb = priv->rx_skbuff[priv->skb_currx];
if (!(bdp->status &
(RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
| RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
/* Increment the number of packets */
priv->stats.rx_packets++;
howmany++;
/* Remove the FCS from the packet length */
pkt_len = bdp->length - 4;
gfar_process_frame(dev, skb, pkt_len);
priv->stats.rx_bytes += pkt_len;
} else {
count_errors(bdp->status, priv);
if (skb)
dev_kfree_skb_any(skb);
priv->rx_skbuff[priv->skb_currx] = NULL;
}
dev->last_rx = jiffies;
/* Clear the status flags for this buffer */
bdp->status &= ~RXBD_STATS;
/* Add another skb for the future */
skb = gfar_new_skb(dev, bdp);
priv->rx_skbuff[priv->skb_currx] = skb;
/* Update to the next pointer */
if (bdp->status & RXBD_WRAP)
bdp = priv->rx_bd_base;
else
bdp++;
/* update to point at the next skb */
priv->skb_currx =
(priv->skb_currx +
1) & RX_RING_MOD_MASK(priv->rx_ring_size);
}
/* Update the current rxbd pointer to be the next one */
priv->cur_rx = bdp;
/* If no packets have arrived since the
* last one we processed, clear the IEVENT RX and
* BSY bits so that another interrupt won't be
* generated when we set IMASK */
if (bdp->status & RXBD_EMPTY)
gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
return howmany;
}
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget)
{
int howmany;
struct gfar_private *priv = netdev_priv(dev);
int rx_work_limit = *budget;
if (rx_work_limit > dev->quota)
rx_work_limit = dev->quota;
spin_lock(&priv->lock);
howmany = gfar_clean_rx_ring(dev, rx_work_limit);
dev->quota -= howmany;
rx_work_limit -= howmany;
*budget -= howmany;
if (rx_work_limit >= 0) {
netif_rx_complete(dev);
/* Clear the halt bit in RSTAT */
gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
gfar_write(&priv->regs->imask, IMASK_DEFAULT);
/* If we are coalescing interrupts, update the timer */
/* Otherwise, clear it */
if (priv->rxcoalescing)
gfar_write(&priv->regs->rxic,
mk_ic_value(priv->rxcount, priv->rxtime));
else
gfar_write(&priv->regs->rxic, 0);
/* Signal to the ring size changer that it's safe to go,
* and wake it in case it is already sleeping in gfar_sringparam() */
priv->rxclean = 1;
wake_up_interruptible(&priv->rxcleanupq);
}
spin_unlock(&priv->lock);
return (rx_work_limit < 0) ? 1 : 0;
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = dev_id;
struct gfar_private *priv = netdev_priv(dev);
/* Save ievent for future reference */
u32 events = gfar_read(&priv->regs->ievent);
/* Clear IEVENT */
gfar_write(&priv->regs->ievent, events);
/* Check for reception */
if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0))
gfar_receive(irq, dev_id, regs);
/* Check for transmit completion */
if ((events & IEVENT_TXF) || (events & IEVENT_TXB))
gfar_transmit(irq, dev_id, regs);
/* Update error statistics */
if (events & IEVENT_TXE) {
priv->stats.tx_errors++;
if (events & IEVENT_LC)
priv->stats.tx_window_errors++;
if (events & IEVENT_CRL)
priv->stats.tx_aborted_errors++;
if (events & IEVENT_XFUN) {
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_WARNING "%s: tx underrun. dropped packet\n",
dev->name);
#endif
priv->stats.tx_dropped++;
priv->extra_stats.tx_underrun++;
/* Reactivate the Tx Queues */
gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
}
}
if (events & IEVENT_BSY) {
priv->stats.rx_errors++;
priv->extra_stats.rx_bsy++;
gfar_receive(irq, dev_id, regs);
#ifndef CONFIG_GFAR_NAPI
/* Clear the halt bit in RSTAT */
gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
gfar_read(&priv->regs->rstat));
#endif
}
if (events & IEVENT_BABR) {
priv->stats.rx_errors++;
priv->extra_stats.rx_babr++;
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_DEBUG "%s: babbling error\n", dev->name);
#endif
}
if (events & IEVENT_EBERR) {
priv->extra_stats.eberr++;
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_DEBUG "%s: EBERR\n", dev->name);
#endif
}
if (events & IEVENT_RXC) {
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_DEBUG "%s: control frame\n", dev->name);
#endif
}
if (events & IEVENT_BABT) {
priv->extra_stats.tx_babt++;
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_DEBUG "%s: babt error\n", dev->name);
#endif
}
return IRQ_HANDLED;
}
static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_id;
struct gfar_private *priv = netdev_priv(dev);
/* Run the commands which acknowledge the interrupt */
phy_run_commands(dev, priv->phyinfo->ack_int);
/* Schedule the bottom half */
schedule_work(&priv->tq);
return IRQ_HANDLED;
}
/* Scheduled by the phy_interrupt/timer to handle PHY changes */
static void gfar_phy_change(void *data)
{
struct net_device *dev = (struct net_device *) data;
struct gfar_private *priv = netdev_priv(dev);
int timeout = HZ / 1000 + 1;
/* Delay to give the PHY a chance to change the
* register state */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(timeout);
/* Run the commands which check the link state */
phy_run_commands(dev, priv->phyinfo->handle_int);
/* React to the change in state */
adjust_link(dev);
}
/* Called every so often on systems that don't interrupt
* the core for PHY changes */
static void gfar_phy_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
struct gfar_private *priv = netdev_priv(dev);
schedule_work(&priv->tq);
mod_timer(&priv->phy_info_timer, jiffies + 2 * HZ);
}
/* Called every time the controller might need to be made
* aware of new link state. The PHY code conveys this
* information through variables in the priv structure, and this
* function converts those variables into the appropriate
* register values, and can bring down the device if needed.
*/
static void adjust_link(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar *regs = priv->regs;
u32 tempval;
if (priv->link) {
/* Now we make sure that we can be in full duplex mode.
* If not, we operate in half-duplex mode. */
if (priv->duplexity != priv->olddplx) {
if (!(priv->duplexity)) {
tempval = gfar_read(&regs->maccfg2);
tempval &= ~(MACCFG2_FULL_DUPLEX);
gfar_write(&regs->maccfg2, tempval);
printk(KERN_INFO "%s: Half Duplex\n",
dev->name);
} else {
tempval = gfar_read(&regs->maccfg2);
tempval |= MACCFG2_FULL_DUPLEX;
gfar_write(&regs->maccfg2, tempval);
printk(KERN_INFO "%s: Full Duplex\n",
dev->name);
}
priv->olddplx = priv->duplexity;
}
if (priv->speed != priv->oldspeed) {
switch (priv->speed) {
case 1000:
tempval = gfar_read(&regs->maccfg2);
tempval =
((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
gfar_write(&regs->maccfg2, tempval);
break;
case 100:
case 10:
tempval = gfar_read(&regs->maccfg2);
tempval =
((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
gfar_write(&regs->maccfg2, tempval);
break;
default:
printk(KERN_WARNING
"%s: Ack! Speed (%d) is not 10/100/1000!\n",
dev->name, priv->speed);
break;
}
printk(KERN_INFO "%s: Speed %dBT\n", dev->name,
priv->speed);
priv->oldspeed = priv->speed;
}
if (!priv->oldlink) {
printk(KERN_INFO "%s: Link is up\n", dev->name);
priv->oldlink = 1;
netif_carrier_on(dev);
netif_schedule(dev);
}
} else {
if (priv->oldlink) {
printk(KERN_INFO "%s: Link is down\n", dev->name);
priv->oldlink = 0;
priv->oldspeed = 0;
priv->olddplx = -1;
netif_carrier_off(dev);
}
}
}
/* Update the hash table based on the current list of multicast
* addresses we subscribe to. Also, change the promiscuity of
* the device based on the flags (this function is called
* whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
struct dev_mc_list *mc_ptr;
struct gfar_private *priv = netdev_priv(dev);
struct gfar *regs = priv->regs;
u32 tempval;
if(dev->flags & IFF_PROMISC) {
printk(KERN_INFO "%s: Entering promiscuous mode.\n",
dev->name);
/* Set RCTRL to PROM */
tempval = gfar_read(&regs->rctrl);
tempval |= RCTRL_PROM;
gfar_write(&regs->rctrl, tempval);
} else {
/* Set RCTRL to not PROM */
tempval = gfar_read(&regs->rctrl);
tempval &= ~(RCTRL_PROM);
gfar_write(&regs->rctrl, tempval);
}
if(dev->flags & IFF_ALLMULTI) {
/* Set the hash to rx all multicast frames */
gfar_write(&regs->gaddr0, 0xffffffff);
gfar_write(&regs->gaddr1, 0xffffffff);
gfar_write(&regs->gaddr2, 0xffffffff);
gfar_write(&regs->gaddr3, 0xffffffff);
gfar_write(&regs->gaddr4, 0xffffffff);
gfar_write(&regs->gaddr5, 0xffffffff);
gfar_write(&regs->gaddr6, 0xffffffff);
gfar_write(&regs->gaddr7, 0xffffffff);
} else {
/* zero out the hash */
gfar_write(&regs->gaddr0, 0x0);
gfar_write(&regs->gaddr1, 0x0);
gfar_write(&regs->gaddr2, 0x0);
gfar_write(&regs->gaddr3, 0x0);
gfar_write(&regs->gaddr4, 0x0);
gfar_write(&regs->gaddr5, 0x0);
gfar_write(&regs->gaddr6, 0x0);
gfar_write(&regs->gaddr7, 0x0);
if(dev->mc_count == 0)
return;
/* Parse the list, and set the appropriate bits */
for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
}
}
return;
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
* 1) Take the Destination Address (ie the multicast address), and
* do a CRC on it (little endian), and reverse the bits of the
* result.
* 2) Use the 8 most significant bits as a hash into a 256-entry
* table. The table is controlled through 8 32-bit registers:
* gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
* entry 255. This means that the 3 most significant bits of the
* hash select which gaddr register to use, and the 5 remaining bits
* indicate which bit (assuming an IBM numbering scheme, which
* for PowerPC (tm) is usually the case) in the register holds
* the entry. */
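/* Worked example with a hypothetical CRC value: if ether_crc()
* returned 0xe0000000, then whichreg = (0xe0000000 >> 29) & 0x7 = 7
* and whichbit = (0xe0000000 >> 24) & 0x1f = 0, so the entry is the
* MSB of gaddr7. */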
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
u32 tempval;
struct gfar_private *priv = netdev_priv(dev);
struct gfar *regs = priv->regs;
u32 *hash = &regs->gaddr0;
u32 result = ether_crc(MAC_ADDR_LEN, addr);
u8 whichreg = ((result >> 29) & 0x7);
u8 whichbit = ((result >> 24) & 0x1f);
u32 value = (1 << (31-whichbit));
tempval = gfar_read(&hash[whichreg]);
tempval |= value;
gfar_write(&hash[whichreg], tempval);
return;
}
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = dev_id;
struct gfar_private *priv = netdev_priv(dev);
/* Save ievent for future reference */
u32 events = gfar_read(&priv->regs->ievent);
/* Clear IEVENT */
gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);
/* Report the error for debugging purposes */
#if defined (BRIEF_GFAR_ERRORS) || defined (VERBOSE_GFAR_ERRORS)
printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
dev->name, events, gfar_read(&priv->regs->imask));
#endif
/* Update the error counters */
if (events & IEVENT_TXE) {
priv->stats.tx_errors++;
if (events & IEVENT_LC)
priv->stats.tx_window_errors++;
if (events & IEVENT_CRL)
priv->stats.tx_aborted_errors++;
if (events & IEVENT_XFUN) {
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_DEBUG "%s: underrun. packet dropped.\n",
dev->name);
#endif
priv->stats.tx_dropped++;
priv->extra_stats.tx_underrun++;
/* Reactivate the Tx Queues */
gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
}
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
#endif
}
if (events & IEVENT_BSY) {
priv->stats.rx_errors++;
priv->extra_stats.rx_bsy++;
gfar_receive(irq, dev_id, regs);
#ifndef CONFIG_GFAR_NAPI
/* Clear the halt bit in RSTAT */
gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
gfar_read(&priv->regs->rstat));
#endif
}
if (events & IEVENT_BABR) {
priv->stats.rx_errors++;
priv->extra_stats.rx_babr++;
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_DEBUG "%s: babbling error\n", dev->name);
#endif
}
if (events & IEVENT_EBERR) {
priv->extra_stats.eberr++;
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_DEBUG "%s: EBERR\n", dev->name);
#endif
}
if (events & IEVENT_RXC) {
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_DEBUG "%s: control frame\n", dev->name);
#endif
}
if (events & IEVENT_BABT) {
priv->extra_stats.tx_babt++;
#ifdef VERBOSE_GFAR_ERRORS
printk(KERN_DEBUG "%s: babt error\n", dev->name);
#endif
}
return IRQ_HANDLED;
}
/* Structure for a device driver */
static struct ocp_device_id gfar_ids[] = {
{ .vendor = OCP_ANY_ID, .function = OCP_FUNC_GFAR },
{ .vendor = OCP_VENDOR_INVALID }
};
static struct ocp_driver gfar_driver = {
.name = "gianfar",
.id_table = gfar_ids,
.probe = gfar_probe,
.remove = gfar_remove,
};
static int __init gfar_init(void)
{
int rc;
rc = ocp_register_driver(&gfar_driver);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
if (rc != 0) {
#else
if (rc == 0) {
#endif
ocp_unregister_driver(&gfar_driver);
return -ENODEV;
}
return 0;
}
static void __exit gfar_exit(void)
{
ocp_unregister_driver(&gfar_driver);
}
module_init(gfar_init);
module_exit(gfar_exit);
/*
* drivers/net/gianfar.h
*
* Gianfar Ethernet Driver
* Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
* Based on 8260_io/fcc_enet.c
*
* Author: Andy Fleming
* Maintainer: Kumar Gala (kumar.gala@freescale.com)
*
* Copyright 2004 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Still left to do:
* -Add support for module parameters
*/
#ifndef __GIANFAR_H
#define __GIANFAR_H
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/crc32.h>
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
#include <linux/workqueue.h>
#else
#include <linux/tqueue.h>
#define work_struct tq_struct
#define schedule_work schedule_task
#endif
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <asm/ocp.h>
#include "gianfar_phy.h"
/* The maximum number of packets to be handled in one call of gfar_poll */
#define GFAR_DEV_WEIGHT 64
/* Number of bytes to align the rx bufs to */
#define RXBUF_ALIGNMENT 64
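/* e.g. if an skb's data pointer ends in 0x10, the skb_reserve() in
* gfar_new_skb() skips 64 - 0x10 = 48 bytes to reach the next
* 64-byte boundary */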
/* The number of bytes which compose a unit for the purpose of
* allocating data buffers. i.e., for any given MTU, the data buffer
* will be the next highest multiple of 512 bytes. */
#define INCREMENTAL_BUFFER_SIZE 512
#define MAC_ADDR_LEN 6
extern char gfar_driver_name[];
extern char gfar_driver_version[];
/* These need to be powers of 2 for this driver */
#ifdef CONFIG_GFAR_NAPI
#define DEFAULT_TX_RING_SIZE 256
#define DEFAULT_RX_RING_SIZE 256
#else
#define DEFAULT_TX_RING_SIZE 64
#define DEFAULT_RX_RING_SIZE 64
#endif
#define GFAR_RX_MAX_RING_SIZE 256
#define GFAR_TX_MAX_RING_SIZE 256
#define DEFAULT_RX_BUFFER_SIZE 1536
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
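/* The mod masks rely on the ring sizes being powers of 2, so that
* (index + 1) & (size - 1) wraps an index around the ring;
* gfar_sringparam() enforces this with is_power_of_2() */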
#define JUMBO_BUFFER_SIZE 9728
#define JUMBO_FRAME_SIZE 9600
/* Latency of interface clock in nanoseconds */
/* Interface clock latency, in this case, means the
* time described by a value of 1 in the interrupt
* coalescing registers' time fields. Since those fields
* refer to the time it takes for 64 clocks to pass, the
* latencies are as such:
* GBIT = 125MHz => 8ns/clock => 8*64 ns / tick
* 100 = 25 MHz => 40ns/clock => 40*64 ns / tick
* 10 = 2.5 MHz => 400ns/clock => 400*64 ns / tick
*/
#define GFAR_GBIT_TIME 512
#define GFAR_100_TIME 2560
#define GFAR_10_TIME 25600
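/* e.g. gfar_usecs2ticks() in gianfar_ethtool.c converts 100 usecs
* at gigabit speed to (100 * 1000 + 512 - 1) / 512 = 196 ticks */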
#define DEFAULT_TXCOUNT 16
#define DEFAULT_TXTIME 32768
#define DEFAULT_RXCOUNT 16
#define DEFAULT_RXTIME 32768
#define TBIPA_VALUE 0x1f
#define MIIMCFG_INIT_VALUE 0x00000007
#define MIIMCFG_RESET 0x80000000
#define MIIMIND_BUSY 0x00000001
/* MAC register bits */
#define MACCFG1_SOFT_RESET 0x80000000
#define MACCFG1_RESET_RX_MC 0x00080000
#define MACCFG1_RESET_TX_MC 0x00040000
#define MACCFG1_RESET_RX_FUN 0x00020000
#define MACCFG1_RESET_TX_FUN 0x00010000
#define MACCFG1_LOOPBACK 0x00000100
#define MACCFG1_RX_FLOW 0x00000020
#define MACCFG1_TX_FLOW 0x00000010
#define MACCFG1_SYNCD_RX_EN 0x00000008
#define MACCFG1_RX_EN 0x00000004
#define MACCFG1_SYNCD_TX_EN 0x00000002
#define MACCFG1_TX_EN 0x00000001
#define MACCFG2_INIT_SETTINGS 0x00007205
#define MACCFG2_FULL_DUPLEX 0x00000001
#define MACCFG2_IF 0x00000300
#define MACCFG2_MII 0x00000100
#define MACCFG2_GMII 0x00000200
#define MACCFG2_HUGEFRAME 0x00000020
#define MACCFG2_LENGTHCHECK 0x00000010
#define ECNTRL_INIT_SETTINGS 0x00001000
#define ECNTRL_TBI_MODE 0x00000020
#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE
#define MINFLR_INIT_SETTINGS 0x00000040
/* Init to do tx snooping for buffers and descriptors */
#define DMACTRL_INIT_SETTINGS 0x000000c3
#define DMACTRL_GRS 0x00000010
#define DMACTRL_GTS 0x00000008
#define TSTAT_CLEAR_THALT 0x80000000
/* Interrupt coalescing macros */
#define IC_ICEN 0x80000000
#define IC_ICFT_MASK 0x1fe00000
#define IC_ICFT_SHIFT 21
#define mk_ic_icft(x) \
(((unsigned int)x << IC_ICFT_SHIFT)&IC_ICFT_MASK)
#define IC_ICTT_MASK 0x0000ffff
#define mk_ic_ictt(x) (x&IC_ICTT_MASK)
#define mk_ic_value(count, time) (IC_ICEN | \
mk_ic_icft(count) | \
mk_ic_ictt(time))
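/* e.g. mk_ic_value(16, 196) = IC_ICEN | (16 << 21) | 196
* = 0x80000000 | 0x02000000 | 0x000000c4 = 0x820000c4 */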
#define RCTRL_PROM 0x00000008
#define RSTAT_CLEAR_RHALT 0x00800000
#define IEVENT_INIT_CLEAR 0xffffffff
#define IEVENT_BABR 0x80000000
#define IEVENT_RXC 0x40000000
#define IEVENT_BSY 0x20000000
#define IEVENT_EBERR 0x10000000
#define IEVENT_MSRO 0x04000000
#define IEVENT_GTSC 0x02000000
#define IEVENT_BABT 0x01000000
#define IEVENT_TXC 0x00800000
#define IEVENT_TXE 0x00400000
#define IEVENT_TXB 0x00200000
#define IEVENT_TXF 0x00100000
#define IEVENT_LC 0x00040000
#define IEVENT_CRL 0x00020000
#define IEVENT_XFUN 0x00010000
#define IEVENT_RXB0 0x00008000
#define IEVENT_GRSC 0x00000100
#define IEVENT_RXF0 0x00000080
#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0)
#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF)
#define IEVENT_ERR_MASK \
(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
| IEVENT_CRL | IEVENT_XFUN)
#define IMASK_INIT_CLEAR 0x00000000
#define IMASK_BABR 0x80000000
#define IMASK_RXC 0x40000000
#define IMASK_BSY 0x20000000
#define IMASK_EBERR 0x10000000
#define IMASK_MSRO 0x04000000
#define IMASK_GRSC 0x02000000
#define IMASK_BABT 0x01000000
#define IMASK_TXC 0x00800000
#define IMASK_TXEEN 0x00400000
#define IMASK_TXBEN 0x00200000
#define IMASK_TXFEN 0x00100000
#define IMASK_LC 0x00040000
#define IMASK_CRL 0x00020000
#define IMASK_XFUN 0x00010000
#define IMASK_RXB0 0x00008000
#define IMASK_GTSC 0x00000100
#define IMASK_RXFEN0 0x00000080
#define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY)
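/* Note that IMASK_RX_DISABLED masks off only the rx-frame and busy
* interrupts, leaving tx and error interrupts enabled while NAPI
* polling is in progress (see gfar_receive()) */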
#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \
IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
IMASK_XFUN | IMASK_RXC | IMASK_BABT)
/* Attribute fields */
#ifdef CONFIG_GFAR_BDSTASH
#define ATTR_BDSTASH 0x00000800
#else
#define ATTR_BDSTASH 0x00000000
#endif
#ifdef CONFIG_GFAR_BUFSTASH
#define ATTR_BUFSTASH 0x00004000
#define STASH_LENGTH 64
#else
#define ATTR_BUFSTASH 0x00000000
#endif
/* This enables rx snooping for buffers and descriptors */
#define ATTR_SNOOPING 0x000000c0
#define ATTR_INIT_SETTINGS (ATTR_SNOOPING \
| ATTR_BDSTASH | ATTR_BUFSTASH)
#define ATTRELI_INIT_SETTINGS 0x0
/* TxBD status field bits */
#define TXBD_READY 0x8000
#define TXBD_PADCRC 0x4000
#define TXBD_WRAP 0x2000
#define TXBD_INTERRUPT 0x1000
#define TXBD_LAST 0x0800
#define TXBD_CRC 0x0400
#define TXBD_DEF 0x0200
#define TXBD_HUGEFRAME 0x0080
#define TXBD_LATECOLLISION 0x0080
#define TXBD_RETRYLIMIT 0x0040
#define TXBD_RETRYCOUNTMASK 0x003c
#define TXBD_UNDERRUN 0x0002
/* RxBD status field bits */
#define RXBD_EMPTY 0x8000
#define RXBD_RO1 0x4000
#define RXBD_WRAP 0x2000
#define RXBD_INTERRUPT 0x1000
#define RXBD_LAST 0x0800
#define RXBD_FIRST 0x0400
#define RXBD_MISS 0x0100
#define RXBD_BROADCAST 0x0080
#define RXBD_MULTICAST 0x0040
#define RXBD_LARGE 0x0020
#define RXBD_NONOCTET 0x0010
#define RXBD_SHORT 0x0008
#define RXBD_CRCERR 0x0004
#define RXBD_OVERRUN 0x0002
#define RXBD_TRUNCATED 0x0001
#define RXBD_STATS 0x01ff
struct txbd8
{
u16 status; /* Status Fields */
u16 length; /* Buffer length */
u32 bufPtr; /* Buffer Pointer */
};
struct rxbd8
{
u16 status; /* Status Fields */
u16 length; /* Buffer Length */
u32 bufPtr; /* Buffer Pointer */
};
struct rmon_mib
{
u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */
u32 tr127; /* 0x.684 - Transmit and Receive 65-127 byte Frame Counter */
u32 tr255; /* 0x.688 - Transmit and Receive 128-255 byte Frame Counter */
u32 tr511; /* 0x.68c - Transmit and Receive 256-511 byte Frame Counter */
u32 tr1k; /* 0x.690 - Transmit and Receive 512-1023 byte Frame Counter */
u32 trmax; /* 0x.694 - Transmit and Receive 1024-1518 byte Frame Counter */
u32 trmgv; /* 0x.698 - Transmit and Receive 1519-1522 byte Good VLAN Frame */
u32 rbyt; /* 0x.69c - Receive Byte Counter */
u32 rpkt; /* 0x.6a0 - Receive Packet Counter */
u32 rfcs; /* 0x.6a4 - Receive FCS Error Counter */
u32 rmca; /* 0x.6a8 - Receive Multicast Packet Counter */
u32 rbca; /* 0x.6ac - Receive Broadcast Packet Counter */
u32 rxcf; /* 0x.6b0 - Receive Control Frame Packet Counter */
u32 rxpf; /* 0x.6b4 - Receive Pause Frame Packet Counter */
u32 rxuo; /* 0x.6b8 - Receive Unknown OP Code Counter */
u32 raln; /* 0x.6bc - Receive Alignment Error Counter */
u32 rflr; /* 0x.6c0 - Receive Frame Length Error Counter */
u32 rcde; /* 0x.6c4 - Receive Code Error Counter */
u32 rcse; /* 0x.6c8 - Receive Carrier Sense Error Counter */
u32 rund; /* 0x.6cc - Receive Undersize Packet Counter */
u32 rovr; /* 0x.6d0 - Receive Oversize Packet Counter */
u32 rfrg; /* 0x.6d4 - Receive Fragments Counter */
u32 rjbr; /* 0x.6d8 - Receive Jabber Counter */
u32 rdrp; /* 0x.6dc - Receive Drop Counter */
u32 tbyt; /* 0x.6e0 - Transmit Byte Counter Counter */
u32 tpkt; /* 0x.6e4 - Transmit Packet Counter */
u32 tmca; /* 0x.6e8 - Transmit Multicast Packet Counter */
u32 tbca; /* 0x.6ec - Transmit Broadcast Packet Counter */
u32 txpf; /* 0x.6f0 - Transmit Pause Control Frame Counter */
u32 tdfr; /* 0x.6f4 - Transmit Deferral Packet Counter */
u32 tedf; /* 0x.6f8 - Transmit Excessive Deferral Packet Counter */
u32 tscl; /* 0x.6fc - Transmit Single Collision Packet Counter */
u32 tmcl; /* 0x.700 - Transmit Multiple Collision Packet Counter */
u32 tlcl; /* 0x.704 - Transmit Late Collision Packet Counter */
u32 txcl; /* 0x.708 - Transmit Excessive Collision Packet Counter */
u32 tncl; /* 0x.70c - Transmit Total Collision Counter */
u8 res1[4];
u32 tdrp; /* 0x.714 - Transmit Drop Frame Counter */
u32 tjbr; /* 0x.718 - Transmit Jabber Frame Counter */
u32 tfcs; /* 0x.71c - Transmit FCS Error Counter */
u32 txcf; /* 0x.720 - Transmit Control Frame Counter */
u32 tovr; /* 0x.724 - Transmit Oversize Frame Counter */
u32 tund; /* 0x.728 - Transmit Undersize Frame Counter */
u32 tfrg; /* 0x.72c - Transmit Fragments Frame Counter */
u32 car1; /* 0x.730 - Carry Register One */
u32 car2; /* 0x.734 - Carry Register Two */
u32 cam1; /* 0x.738 - Carry Mask Register One */
u32 cam2; /* 0x.73c - Carry Mask Register Two */
};
struct gfar_extra_stats {
u64 kernel_dropped;
u64 rx_large;
u64 rx_short;
u64 rx_nonoctet;
u64 rx_crcerr;
u64 rx_overrun;
u64 rx_bsy;
u64 rx_babr;
u64 rx_trunc;
u64 eberr;
u64 tx_babt;
u64 tx_underrun;
u64 rx_skbmissing;
u64 tx_timeout;
};
#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32))
#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64))
/* Number of stats in the stats structure (ignore car and cam regs)*/
#define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN)
#define GFAR_INFOSTR_LEN 32
struct gfar_stats {
u64 extra[GFAR_EXTRA_STATS_LEN];
u64 rmon[GFAR_RMON_LEN];
};
struct gfar {
u8 res1[16];
u32 ievent; /* 0x.010 - Interrupt Event Register */
u32 imask; /* 0x.014 - Interrupt Mask Register */
u32 edis; /* 0x.018 - Error Disabled Register */
u8 res2[4];
u32 ecntrl; /* 0x.020 - Ethernet Control Register */
u32 minflr; /* 0x.024 - Minimum Frame Length Register */
u32 ptv; /* 0x.028 - Pause Time Value Register */
u32 dmactrl; /* 0x.02c - DMA Control Register */
u32 tbipa; /* 0x.030 - TBI PHY Address Register */
u8 res3[88];
u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */
u8 res4[8];
u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */
u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */
u8 res5[96];
u32 tctrl; /* 0x.100 - Transmit Control Register */
u32 tstat; /* 0x.104 - Transmit Status Register */
u8 res6[4];
u32 tbdlen; /* 0x.10c - Transmit Buffer Descriptor Data Length Register */
u32 txic; /* 0x.110 - Transmit Interrupt Coalescing Configuration Register */
u8 res7[16];
u32 ctbptr; /* 0x.124 - Current Transmit Buffer Descriptor Pointer Register */
u8 res8[92];
u32 tbptr; /* 0x.184 - Transmit Buffer Descriptor Pointer Low Register */
u8 res9[124];
u32 tbase; /* 0x.204 - Transmit Descriptor Base Address Register */
u8 res10[168];
u32 ostbd; /* 0x.2b0 - Out-of-Sequence Transmit Buffer Descriptor Register */
u32 ostbdp; /* 0x.2b4 - Out-of-Sequence Transmit Data Buffer Pointer Register */
u8 res11[72];
u32 rctrl; /* 0x.300 - Receive Control Register */
u32 rstat; /* 0x.304 - Receive Status Register */
u8 res12[4];
u32 rbdlen; /* 0x.30c - RxBD Data Length Register */
u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */
u8 res13[16];
u32 crbptr; /* 0x.324 - Current Receive Buffer Descriptor Pointer */
u8 res14[24];
u32 mrblr; /* 0x.340 - Maximum Receive Buffer Length Register */
u8 res15[64];
u32 rbptr; /* 0x.384 - Receive Buffer Descriptor Pointer */
u8 res16[124];
u32 rbase; /* 0x.404 - Receive Descriptor Base Address */
u8 res17[248];
u32 maccfg1; /* 0x.500 - MAC Configuration 1 Register */
u32 maccfg2; /* 0x.504 - MAC Configuration 2 Register */
u32 ipgifg; /* 0x.508 - Inter Packet Gap/Inter Frame Gap Register */
u32 hafdup; /* 0x.50c - Half Duplex Register */
u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */
u8 res18[12];
u32 miimcfg; /* 0x.520 - MII Management Configuration Register */
u32 miimcom; /* 0x.524 - MII Management Command Register */
u32 miimadd; /* 0x.528 - MII Management Address Register */
u32 miimcon; /* 0x.52c - MII Management Control Register */
u32 miimstat; /* 0x.530 - MII Management Status Register */
u32 miimind; /* 0x.534 - MII Management Indicator Register */
u8 res19[4];
u32 ifstat; /* 0x.53c - Interface Status Register */
u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */
u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */
u8 res20[312];
struct rmon_mib rmon;
u8 res21[192];
u32 iaddr0; /* 0x.800 - Individual address register 0 */
u32 iaddr1; /* 0x.804 - Individual address register 1 */
u32 iaddr2; /* 0x.808 - Individual address register 2 */
u32 iaddr3; /* 0x.80c - Individual address register 3 */
u32 iaddr4; /* 0x.810 - Individual address register 4 */
u32 iaddr5; /* 0x.814 - Individual address register 5 */
u32 iaddr6; /* 0x.818 - Individual address register 6 */
u32 iaddr7; /* 0x.81c - Individual address register 7 */
u8 res22[96];
u32 gaddr0; /* 0x.880 - Global address register 0 */
u32 gaddr1; /* 0x.884 - Global address register 1 */
u32 gaddr2; /* 0x.888 - Global address register 2 */
u32 gaddr3; /* 0x.88c - Global address register 3 */
u32 gaddr4; /* 0x.890 - Global address register 4 */
u32 gaddr5; /* 0x.894 - Global address register 5 */
u32 gaddr6; /* 0x.898 - Global address register 6 */
u32 gaddr7; /* 0x.89c - Global address register 7 */
u8 res23[856];
u32 attr; /* 0x.bf8 - Attributes Register */
u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */
u8 res24[1024];
};
/* Struct stolen almost completely (and shamelessly) from the FCC enet source
* (Ok, that's not so true anymore, but there is a family resemblance)
* The GFAR buffer descriptors track the ring buffers. rx_bd_base and
* tx_bd_base always point to the base of the descriptor rings, while
* cur_rx and cur_tx point to the currently available buffer.
* The dirty_tx tracks the current buffer that is being sent by the
* controller. The cur_tx and dirty_tx are equal under both completely
* empty and completely full conditions. The empty/ready indicator in
* the buffer descriptor determines the actual condition.
*/
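/* For illustration: in a 4-entry ring with two packets queued and
* none yet reclaimed, cur_tx sits two entries ahead of dirty_tx.
* Once cur_tx wraps back around to dirty_tx the ring is full, and
* the TXBD_READY bit of *dirty_tx distinguishes full (set) from
* empty (clear). */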
struct gfar_private
{
/* pointers to arrays of skbuffs for tx and rx */
struct sk_buff ** tx_skbuff;
struct sk_buff ** rx_skbuff;
/* indices pointing to the next free skb in skb arrays */
u16 skb_curtx;
u16 skb_currx;
/* index of the oldest skb which has been handed to the
* controller but not yet reclaimed */
u16 skb_dirtytx;
/* Configuration info for the coalescing features */
unsigned char txcoalescing;
unsigned short txcount;
unsigned short txtime;
unsigned char rxcoalescing;
unsigned short rxcount;
unsigned short rxtime;
/* GFAR addresses */
struct rxbd8 *rx_bd_base; /* Base addresses of Rx and Tx Buffers */
struct txbd8 *tx_bd_base;
struct rxbd8 *cur_rx; /* Next free rx ring entry */
struct txbd8 *cur_tx; /* Next free ring entry */
struct txbd8 *dirty_tx; /* The Ring entry to be freed. */
struct gfar *regs; /* Pointer to the GFAR memory mapped Registers */
struct phy_info *phyinfo;
struct gfar *phyregs;
struct work_struct tq;
struct timer_list phy_info_timer;
struct net_device_stats stats; /* linux network statistics */
struct gfar_extra_stats extra_stats;
spinlock_t lock;
unsigned int rx_buffer_size;
unsigned int rx_stash_size;
unsigned int tx_ring_size;
unsigned int rx_ring_size;
wait_queue_head_t rxcleanupq;
unsigned int rxclean;
int link; /* current link state */
int oldlink;
int duplexity; /* Indicates negotiated duplex state */
int olddplx;
int speed; /* Indicates negotiated speed */
int oldspeed;
/* Info structure initialized by board setup code */
struct ocp_gfar_data *einfo;
};
extern inline u32 gfar_read(volatile unsigned *addr)
{
u32 val;
val = in_be32(addr);
return val;
}
extern inline void gfar_write(volatile unsigned *addr, u32 val)
{
out_be32(addr, val);
}
#endif /* __GIANFAR_H */
/*
* drivers/net/gianfar_ethtool.c
*
* Gianfar Ethernet Driver
* Ethtool support for Gianfar Enet
* Based on e1000 ethtool support
*
* Author: Andy Fleming
* Maintainer: Kumar Gala (kumar.gala@freescale.com)
*
* Copyright 2004 Freescale Semiconductor, Inc
*
* This software may be used and distributed according to
* the terms of the GNU Public License, Version 2, incorporated herein
* by reference.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/crc32.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <linux/ethtool.h>
#include "gianfar.h"
#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
extern int startup_gfar(struct net_device *dev);
extern void stop_gfar(struct net_device *dev);
extern irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
u64 * buf);
void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);
static char stat_gstrings[][ETH_GSTRING_LEN] = {
"RX Dropped by Kernel",
"RX Large Frame Errors",
"RX Short Frame Errors",
"RX Non-Octet Errors",
"RX CRC Errors",
"RX Overrun Errors",
"RX Busy Errors",
"RX Babbling Errors",
"RX Truncated Frames",
"Ethernet Bus Error",
"TX Babbling Errors",
"TX Underrun Errors",
"RX SKB Missing Errors",
"TX Timeout Errors",
"tx&rx 64B frames",
"tx&rx 65-127B frames",
"tx&rx 128-255B frames",
"tx&rx 256-511B frames",
"tx&rx 512-1023B frames",
"tx&rx 1024-1518B frames",
"tx&rx 1519-1522B Good VLAN",
"RX bytes",
"RX Packets",
"RX FCS Errors",
"Receive Multicast Packet",
"Receive Broadcast Packet",
"RX Control Frame Packets",
"RX Pause Frame Packets",
"RX Unknown OP Code",
"RX Alignment Error",
"RX Frame Length Error",
"RX Code Error",
"RX Carrier Sense Error",
"RX Undersize Packets",
"RX Oversize Packets",
"RX Fragmented Frames",
"RX Jabber Frames",
"RX Dropped Frames",
"TX Byte Counter",
"TX Packets",
"TX Multicast Packets",
"TX Broadcast Packets",
"TX Pause Control Frames",
"TX Deferral Packets",
"TX Excessive Deferral Packets",
"TX Single Collision Packets",
"TX Multiple Collision Packets",
"TX Late Collision Packets",
"TX Excessive Collision Packets",
"TX Total Collision",
"RESERVED",
"TX Dropped Frames",
"TX Jabber Frames",
"TX FCS Errors",
"TX Control Frames",
"TX Oversize Frames",
"TX Undersize Frames",
"TX Fragmented Frames",
};
/* Fill in an array of 64-bit statistics from various sources.
* This array will be appended to the end of the ethtool_stats
* structure, and returned to user space
*/
void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
{
int i;
struct gfar_private *priv = (struct gfar_private *) dev->priv;
u32 *rmon = (u32 *) & priv->regs->rmon;
u64 *extra = (u64 *) & priv->extra_stats;
struct gfar_stats *stats = (struct gfar_stats *) buf;
for (i = 0; i < GFAR_RMON_LEN; i++) {
stats->rmon[i] = (u64) (rmon[i]);
}
for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) {
stats->extra[i] = extra[i];
}
}
/* Returns the number of stats (and their corresponding strings) */
int gfar_stats_count(struct net_device *dev)
{
return GFAR_STATS_LEN;
}
void gfar_gstrings_normon(struct net_device *dev, u32 stringset, u8 * buf)
{
memcpy(buf, stat_gstrings, GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}
void gfar_fill_stats_normon(struct net_device *dev,
struct ethtool_stats *dummy, u64 * buf)
{
int i;
struct gfar_private *priv = (struct gfar_private *) dev->priv;
u64 *extra = (u64 *) & priv->extra_stats;
for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) {
buf[i] = extra[i];
}
}
int gfar_stats_count_normon(struct net_device *dev)
{
return GFAR_EXTRA_STATS_LEN;
}
/* Fills in the drvinfo structure with some basic info */
void gfar_gdrvinfo(struct net_device *dev, struct
ethtool_drvinfo *drvinfo)
{
strncpy(drvinfo->driver, gfar_driver_name, GFAR_INFOSTR_LEN);
strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN);
strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN);
drvinfo->n_stats = GFAR_STATS_LEN;
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
/* Return the current settings in the ethtool_cmd structure */
int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct gfar_private *priv = (struct gfar_private *) dev->priv;
uint gigabit_support =
priv->einfo->flags & GFAR_HAS_GIGABIT ? SUPPORTED_1000baseT_Full : 0;
uint gigabit_advert =
priv->einfo->flags & GFAR_HAS_GIGABIT ? ADVERTISED_1000baseT_Full: 0;
cmd->supported = (SUPPORTED_10baseT_Half
| SUPPORTED_100baseT_Half
| SUPPORTED_100baseT_Full
| gigabit_support | SUPPORTED_Autoneg);
/* For now, we always advertise everything */
cmd->advertising = (ADVERTISED_10baseT_Half
| ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
| gigabit_advert | ADVERTISED_Autoneg);
cmd->speed = priv->speed;
cmd->duplex = priv->duplexity;
cmd->port = PORT_MII;
cmd->phy_address = priv->einfo->phyid;
cmd->transceiver = XCVR_EXTERNAL;
cmd->autoneg = AUTONEG_ENABLE;
cmd->maxtxpkt = priv->txcount;
cmd->maxrxpkt = priv->rxcount;
return 0;
}
/* Return the length of the register structure */
int gfar_reglen(struct net_device *dev)
{
return sizeof (struct gfar);
}
/* Return a dump of the GFAR register space */
void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
{
int i;
struct gfar_private *priv = (struct gfar_private *) dev->priv;
u32 *theregs = (u32 *) priv->regs;
u32 *buf = (u32 *) regbuf;
for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
buf[i] = theregs[i];
}
/* Return the link state: 1 is up, 0 is down */
u32 gfar_get_link(struct net_device *dev)
{
struct gfar_private *priv = (struct gfar_private *) dev->priv;
return (u32) priv->link;
}
/* Fill in a buffer with the strings which correspond to the
* stats */
void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
{
memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
}
/* Convert microseconds to ethernet clock ticks, which changes
* depending on what speed the controller is running at */
static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs)
{
unsigned int count;
/* The timer is different, depending on the interface speed */
switch (priv->speed) {
case 1000:
count = GFAR_GBIT_TIME;
break;
case 100:
count = GFAR_100_TIME;
break;
case 10:
default:
count = GFAR_10_TIME;
break;
}
/* Make sure we return a number greater than 0
* if usecs > 0 */
return ((usecs * 1000 + count - 1) / count);
}
/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
{
unsigned int count;
/* The timer is different, depending on the interface speed */
switch (priv->speed) {
case 1000:
count = GFAR_GBIT_TIME;
break;
case 100:
count = GFAR_100_TIME;
break;
case 10:
default:
count = GFAR_10_TIME;
break;
}
/* Note that this rounds down, so a small nonzero */
/* tick count may convert to 0 usecs */
return ((ticks * count) / 1000);
}
/* Get the coalescing parameters, and put them in the cvals
* structure. */
int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = (struct gfar_private *) dev->priv;
cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime);
cvals->rx_max_coalesced_frames = priv->rxcount;
cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, priv->txtime);
cvals->tx_max_coalesced_frames = priv->txcount;
cvals->use_adaptive_rx_coalesce = 0;
cvals->use_adaptive_tx_coalesce = 0;
cvals->pkt_rate_low = 0;
cvals->rx_coalesce_usecs_low = 0;
cvals->rx_max_coalesced_frames_low = 0;
cvals->tx_coalesce_usecs_low = 0;
cvals->tx_max_coalesced_frames_low = 0;
/* When the packet rate is below pkt_rate_high but above
* pkt_rate_low (both measured in packets per second) the
* normal {rx,tx}_* coalescing parameters are used.
*/
/* When the packet rate (measured in packets per second)
* is above pkt_rate_high, the {rx,tx}_*_high parameters are
* used.
*/
cvals->pkt_rate_high = 0;
cvals->rx_coalesce_usecs_high = 0;
cvals->rx_max_coalesced_frames_high = 0;
cvals->tx_coalesce_usecs_high = 0;
cvals->tx_max_coalesced_frames_high = 0;
/* How often to do adaptive coalescing packet rate sampling,
* measured in seconds. Left at zero since adaptive coalescing
* is not supported.
*/
cvals->rate_sample_interval = 0;
return 0;
}
/* Change the coalescing values.
* Both cvals->*_usecs and cvals->*_frames have to be > 0
* in order for coalescing to be active
*/
int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = (struct gfar_private *) dev->priv;
/* Set up rx coalescing */
if ((cvals->rx_coalesce_usecs == 0) ||
(cvals->rx_max_coalesced_frames == 0))
priv->rxcoalescing = 0;
else
priv->rxcoalescing = 1;
priv->rxtime = gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs);
priv->rxcount = cvals->rx_max_coalesced_frames;
/* Set up tx coalescing */
if ((cvals->tx_coalesce_usecs == 0) ||
(cvals->tx_max_coalesced_frames == 0))
priv->txcoalescing = 0;
else
priv->txcoalescing = 1;
priv->txtime = gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs);
priv->txcount = cvals->tx_max_coalesced_frames;
if (priv->rxcoalescing)
gfar_write(&priv->regs->rxic,
mk_ic_value(priv->rxcount, priv->rxtime));
else
gfar_write(&priv->regs->rxic, 0);
if (priv->txcoalescing)
gfar_write(&priv->regs->txic,
mk_ic_value(priv->txcount, priv->txtime));
else
gfar_write(&priv->regs->txic, 0);
return 0;
}
/* Fills in rvals with the current ring parameters. Currently,
* rx, rx_mini, and rx_jumbo rings are the same size, as mini and
* jumbo are ignored by the driver */
void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = (struct gfar_private *) dev->priv;
rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
/* Values changeable by the user. The valid values are
* in the range 1 to the "*_max_pending" counterpart above.
*/
rvals->rx_pending = priv->rx_ring_size;
rvals->rx_mini_pending = priv->rx_ring_size;
rvals->rx_jumbo_pending = priv->rx_ring_size;
rvals->tx_pending = priv->tx_ring_size;
}
/* Change the current ring parameters, stopping the controller if
* necessary so that we don't mess things up while we're in
* motion. We wait for the ring to be clean before reallocating
* the rings. */
int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
{
u32 tempval;
struct gfar_private *priv = (struct gfar_private *) dev->priv;
int err = 0;
if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
return -EINVAL;
if (!is_power_of_2(rvals->rx_pending)) {
printk("%s: Ring sizes must be a power of 2\n",
dev->name);
return -EINVAL;
}
if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
return -EINVAL;
if (!is_power_of_2(rvals->tx_pending)) {
printk("%s: Ring sizes must be a power of 2\n",
dev->name);
return -EINVAL;
}
/* Stop the controller so we don't rx any more frames */
/* But first, make sure we clear the bits */
tempval = gfar_read(&priv->regs->dmactrl);
tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&priv->regs->dmactrl, tempval);
tempval = gfar_read(&priv->regs->dmactrl);
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&priv->regs->dmactrl, tempval);
while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
cpu_relax();
/* Note that rx is not clean right now */
priv->rxclean = 0;
if (dev->flags & IFF_UP) {
/* Tell the driver to process the rest of the frames */
gfar_receive(0, (void *) dev, NULL);
/* Now wait for it to be done */
wait_event_interruptible(priv->rxcleanupq, priv->rxclean);
/* Ok, all packets have been handled. Now we bring it down,
* change the ring size, and bring it up */
stop_gfar(dev);
}
priv->rx_ring_size = rvals->rx_pending;
priv->tx_ring_size = rvals->tx_pending;
if (dev->flags & IFF_UP)
err = startup_gfar(dev);
return err;
}
struct ethtool_ops gfar_ethtool_ops = {
.get_settings = gfar_gsettings,
.get_drvinfo = gfar_gdrvinfo,
.get_regs_len = gfar_reglen,
.get_regs = gfar_get_regs,
.get_link = gfar_get_link,
.get_coalesce = gfar_gcoalesce,
.set_coalesce = gfar_scoalesce,
.get_ringparam = gfar_gringparam,
.set_ringparam = gfar_sringparam,
.get_strings = gfar_gstrings,
.get_stats_count = gfar_stats_count,
.get_ethtool_stats = gfar_fill_stats,
};
/*
* drivers/net/gianfar_phy.c
*
* Gianfar Ethernet Driver -- PHY handling
* Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
* Based on 8260_io/fcc_enet.c
*
* Author: Andy Fleming
* Maintainer: Kumar Gala (kumar.gala@freescale.com)
*
* Copyright 2004 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/crc32.h>
#include "gianfar.h"
#include "gianfar_phy.h"
/* Write value to the PHY for this device to the register at regnum, */
/* waiting until the write is done before it returns. All PHY */
/* configuration has to be done through the TSEC1 MIIM regs */
void write_phy_reg(struct net_device *dev, u16 regnum, u16 value)
{
struct gfar_private *priv = (struct gfar_private *) dev->priv;
struct gfar *regbase = priv->phyregs;
struct ocp_gfar_data *einfo = priv->einfo;
/* Set the PHY address and the register address we want to write */
gfar_write(&regbase->miimadd, ((einfo->phyid) << 8) | regnum);
/* Write out the value we want */
gfar_write(&regbase->miimcon, value);
/* Wait for the transaction to finish */
while (gfar_read(&regbase->miimind) & MIIMIND_BUSY)
cpu_relax();
}
/* Reads from register regnum in the PHY for device dev, */
/* returning the value. Clears miimcom first. All PHY */
/* configuration has to be done through the TSEC1 MIIM regs */
u16 read_phy_reg(struct net_device *dev, u16 regnum)
{
struct gfar_private *priv = (struct gfar_private *) dev->priv;
struct gfar *regbase = priv->phyregs;
struct ocp_gfar_data *einfo = priv->einfo;
u16 value;
/* Set the PHY address and the register address we want to read */
gfar_write(&regbase->miimadd, ((einfo->phyid) << 8) | regnum);
/* Clear miimcom, and then initiate a read */
gfar_write(&regbase->miimcom, 0);
gfar_write(&regbase->miimcom, MIIM_READ_COMMAND);
/* Wait for the transaction to finish */
while (gfar_read(&regbase->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
cpu_relax();
/* Grab the value of the register from miimstat */
value = gfar_read(&regbase->miimstat);
return value;
}
/* returns which value to write to the control register. */
/* For 10/100 the value is slightly different. */
u16 mii_cr_init(u16 mii_reg, struct net_device * dev)
{
struct gfar_private *priv = (struct gfar_private *) dev->priv;
struct ocp_gfar_data *einfo = priv->einfo;
if (einfo->flags & GFAR_HAS_GIGABIT)
return MIIM_CONTROL_INIT;
else
return MIIM_CR_INIT;
}
#define BRIEF_GFAR_ERRORS
/* Wait for auto-negotiation to complete */
u16 mii_parse_sr(u16 mii_reg, struct net_device * dev)
{
struct gfar_private *priv = (struct gfar_private *) dev->priv;
unsigned int timeout = GFAR_AN_TIMEOUT;
if (mii_reg & MIIM_STATUS_LINK)
priv->link = 1;
else
priv->link = 0;
/* Only auto-negotiate if the link has just gone up */
if (priv->link && !priv->oldlink) {
while ((!(mii_reg & MIIM_STATUS_AN_DONE)) && timeout--)
mii_reg = read_phy_reg(dev, MIIM_STATUS);
#if defined(BRIEF_GFAR_ERRORS)
if (mii_reg & MIIM_STATUS_AN_DONE)
printk(KERN_INFO "%s: Auto-negotiation done\n",
dev->name);
else
printk(KERN_INFO "%s: Auto-negotiation timed out\n",
dev->name);
#endif
}
return 0;
}
/* Determine the speed and duplex which was negotiated */
u16 mii_parse_88E1011_psr(u16 mii_reg, struct net_device * dev)
{
struct gfar_private *priv = (struct gfar_private *) dev->priv;
unsigned int speed;
if (priv->link) {
if (mii_reg & MIIM_88E1011_PHYSTAT_DUPLEX)
priv->duplexity = 1;
else
priv->duplexity = 0;
speed = (mii_reg & MIIM_88E1011_PHYSTAT_SPEED);
switch (speed) {
case MIIM_88E1011_PHYSTAT_GBIT:
priv->speed = 1000;
break;
case MIIM_88E1011_PHYSTAT_100:
priv->speed = 100;
break;
default:
priv->speed = 10;
break;
}
} else {
priv->speed = 0;
priv->duplexity = 0;
}
return 0;
}
u16 mii_parse_cis8201(u16 mii_reg, struct net_device * dev)
{
struct gfar_private *priv = (struct gfar_private *) dev->priv;
unsigned int speed;
if (priv->link) {
if (mii_reg & MIIM_CIS8201_AUXCONSTAT_DUPLEX)
priv->duplexity = 1;
else
priv->duplexity = 0;
speed = mii_reg & MIIM_CIS8201_AUXCONSTAT_SPEED;
switch (speed) {
case MIIM_CIS8201_AUXCONSTAT_GBIT:
priv->speed = 1000;
break;
case MIIM_CIS8201_AUXCONSTAT_100:
priv->speed = 100;
break;
default:
priv->speed = 10;
break;
}
} else {
priv->speed = 0;
priv->duplexity = 0;
}
return 0;
}
u16 mii_parse_dm9161_scsr(u16 mii_reg, struct net_device * dev)
{
struct gfar_private *priv = (struct gfar_private *) dev->priv;
if (mii_reg & (MIIM_DM9161_SCSR_100F | MIIM_DM9161_SCSR_100H))
priv->speed = 100;
else
priv->speed = 10;
if (mii_reg & (MIIM_DM9161_SCSR_100F | MIIM_DM9161_SCSR_10F))
priv->duplexity = 1;
else
priv->duplexity = 0;
return 0;
}
u16 dm9161_wait(u16 mii_reg, struct net_device *dev)
{
int timeout = HZ;
int secondary = 10;
u16 temp;
do {
/* Davicom takes a bit to come up after a reset,
* so wait here for a bit */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(timeout);
temp = read_phy_reg(dev, MIIM_STATUS);
secondary--;
} while ((!(temp & MIIM_STATUS_AN_DONE)) && secondary);
return 0;
}
static struct phy_info phy_info_M88E1011S = {
0x01410c6,
"Marvell 88E1011S",
4,
(const struct phy_cmd[]) { /* config */
/* Reset and configure the PHY */
{MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init},
{miim_end,}
},
(const struct phy_cmd[]) { /* startup */
/* Status is read once to clear old link state */
{MIIM_STATUS, miim_read, NULL},
/* Auto-negotiate */
{MIIM_STATUS, miim_read, mii_parse_sr},
/* Read the status */
{MIIM_88E1011_PHY_STATUS, miim_read, mii_parse_88E1011_psr},
/* Clear the IEVENT register */
{MIIM_88E1011_IEVENT, miim_read, NULL},
/* Set up the mask */
{MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_INIT, NULL},
{miim_end,}
},
(const struct phy_cmd[]) { /* ack_int */
/* Clear the interrupt */
{MIIM_88E1011_IEVENT, miim_read, NULL},
/* Disable interrupts */
{MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_CLEAR, NULL},
{miim_end,}
},
(const struct phy_cmd[]) { /* handle_int */
/* Read the Status (2x to make sure link is right) */
{MIIM_STATUS, miim_read, NULL},
/* Check the status */
{MIIM_STATUS, miim_read, mii_parse_sr},
{MIIM_88E1011_PHY_STATUS, miim_read, mii_parse_88E1011_psr},
/* Enable Interrupts */
{MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_INIT, NULL},
{miim_end,}
},
(const struct phy_cmd[]) { /* shutdown */
{MIIM_88E1011_IEVENT, miim_read, NULL},
{MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_CLEAR, NULL},
{miim_end,}
},
};
/* Cicada 8204 */
static struct phy_info phy_info_cis8204 = {
0x3f11,
"Cicada Cis8204",
6,
(const struct phy_cmd[]) { /* config */
/* Override PHY config settings */
{MIIM_CIS8201_AUX_CONSTAT, MIIM_CIS8201_AUXCONSTAT_INIT, NULL},
/* Set up the interface mode */
{MIIM_CIS8201_EXT_CON1, MIIM_CIS8201_EXTCON1_INIT, NULL},
/* Configure some basic stuff */
{MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init},
{miim_end,}
},
(const struct phy_cmd[]) { /* startup */
/* Read the Status (2x to make sure link is right) */
{MIIM_STATUS, miim_read, NULL},
/* Auto-negotiate */
{MIIM_STATUS, miim_read, mii_parse_sr},
/* Read the status */
{MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201},
/* Clear the status register */
{MIIM_CIS8204_ISTAT, miim_read, NULL},
/* Enable interrupts */
{MIIM_CIS8204_IMASK, MIIM_CIS8204_IMASK_MASK, NULL},
{miim_end,}
},
(const struct phy_cmd[]) { /* ack_int */
/* Clear the status register */
{MIIM_CIS8204_ISTAT, miim_read, NULL},
/* Disable interrupts */
{MIIM_CIS8204_IMASK, 0x0, NULL},
{miim_end,}
},
(const struct phy_cmd[]) { /* handle_int */
/* Read the Status (2x to make sure link is right) */
{MIIM_STATUS, miim_read, NULL},
/* Auto-negotiate */
{MIIM_STATUS, miim_read, mii_parse_sr},
/* Read the status */
{MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201},
/* Enable interrupts */
{MIIM_CIS8204_IMASK, MIIM_CIS8204_IMASK_MASK, NULL},
{miim_end,}
},
(const struct phy_cmd[]) { /* shutdown */
/* Clear the status register */
{MIIM_CIS8204_ISTAT, miim_read, NULL},
/* Disable interrupts */
{MIIM_CIS8204_IMASK, 0x0, NULL},
{miim_end,}
},
};
/* Cicada 8201 */
static struct phy_info phy_info_cis8201 = {
0xfc41,
"CIS8201",
4,
(const struct phy_cmd[]) { /* config */
/* Override PHY config settings */
{MIIM_CIS8201_AUX_CONSTAT, MIIM_CIS8201_AUXCONSTAT_INIT, NULL},
/* Set up the interface mode */
{MIIM_CIS8201_EXT_CON1, MIIM_CIS8201_EXTCON1_INIT, NULL},
/* Configure some basic stuff */
{MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init},
{miim_end,}
},
(const struct phy_cmd[]) { /* startup */
/* Read the Status (2x to make sure link is right) */
{MIIM_STATUS, miim_read, NULL},
/* Auto-negotiate */
{MIIM_STATUS, miim_read, mii_parse_sr},
/* Read the status */
{MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201},
{miim_end,}
},
(const struct phy_cmd[]) { /* ack_int */
{miim_end,}
},
(const struct phy_cmd[]) { /* handle_int */
{miim_end,}
},
(const struct phy_cmd[]) { /* shutdown */
{miim_end,}
},
};
static struct phy_info phy_info_dm9161 = {
0x0181b88,
"Davicom DM9161E",
4,
(const struct phy_cmd[]) { /* config */
{MIIM_CONTROL, MIIM_DM9161_CR_STOP, NULL},
/* Do not bypass the scrambler/descrambler */
{MIIM_DM9161_SCR, MIIM_DM9161_SCR_INIT, NULL},
/* Clear 10BTCSR to default */
{MIIM_DM9161_10BTCSR, MIIM_DM9161_10BTCSR_INIT, NULL},
/* Configure some basic stuff */
{MIIM_CONTROL, MIIM_CR_INIT, NULL},
{miim_end,}
},
(const struct phy_cmd[]) { /* startup */
/* Restart Auto Negotiation */
{MIIM_CONTROL, MIIM_DM9161_CR_RSTAN, NULL},
/* Status is read once to clear old link state */
{MIIM_STATUS, miim_read, dm9161_wait},
/* Auto-negotiate */
{MIIM_STATUS, miim_read, mii_parse_sr},
/* Read the status */
{MIIM_DM9161_SCSR, miim_read, mii_parse_dm9161_scsr},
/* Clear any pending interrupts */
{MIIM_DM9161_INTR, miim_read, NULL},
{miim_end,}
},
(const struct phy_cmd[]) { /* ack_int */
{MIIM_DM9161_INTR, miim_read, NULL},
{miim_end,}
},
(const struct phy_cmd[]) { /* handle_int */
{MIIM_STATUS, miim_read, NULL},
{MIIM_STATUS, miim_read, mii_parse_sr},
{MIIM_DM9161_SCSR, miim_read, mii_parse_dm9161_scsr},
{miim_end,}
},
(const struct phy_cmd[]) { /* shutdown */
{MIIM_DM9161_INTR, miim_read, NULL},
{miim_end,}
},
};
static struct phy_info *phy_info[] = {
&phy_info_cis8201,
&phy_info_cis8204,
&phy_info_M88E1011S,
&phy_info_dm9161,
NULL
};
/* Use the PHY ID registers to determine what type of PHY is attached
 * to device dev. Returns a struct phy_info describing that PHY.
 */
struct phy_info * get_phy_info(struct net_device *dev)
{
u16 phy_reg;
u32 phy_ID;
int i;
struct phy_info *theInfo = NULL;
/* Grab the bits from PHYIR1, and put them in the upper half */
phy_reg = read_phy_reg(dev, MIIM_PHYIR1);
phy_ID = (phy_reg & 0xffff) << 16;
/* Grab the bits from PHYIR2, and put them in the lower half */
phy_reg = read_phy_reg(dev, MIIM_PHYIR2);
phy_ID |= (phy_reg & 0xffff);
/* loop through all the known PHY types, and find one that */
/* matches the ID we read from the PHY. */
for (i = 0; phy_info[i]; i++)
if (phy_info[i]->id == (phy_ID >> phy_info[i]->shift))
theInfo = phy_info[i];
if (theInfo == NULL) {
printk("%s: PHY id %x is not supported!\n", dev->name, phy_ID);
return NULL;
} else {
printk("%s: PHY is %s (%x)\n", dev->name, theInfo->name,
phy_ID);
}
return theInfo;
}
/* Take a list of struct phy_cmd, and, depending on the values, either */
/* read or write, using a helper function if provided. */
/* It is assumed that all lists of struct phy_cmd will be terminated by */
/* miim_end. */
void phy_run_commands(struct net_device *dev, const struct phy_cmd *cmd)
{
int i;
u16 result;
struct gfar_private *priv = (struct gfar_private *) dev->priv;
struct gfar *phyregs = priv->phyregs;
/* Reset the management interface */
gfar_write(&phyregs->miimcfg, MIIMCFG_RESET);
/* Setup the MII Mgmt clock speed */
gfar_write(&phyregs->miimcfg, MIIMCFG_INIT_VALUE);
/* Wait until the bus is free */
while (gfar_read(&phyregs->miimind) & MIIMIND_BUSY)
cpu_relax();
for (i = 0; cmd->mii_reg != miim_end; i++) {
/* The command is a read if mii_data is miim_read */
if (cmd->mii_data == miim_read) {
/* Read the value of the PHY reg */
result = read_phy_reg(dev, cmd->mii_reg);
/* If a function was supplied, we need to let it process */
/* the result. */
if (cmd->funct != NULL)
(*(cmd->funct)) (result, dev);
} else { /* Otherwise, it's a write */
/* If a function was supplied, it will provide
* the value to write */
/* Otherwise, the value was supplied in cmd->mii_data */
if (cmd->funct != NULL)
result = (*(cmd->funct)) (0, dev);
else
result = cmd->mii_data;
/* Write the appropriate value to the PHY reg */
write_phy_reg(dev, cmd->mii_reg, result);
}
cmd++;
}
}
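/*
* Typical use (illustrative; assumes the caller has stashed the result
* of get_phy_info() somewhere such as a priv->phy_info field, which is
* an assumption about the surrounding driver and not shown here):
*
* phy_run_commands(dev, priv->phy_info->startup);
*
* This walks the PHY's startup list, issuing each read/write and
* passing results to the per-command helper functions.
*/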
/*
* drivers/net/gianfar_phy.h
*
* Gianfar Ethernet Driver -- PHY handling
* Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
* Based on 8260_io/fcc_enet.c
*
* Author: Andy Fleming
* Maintainer: Kumar Gala (kumar.gala@freescale.com)
*
* Copyright 2004 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef __GIANFAR_PHY_H
#define __GIANFAR_PHY_H
#define miim_end ((u32)-2)
#define miim_read ((u32)-1)
#define MIIMIND_BUSY 0x00000001
#define MIIMIND_NOTVALID 0x00000004
#define MIIM_CONTROL 0x00
#define MIIM_CONTROL_RESET 0x00008000
#define MIIM_CONTROL_INIT 0x00001140
#define MIIM_ANEN 0x00001000
#define MIIM_CR 0x00
#define MIIM_CR_RST 0x00008000
#define MIIM_CR_INIT 0x00001000
#define MIIM_STATUS 0x1
#define MIIM_STATUS_AN_DONE 0x00000020
#define MIIM_STATUS_LINK 0x0004
#define MIIM_PHYIR1 0x2
#define MIIM_PHYIR2 0x3
#define GFAR_AN_TIMEOUT 0x000fffff
#define MIIM_ANLPBPA 0x5
#define MIIM_ANLPBPA_HALF 0x00000040
#define MIIM_ANLPBPA_FULL 0x00000020
#define MIIM_ANEX 0x6
#define MIIM_ANEX_NP 0x00000004
#define MIIM_ANEX_PRX 0x00000002
/* Cicada Extended Control Register 1 */
#define MIIM_CIS8201_EXT_CON1 0x17
#define MIIM_CIS8201_EXTCON1_INIT 0x0000
/* Cicada Interrupt Mask Register */
#define MIIM_CIS8204_IMASK 0x19
#define MIIM_CIS8204_IMASK_IEN 0x8000
#define MIIM_CIS8204_IMASK_SPEED 0x4000
#define MIIM_CIS8204_IMASK_LINK 0x2000
#define MIIM_CIS8204_IMASK_DUPLEX 0x1000
#define MIIM_CIS8204_IMASK_MASK 0xf000
/* Cicada Interrupt Status Register */
#define MIIM_CIS8204_ISTAT 0x1a
#define MIIM_CIS8204_ISTAT_STATUS 0x8000
#define MIIM_CIS8204_ISTAT_SPEED 0x4000
#define MIIM_CIS8204_ISTAT_LINK 0x2000
#define MIIM_CIS8204_ISTAT_DUPLEX 0x1000
/* Cicada Auxiliary Control/Status Register */
#define MIIM_CIS8201_AUX_CONSTAT 0x1c
#define MIIM_CIS8201_AUXCONSTAT_INIT 0x0004
#define MIIM_CIS8201_AUXCONSTAT_DUPLEX 0x0020
#define MIIM_CIS8201_AUXCONSTAT_SPEED 0x0018
#define MIIM_CIS8201_AUXCONSTAT_GBIT 0x0010
#define MIIM_CIS8201_AUXCONSTAT_100 0x0008
/* 88E1011 PHY Status Register */
#define MIIM_88E1011_PHY_STATUS 0x11
#define MIIM_88E1011_PHYSTAT_SPEED 0xc000
#define MIIM_88E1011_PHYSTAT_GBIT 0x8000
#define MIIM_88E1011_PHYSTAT_100 0x4000
#define MIIM_88E1011_PHYSTAT_DUPLEX 0x2000
#define MIIM_88E1011_PHYSTAT_LINK 0x0400
#define MIIM_88E1011_IEVENT 0x13
#define MIIM_88E1011_IEVENT_CLEAR 0x0000
#define MIIM_88E1011_IMASK 0x12
#define MIIM_88E1011_IMASK_INIT 0x6400
#define MIIM_88E1011_IMASK_CLEAR 0x0000
/* DM9161 Control register values */
#define MIIM_DM9161_CR_STOP 0x0400
#define MIIM_DM9161_CR_RSTAN 0x1200
#define MIIM_DM9161_SCR 0x10
#define MIIM_DM9161_SCR_INIT 0x0610
/* DM9161 Specified Configuration and Status Register */
#define MIIM_DM9161_SCSR 0x11
#define MIIM_DM9161_SCSR_100F 0x8000
#define MIIM_DM9161_SCSR_100H 0x4000
#define MIIM_DM9161_SCSR_10F 0x2000
#define MIIM_DM9161_SCSR_10H 0x1000
/* DM9161 Interrupt Register */
#define MIIM_DM9161_INTR 0x15
#define MIIM_DM9161_INTR_PEND 0x8000
#define MIIM_DM9161_INTR_DPLX_MASK 0x0800
#define MIIM_DM9161_INTR_SPD_MASK 0x0400
#define MIIM_DM9161_INTR_LINK_MASK 0x0200
#define MIIM_DM9161_INTR_MASK 0x0100
#define MIIM_DM9161_INTR_DPLX_CHANGE 0x0010
#define MIIM_DM9161_INTR_SPD_CHANGE 0x0008
#define MIIM_DM9161_INTR_LINK_CHANGE 0x0004
#define MIIM_DM9161_INTR_INIT 0x0000
#define MIIM_DM9161_INTR_STOP \
(MIIM_DM9161_INTR_DPLX_MASK | MIIM_DM9161_INTR_SPD_MASK \
| MIIM_DM9161_INTR_LINK_MASK | MIIM_DM9161_INTR_MASK)
/* DM9161 10BT Configuration/Status */
#define MIIM_DM9161_10BTCSR 0x12
#define MIIM_DM9161_10BTCSR_INIT 0x7800
#define MIIM_READ_COMMAND 0x00000001
/*
* struct phy_cmd: A command for reading or writing a PHY register
*
* mii_reg: The register to read or write
*
* mii_data: For writes, the value to put in the register.
* A value of -1 indicates this is a read.
*
* funct: A function pointer which is invoked for each command.
* For reads, this function will be passed the value read
* from the PHY, and will process it.
* For writes, the result of this function will be written
* to the PHY register
*/
struct phy_cmd {
u32 mii_reg;
u32 mii_data;
u16 (*funct) (u16 mii_reg, struct net_device * dev);
};
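/*
* A minimal sketch of a command list (disabled, for illustration only):
* write MIIM_CONTROL_INIT to the control register, read the status
* register once, and terminate with the miim_end sentinel.
*/
#if 0
static const struct phy_cmd example_config[] = {
	{MIIM_CONTROL, MIIM_CONTROL_INIT, NULL},	/* write mii_data */
	{MIIM_STATUS, miim_read, NULL},	/* read, result unused */
	{miim_end,}
};
#endif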
/* struct phy_info: a structure which defines attributes for a PHY
*
* id will contain a number which represents the PHY. During
* startup, the driver will poll the PHY to find out what its
* UID--as defined by registers 2 and 3--is. The 32-bit result
* obtained from the PHY is shifted right by "shift" bits to
* discard any low-order bits which may change with the revision
* number and are unimportant to functionality.
*
* The struct phy_cmd entries represent pointers to arrays of
* commands which tell the driver what to do to the PHY.
*/
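/*
* Worked example (register values illustrative): an 88E1011S might read
* back 0x0141 from register 2 and 0x0c62 from register 3, giving a UID
* of 0x01410c62. With shift = 4, 0x01410c62 >> 4 == 0x01410c6, which
* matches the id in its phy_info entry; the low four revision bits are
* discarded.
*/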
struct phy_info {
u32 id;
char *name;
unsigned int shift;
/* Called to configure the PHY, and modify the controller
* based on the results */
const struct phy_cmd *config;
/* Called when starting up the controller. Usually sets
* up the interrupt for state changes */
const struct phy_cmd *startup;
/* Called inside the interrupt handler to acknowledge
* the interrupt */
const struct phy_cmd *ack_int;
/* Called in the bottom half to handle the interrupt */
const struct phy_cmd *handle_int;
/* Called when bringing down the controller. Usually stops
* the interrupts from being generated */
const struct phy_cmd *shutdown;
};
struct phy_info *get_phy_info(struct net_device *dev);
void phy_run_commands(struct net_device *dev, const struct phy_cmd *cmd);
#endif /* __GIANFAR_PHY_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 Patton Electronics Company
* Copyright (C) 2002 Momentum Computer
*
* Copyright 2000 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
* stevel@mvista.com or support@mvista.com
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Ethernet driver definitions for the MIPS GT96100 Advanced
* Communication Controller.
*
* Modified for the Marvellous GT64240 Retarded Communication Controller.
*/
#ifndef _GT64240ETH_H
#define _GT64240ETH_H
#include <asm/gt64240.h>
#define ETHERNET_PORTS_DIFFERENCE_OFFSETS 0x400
/* Translate those weenie names from Galileo/VxWorks header files: */
#define GT64240_MRR MAIN_ROUTING_REGISTER
#define GT64240_CIU_ARBITER_CONFIG COMM_UNIT_ARBITER_CONFIGURATION_REGISTER
#define GT64240_CIU_ARBITER_CONTROL COMM_UNIT_ARBITER_CONTROL
#define GT64240_MAIN_LOW_CAUSE LOW_INTERRUPT_CAUSE_REGISTER
#define GT64240_MAIN_HIGH_CAUSE HIGH_INTERRUPT_CAUSE_REGISTER
#define GT64240_CPU_LOW_MASK CPU_INTERRUPT_MASK_REGISTER_LOW
#define GT64240_CPU_HIGH_MASK CPU_INTERRUPT_MASK_REGISTER_HIGH
#define GT64240_CPU_SELECT_CAUSE CPU_SELECT_CAUSE_REGISTER
#define GT64240_ETH_PHY_ADDR_REG ETHERNET_PHY_ADDRESS_REGISTER
#define GT64240_ETH_PORT_CONFIG ETHERNET0_PORT_CONFIGURATION_REGISTER
#define GT64240_ETH_PORT_CONFIG_EXT ETHERNET0_PORT_CONFIGURATION_EXTEND_REGISTER
#define GT64240_ETH_PORT_COMMAND ETHERNET0_PORT_COMMAND_REGISTER
#define GT64240_ETH_PORT_STATUS ETHERNET0_PORT_STATUS_REGISTER
#define GT64240_ETH_IO_SIZE ETHERNET_PORTS_DIFFERENCE_OFFSETS
#define GT64240_ETH_SMI_REG ETHERNET_SMI_REGISTER
#define GT64240_ETH_MIB_COUNT_BASE ETHERNET0_MIB_COUNTER_BASE
#define GT64240_ETH_SDMA_CONFIG ETHERNET0_SDMA_CONFIGURATION_REGISTER
#define GT64240_ETH_SDMA_COMM ETHERNET0_SDMA_COMMAND_REGISTER
#define GT64240_ETH_INT_MASK ETHERNET0_INTERRUPT_MASK_REGISTER
#define GT64240_ETH_INT_CAUSE ETHERNET0_INTERRUPT_CAUSE_REGISTER
#define GT64240_ETH_CURR_TX_DESC_PTR0 ETHERNET0_CURRENT_TX_DESCRIPTOR_POINTER0
#define GT64240_ETH_CURR_TX_DESC_PTR1 ETHERNET0_CURRENT_TX_DESCRIPTOR_POINTER1
#define GT64240_ETH_1ST_RX_DESC_PTR0 ETHERNET0_FIRST_RX_DESCRIPTOR_POINTER0
#define GT64240_ETH_CURR_RX_DESC_PTR0 ETHERNET0_CURRENT_RX_DESCRIPTOR_POINTER0
#define GT64240_ETH_HASH_TBL_PTR ETHERNET0_HASH_TABLE_POINTER_REGISTER
/* Turn on NAPI by default */
#define GT64240_NAPI 1
/* Some 64240 settings that SHOULD eventually be set up in the PROM monitor: */
/* (Board-specific to the DSL3224 Rev A board ONLY!) */
#define D3224_MPP_CTRL0_SETTING 0x66669900
#define D3224_MPP_CTRL1_SETTING 0x00000000
#define D3224_MPP_CTRL2_SETTING 0x00887700
#define D3224_MPP_CTRL3_SETTING 0x00000044
#define D3224_GPP_IO_CTRL_SETTING 0x0000e800
#define D3224_GPP_LEVEL_CTRL_SETTING 0xf001f703
#define D3224_GPP_VALUE_SETTING 0x00000000
/* Keep the ring sizes a power of two for efficiency. */
//-#define TX_RING_SIZE 16
#define TX_RING_SIZE 64 /* TESTING !!! */
#define RX_RING_SIZE 32
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
#define RX_HASH_TABLE_SIZE 16384
#define HASH_HOP_NUMBER 12
#define NUM_INTERFACES 3
#define GT64240ETH_TX_TIMEOUT (HZ/4)
#define MIPS_GT64240_BASE 0xf4000000
#define GT64240_ETH0_BASE (MIPS_GT64240_BASE + GT64240_ETH_PORT_CONFIG)
#define GT64240_ETH1_BASE (GT64240_ETH0_BASE + GT64240_ETH_IO_SIZE)
#define GT64240_ETH2_BASE (GT64240_ETH1_BASE + GT64240_ETH_IO_SIZE)
#if defined(CONFIG_MIPS_DSL3224)
#define GT64240_ETHER0_IRQ 4
#define GT64240_ETHER1_IRQ 4
#else
#define GT64240_ETHER0_IRQ -1
#define GT64240_ETHER1_IRQ -1
#endif
#define REV_GT64240 0x1
#define REV_GT64240A 0x10
#define GT64240ETH_READ(gp, offset) \
GT_READ((gp)->port_offset + (offset))
#define GT64240ETH_WRITE(gp, offset, data) \
GT_WRITE((gp)->port_offset + (offset), (data))
#define GT64240ETH_SETBIT(gp, offset, bits) \
GT64240ETH_WRITE((gp), (offset), \
GT64240ETH_READ((gp), (offset)) | (bits))
#define GT64240ETH_CLRBIT(gp, offset, bits) \
GT64240ETH_WRITE((gp), (offset), \
GT64240ETH_READ((gp), (offset)) & ~(bits))
#define GT64240_READ(ofs) GT_READ(ofs)
#define GT64240_WRITE(ofs, data) GT_WRITE((ofs), (data))
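/*
* Usage sketch (assuming gp points to a driver private struct with a
* valid port_offset):
*
* GT64240ETH_SETBIT(gp, GT64240_ETH_PORT_CONFIG, pcrEN);
*
* reads the port configuration register, ORs in the enable bit defined
* below, and writes the result back through the GT_READ/GT_WRITE
* accessors.
*/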
/* Bit definitions of the SMI Reg */
enum {
smirDataMask = 0xffff,
smirPhyAdMask = 0x1f << 16,
smirPhyAdBit = 16,
smirRegAdMask = 0x1f << 21,
smirRegAdBit = 21,
smirOpCode = 1 << 26,
smirReadValid = 1 << 27,
smirBusy = 1 << 28
};
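/*
* An SMI read is typically started by composing a command word from the
* fields above, e.g. (sketch only):
*
* smi_cmd = (phy_addr << smirPhyAdBit) | (reg << smirRegAdBit) | smirOpCode;
*
* then polling for smirReadValid and masking the result with
* smirDataMask.
*/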
/* Bit definitions of the Port Config Reg */
enum pcr_bits {
pcrPM = 1 << 0,
pcrRBM = 1 << 1,
pcrPBF = 1 << 2,
pcrEN = 1 << 7,
pcrLPBKMask = 0x3 << 8,
pcrLPBKBit = 1 << 8,
pcrFC = 1 << 10,
pcrHS = 1 << 12,
pcrHM = 1 << 13,
pcrHDM = 1 << 14,
pcrHD = 1 << 15,
pcrISLMask = 0x7 << 28,
pcrISLBit = 28,
pcrACCS = 1 << 31
};
/* Bit definitions of the Port Config Extend Reg */
enum pcxr_bits {
pcxrIGMP = 1,
pcxrSPAN = 2,
pcxrPAR = 4,
pcxrPRIOtxMask = 0x7 << 3,
pcxrPRIOtxBit = 3,
pcxrPRIOrxMask = 0x3 << 6,
pcxrPRIOrxBit = 6,
pcxrPRIOrxOverride = 1 << 8,
pcxrDPLXen = 1 << 9,
pcxrFCTLen = 1 << 10,
pcxrFLP = 1 << 11,
pcxrFCTL = 1 << 12,
pcxrMFLMask = 0x3 << 14,
pcxrMFLBit = 14,
pcxrMIBclrMode = 1 << 16,
pcxrSpeed = 1 << 18,
pcxrSpeeden = 1 << 19,
pcxrRMIIen = 1 << 20,
pcxrDSCPen = 1 << 21
};
/* Bit definitions of the Port Command Reg */
enum pcmr_bits {
pcmrFJ = 1 << 15
};
/* Bit definitions of the Port Status Reg */
enum psr_bits {
psrSpeed = 1,
psrDuplex = 2,
psrFctl = 4,
psrLink = 8,
psrPause = 1 << 4,
psrTxLow = 1 << 5,
psrTxHigh = 1 << 6,
psrTxInProg = 1 << 7
};
/* Bit definitions of the SDMA Config Reg */
enum sdcr_bits {
sdcrRCMask = 0xf << 2,
sdcrRCBit = 2,
sdcrBLMR = 1 << 6,
sdcrBLMT = 1 << 7,
sdcrPOVR = 1 << 8,
sdcrRIFB = 1 << 9,
sdcrBSZMask = 0x3 << 12,
sdcrBSZBit = 12
};
/* Bit definitions of the SDMA Command Reg */
enum sdcmr_bits {
sdcmrERD = 1 << 7,
sdcmrAR = 1 << 15,
sdcmrSTDH = 1 << 16,
sdcmrSTDL = 1 << 17,
sdcmrTXDH = 1 << 23,
sdcmrTXDL = 1 << 24,
sdcmrAT = 1 << 31
};
/* Bit definitions of the Interrupt Cause Reg */
enum icr_bits {
icrRxBuffer = 1,
icrTxBufferHigh = 1 << 2,
icrTxBufferLow = 1 << 3,
icrTxEndHigh = 1 << 6,
icrTxEndLow = 1 << 7,
icrRxError = 1 << 8,
icrTxErrorHigh = 1 << 10,
icrTxErrorLow = 1 << 11,
icrRxOVR = 1 << 12,
icrTxUdr = 1 << 13,
icrRxBufferQ0 = 1 << 16,
icrRxBufferQ1 = 1 << 17,
icrRxBufferQ2 = 1 << 18,
icrRxBufferQ3 = 1 << 19,
icrRxErrorQ0 = 1 << 20,
icrRxErrorQ1 = 1 << 21,
icrRxErrorQ2 = 1 << 22,
icrRxErrorQ3 = 1 << 23,
icrMIIPhySTC = 1 << 28,
icrSMIdone = 1 << 29,
icrEtherIntSum = 1 << 31
};
/* The Rx and Tx descriptor lists. */
#ifdef __LITTLE_ENDIAN
typedef struct {
u32 cmdstat;
u16 reserved; //-prk21aug01 u32 reserved:16;
u16 byte_cnt; //-prk21aug01 u32 byte_cnt:16;
u32 buff_ptr;
u32 next;
} gt64240_td_t;
typedef struct {
u32 cmdstat;
u16 byte_cnt; //-prk21aug01 u32 byte_cnt:16;
u16 buff_sz; //-prk21aug01 u32 buff_sz:16;
u32 buff_ptr;
u32 next;
} gt64240_rd_t;
#elif defined(__BIG_ENDIAN)
typedef struct {
u16 byte_cnt; //-prk21aug01 u32 byte_cnt:16;
u16 reserved; //-prk21aug01 u32 reserved:16;
u32 cmdstat;
u32 next;
u32 buff_ptr;
} gt64240_td_t;
typedef struct {
u16 buff_sz; //-prk21aug01 u32 buff_sz:16;
u16 byte_cnt; //-prk21aug01 u32 byte_cnt:16;
u32 cmdstat;
u32 next;
u32 buff_ptr;
} gt64240_rd_t;
#else
#error Either __BIG_ENDIAN or __LITTLE_ENDIAN must be defined!
#endif
/* Values for the Tx command-status descriptor entry. */
enum td_cmdstat {
txOwn = 1 << 31,
txAutoMode = 1 << 30,
txEI = 1 << 23,
txGenCRC = 1 << 22,
txPad = 1 << 18,
txFirst = 1 << 17,
txLast = 1 << 16,
txErrorSummary = 1 << 15,
txReTxCntMask = 0x0f << 10,
txReTxCntBit = 10,
txCollision = 1 << 9,
txReTxLimit = 1 << 8,
txUnderrun = 1 << 6,
txLateCollision = 1 << 5
};
/* Values for the Rx command-status descriptor entry. */
enum rd_cmdstat {
rxOwn = 1 << 31,
rxAutoMode = 1 << 30,
rxEI = 1 << 23,
rxFirst = 1 << 17,
rxLast = 1 << 16,
rxErrorSummary = 1 << 15,
rxIGMP = 1 << 14,
rxHashExpired = 1 << 13,
rxMissedFrame = 1 << 12,
rxFrameType = 1 << 11,
rxShortFrame = 1 << 8,
rxMaxFrameLen = 1 << 7,
rxOverrun = 1 << 6,
rxCollision = 1 << 4,
rxCRCError = 1
};
/* Bit fields of a Hash Table Entry */
enum hash_table_entry {
hteValid = 1,
hteSkip = 2,
hteRD = 4
};
// The MIB counters
typedef struct {
u32 byteReceived;
u32 byteSent;
u32 framesReceived;
u32 framesSent;
u32 totalByteReceived;
u32 totalFramesReceived;
u32 broadcastFramesReceived;
u32 multicastFramesReceived;
u32 cRCError;
u32 oversizeFrames;
u32 fragments;
u32 jabber;
u32 collision;
u32 lateCollision;
u32 frames64;
u32 frames65_127;
u32 frames128_255;
u32 frames256_511;
u32 frames512_1023;
u32 frames1024_MaxSize;
u32 macRxError;
u32 droppedFrames;
u32 outMulticastFrames;
u32 outBroadcastFrames;
u32 undersizeFrames;
} mib_counters_t;
struct gt64240_private {
gt64240_rd_t *rx_ring;
gt64240_td_t *tx_ring;
// The Rx and Tx rings must be 16-byte aligned
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
char *hash_table;
// The Hash Table must be 8-byte aligned
dma_addr_t hash_table_dma;
int hash_mode;
// The Rx buffers must be 8-byte aligned
char *rx_buff;
dma_addr_t rx_buff_dma;
// Tx buffers (tx_skbuff[i]->data) with less than 8 bytes
// of payload must be 8-byte aligned
struct sk_buff *tx_skbuff[TX_RING_SIZE];
int rx_next_out; /* The next free ring entry to receive */
int tx_next_in; /* The next free ring entry to send */
int tx_next_out; /* The last ring entry the ISR processed */
int tx_count; /* current # of pkts waiting to be sent in Tx ring */
int intr_work_done; /* number of Rx and Tx pkts processed in the isr */
int tx_full; /* Tx ring is full */
mib_counters_t mib;
struct net_device_stats stats;
int io_size;
int port_num; // 0 or 1
u32 port_offset;
int phy_addr; // PHY address
u32 last_psr; // last value of the port status register
int options; /* User-settable misc. driver options. */
int drv_flags;
spinlock_t lock; /* Serialise access to device */
struct mii_if_info mii_if;
u32 msg_enable;
};
#endif /* _GT64240ETH_H */
......@@ -28,7 +28,6 @@
* gt96100_cleanup_module(), and other general code cleanups
* <stevel@mvista.com>.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
......@@ -66,10 +65,6 @@ static int gt96100_add_hash_entry(struct net_device *dev,
static void read_mib_counters(struct gt96100_private *gp);
static int read_MII(int phy_addr, u32 reg);
static int write_MII(int phy_addr, u32 reg, u16 data);
#if 0
static void dump_tx_ring(struct net_device *dev);
static void dump_rx_ring(struct net_device *dev);
#endif
static int gt96100_init_module(void);
static void gt96100_cleanup_module(void);
static void dump_MII(int dbg_lvl, struct net_device *dev);
......@@ -84,7 +79,7 @@ static void abort(struct net_device *dev, u32 abort_bits);
static void hard_stop(struct net_device *dev);
static void enable_ether_irq(struct net_device *dev);
static void disable_ether_irq(struct net_device *dev);
static int gt96100_probe1(int port_num);
static int gt96100_probe1(struct pci_dev *pci, int port_num);
static void reset_tx(struct net_device *dev);
static void reset_rx(struct net_device *dev);
static int gt96100_check_tx_consistent(struct gt96100_private *gp);
......@@ -164,13 +159,11 @@ chip_name(int chip_rev)
/*
DMA memory allocation, derived from pci_alloc_consistent.
*/
static void *
dmaalloc(size_t size, dma_addr_t *dma_handle)
static void * dmaalloc(size_t size, dma_addr_t *dma_handle)
{
void *ret;
ret = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA,
get_order(size));
ret = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, get_order(size));
if (ret != NULL) {
dma_cache_inv((unsigned long)ret, size);
......@@ -184,17 +177,13 @@ dmaalloc(size_t size, dma_addr_t *dma_handle)
return ret;
}
static void
dmafree(size_t size, void *vaddr)
static void dmafree(size_t size, void *vaddr)
{
vaddr = (void*)KSEG0ADDR(vaddr);
free_pages((unsigned long)vaddr, get_order(size));
}
static void
gt96100_delay(int ms)
static void gt96100_delay(int ms)
{
if (in_interrupt())
return;
......@@ -327,34 +316,6 @@ write_MII(int phy_addr, u32 reg, u16 data)
return 0;
}
#if 0
// These routines work, just disabled to avoid compile warnings
static void
dump_tx_ring(struct net_device *dev)
{
struct gt96100_private *gp = netdev_priv(dev);
int i;
dbg(0, "%s: txno/txni/cnt=%d/%d/%d\n", __FUNCTION__,
gp->tx_next_out, gp->tx_next_in, gp->tx_count);
for (i=0; i<TX_RING_SIZE; i++)
dump_tx_desc(0, dev, i);
}
static void
dump_rx_ring(struct net_device *dev)
{
struct gt96100_private *gp = netdev_priv(dev);
int i;
dbg(0, "%s: rxno=%d\n", __FUNCTION__, gp->rx_next_out);
for (i=0; i<RX_RING_SIZE; i++)
dump_rx_desc(0, dev, i);
}
#endif
static void
dump_MII(int dbg_lvl, struct net_device *dev)
{
......@@ -647,23 +608,19 @@ disable_ether_irq(struct net_device *dev)
/*
* Init GT96100 ethernet controller driver
*/
int gt96100_init_module(void)
static int gt96100_init_module(void)
{
struct pci_dev *pci;
int i, retval=0;
u16 vendor_id, device_id;
u32 cpuConfig;
#ifndef CONFIG_MIPS_GT96100ETH
return -ENODEV;
#endif
// probe for GT96100 by reading PCI0 vendor/device ID register
pcibios_read_config_word(0, 0, PCI_VENDOR_ID, &vendor_id);
pcibios_read_config_word(0, 0, PCI_DEVICE_ID, &device_id);
if (vendor_id != PCI_VENDOR_ID_MARVELL ||
(device_id != PCI_DEVICE_ID_MARVELL_GT96100 &&
device_id != PCI_DEVICE_ID_MARVELL_GT96100A)) {
/*
* Stupid probe because this really isn't a PCI device
*/
if (!(pci = pci_find_device(PCI_VENDOR_ID_MARVELL,
PCI_DEVICE_ID_MARVELL_GT96100, NULL)) &&
!(pci = pci_find_device(PCI_VENDOR_ID_MARVELL,
PCI_DEVICE_ID_MARVELL_GT96100A, NULL))) {
printk(KERN_ERR __FILE__ ": GT96100 not found!\n");
return -ENODEV;
}
......@@ -675,17 +632,13 @@ int gt96100_init_module(void)
return -ENODEV;
}
for (i=0; i < NUM_INTERFACES; i++) {
retval |= gt96100_probe1(i);
}
for (i=0; i < NUM_INTERFACES; i++)
retval |= gt96100_probe1(pci, i);
return retval;
}
static int __init
gt96100_probe1(int port_num)
static int __init gt96100_probe1(struct pci_dev *pci, int port_num)
{
struct gt96100_private *gp = NULL;
struct gt96100_if_t *gtif = &gt96100_iflist[port_num];
......@@ -696,19 +649,19 @@ gt96100_probe1(int port_num)
struct net_device *dev = NULL;
if (gtif->irq < 0) {
printk(KERN_ERR "%s: irq unknown - probing not supported\n", __FUNCTION_);
printk(KERN_ERR "%s: irq unknown - probing not supported\n",
__FUNCTION__);
return -ENODEV;
}
pcibios_read_config_byte(0, 0, PCI_REVISION_ID, &chip_rev);
pci_read_config_byte(pci, PCI_REVISION_ID, &chip_rev);
if (chip_rev >= REV_GT96100A_1) {
phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
phy_addr = (phyAD >> (5*port_num)) & 0x1f;
} else {
/*
* not sure what's this about -- probably
* a gt bug
* not sure what's this about -- probably a gt bug
*/
phy_addr = port_num;
phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
......@@ -831,6 +784,7 @@ gt96100_probe1(int port_num)
free_netdev (dev);
out:
release_region(gtif->iobase, GT96100_ETH_IO_SIZE);
err("%s failed. Returns %d\n", __FUNCTION__, retval);
return retval;
}
......@@ -1102,6 +1056,7 @@ gt96100_close(struct net_device *dev)
}
free_irq(dev->irq, dev);
return 0;
}
......@@ -1312,10 +1267,11 @@ gt96100_tx_complete(struct net_device *dev, u32 status)
cmdstat, nextOut);
if (cmdstat & (u32)txOwn) {
//dump_tx_ring(dev);
// DMA is not finished writing descriptor???
// Leave and come back later to pick-up where
// we left off.
/*
* DMA is not finished writing descriptor???
* Leave and come back later to pick-up where
* we left off.
*/
break;
}
......@@ -1342,7 +1298,8 @@ gt96100_tx_complete(struct net_device *dev, u32 status)
gp->tx_full = 0;
if (gp->last_psr & psrLink) {
netif_wake_queue(dev);
dbg(2, "%s: Tx Ring was full, queue waked\n", __FUNCTION_);
dbg(2, "%s: Tx Ring was full, queue waked\n",
__FUNCTION__);
}
}
......@@ -1425,12 +1382,12 @@ gt96100_interrupt(int irq, void *dev_id, struct pt_regs *regs)
if ((psr & psrLink) && !gp->tx_full &&
netif_queue_stopped(dev)) {
dbg(0, ": Link up, waking queue.\n",
__FUNCTION_);
dbg(0, "%s: Link up, waking queue.\n",
__FUNCTION__);
netif_wake_queue(dev);
} else if (!(psr & psrLink) &&
!netif_queue_stopped(dev)) {
dbg(0, "Link down, stopping queue.\n",
dbg(0, "%s: Link down, stopping queue.\n",
__FUNCTION__);
netif_stop_queue(dev);
}
......@@ -1569,8 +1526,8 @@ static void gt96100_cleanup_module(void)
for (i=0; i<NUM_INTERFACES; i++) {
struct gt96100_if_t *gtif = &gt96100_iflist[i];
if (gtif->dev != NULL) {
struct gt96100_private *gp =
(struct gt96100_private *)gtif->dev->priv;
struct gt96100_private *gp = (struct gt96100_private *)
netdev_priv(gtif->dev);
unregister_netdev(gtif->dev);
dmafree(RX_HASH_TABLE_SIZE, gp->hash_table_dma);
dmafree(PKT_BUF_SZ*RX_RING_SIZE, gp->rx_buff);
......@@ -1583,9 +1540,6 @@ static void gt96100_cleanup_module(void)
}
}
#ifndef MODULE
static int __init gt96100_setup(char *options)
{
char *this_opt;
......@@ -1610,9 +1564,6 @@ static int __init gt96100_setup(char *options)
__setup("gt96100eth=", gt96100_setup);
#endif /* !MODULE */
module_init(gt96100_init_module);
module_exit(gt96100_cleanup_module);
......
/*
* drivers/net/mv64340_eth.c - Driver for MV64340X ethernet ports
* Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
*
* Based on the 64360 driver from:
* Copyright (C) 2002 rabeeh@galileo.co.il
*
* Copyright (C) 2003 PMC-Sierra, Inc.,
* written by Manish Lachwani (lachwani@pmc-sierra.com)
*
* Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/fcntl.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <asm/smp.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include "mv643xx_eth.h"
/*
* The first part is the high level driver of the gigE ethernet ports.
*/
/* Definition for configuring driver */
#undef MV64340_RX_QUEUE_FILL_ON_TASK
/* Constants */
#define EXTRA_BYTES 32
#define WRAP (ETH_HLEN + 2 + 4 + 16)
#define BUFFER_MTU (dev->mtu + WRAP)
#define INT_CAUSE_UNMASK_ALL 0x0007ffff
#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
#ifdef MV64340_RX_QUEUE_FILL_ON_TASK
#define INT_CAUSE_MASK_ALL 0x00000000
#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
#endif
/* Static function declarations */
static int mv64340_eth_real_open(struct net_device *);
static int mv64340_eth_real_stop(struct net_device *);
static int mv64340_eth_change_mtu(struct net_device *, int);
static struct net_device_stats *mv64340_eth_get_stats(struct net_device *);
static void eth_port_init_mac_tables(unsigned int eth_port_num);
#ifdef MV64340_NAPI
static int mv64340_poll(struct net_device *dev, int *budget);
#endif
unsigned char prom_mac_addr_base[6];
unsigned long mv64340_sram_base;
/*
* Changes the MTU (maximum transfer unit) of the gigabit ethernet port
*
* Input : pointer to ethernet interface network device structure
* new mtu size
* Output : 0 upon success, -EINVAL upon failure
*/
static int mv64340_eth_change_mtu(struct net_device *dev, int new_mtu)
{
struct mv64340_private *mp = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&mp->lock, flags);
if ((new_mtu > 9500) || (new_mtu < 64)) {
spin_unlock_irqrestore(&mp->lock, flags);
return -EINVAL;
}
dev->mtu = new_mtu;
/*
* Stop then re-open the interface. This will allocate RX skb's with
* the new MTU.
* There is a possible danger that the open will not succeed if
* memory is exhausted, which would make the open function fail.
*/
if (netif_running(dev)) {
if (mv64340_eth_real_stop(dev))
printk(KERN_ERR
"%s: Fatal error on stopping device\n",
dev->name);
if (mv64340_eth_real_open(dev))
printk(KERN_ERR
"%s: Fatal error on opening device\n",
dev->name);
}
spin_unlock_irqrestore(&mp->lock, flags);
return 0;
}
/*
* mv64340_eth_rx_task
*
* Fills / refills RX queue on a certain gigabit ethernet port
*
* Input : pointer to ethernet interface network device structure
* Output : N/A
*/
static void mv64340_eth_rx_task(void *data)
{
struct net_device *dev = (struct net_device *) data;
struct mv64340_private *mp = netdev_priv(dev);
struct pkt_info pkt_info;
struct sk_buff *skb;
if (test_and_set_bit(0, &mp->rx_task_busy))
panic("%s: Error in test_set_bit / clear_bit", dev->name);
while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
/* The +8 is for buffer alignment, plus another 32 bytes extra */
skb = dev_alloc_skb(BUFFER_MTU + 8 + EXTRA_BYTES);
if (!skb)
/* Better luck next time */
break;
mp->rx_ring_skbs++;
pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
pkt_info.byte_cnt = dev->mtu + ETH_HLEN + 4 + 2 + EXTRA_BYTES;
/* Round the buffer size up to an 8-byte boundary */
if (pkt_info.byte_cnt & 0x7) {
pkt_info.byte_cnt &= ~0x7;
pkt_info.byte_cnt += 8;
}
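/*
* Worked example (MTU value illustrative): with dev->mtu == 1500
* the count is 1500 + 14 + 4 + 2 + 32 = 1552, already a multiple
* of 8, so it is left unchanged; 1553..1559 would round up to 1560.
*/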
pkt_info.buf_ptr =
pci_map_single(0, skb->data,
dev->mtu + ETH_HLEN + 4 + 2 + EXTRA_BYTES,
PCI_DMA_FROMDEVICE);
pkt_info.return_info = skb;
if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
printk(KERN_ERR
"%s: Error allocating RX Ring\n", dev->name);
break;
}
skb_reserve(skb, 2);
}
clear_bit(0, &mp->rx_task_busy);
/*
* If the RX ring is empty of skb's, set a timer to try allocating
* again at a later time.
*/
if ((mp->rx_ring_skbs == 0) && (mp->rx_timer_flag == 0)) {
printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
/* After 100mSec */
mp->timeout.expires = jiffies + (HZ / 10);
add_timer(&mp->timeout);
mp->rx_timer_flag = 1;
}
#ifdef MV64340_RX_QUEUE_FILL_ON_TASK
else {
/* Return interrupts */
MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(mp->port_num),
INT_CAUSE_UNMASK_ALL);
}
#endif
}
/*
* mv64340_eth_rx_task_timer_wrapper
*
* Timer routine to wake up the RX queue filling task. This function is
* used only when the RX queue is empty and all alloc_skb calls have
* failed (due to an out of memory event).
*
* Input : pointer to ethernet interface network device structure
* Output : N/A
*/
static void mv64340_eth_rx_task_timer_wrapper(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
struct mv64340_private *mp = netdev_priv(dev);
mp->rx_timer_flag = 0;
mv64340_eth_rx_task((void *) data);
}
/*
* mv64340_eth_update_mac_address
*
* Update the MAC address of the port in the address table
*
* Input : pointer to ethernet interface network device structure
* Output : N/A
*/
static void mv64340_eth_update_mac_address(struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
eth_port_init_mac_tables(port_num);
memcpy(mp->port_mac_addr, dev->dev_addr, 6);
eth_port_uc_addr_set(port_num, mp->port_mac_addr);
}
/*
* mv64340_eth_set_rx_mode
*
* Change between promiscuous and regular rx mode
*
* Input : pointer to ethernet interface network device structure
* Output : N/A
*/
static void mv64340_eth_set_rx_mode(struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
if (dev->flags & IFF_PROMISC) {
ethernet_set_config_reg
(mp->port_num,
ethernet_get_config_reg(mp->port_num) |
ETH_UNICAST_PROMISCUOUS_MODE);
} else {
ethernet_set_config_reg
(mp->port_num,
ethernet_get_config_reg(mp->port_num) &
~(unsigned int) ETH_UNICAST_PROMISCUOUS_MODE);
}
}
/*
* mv64340_eth_set_mac_address
*
* Change the interface's MAC address.
* Nothing special needs to be done in hardware because the interface
* is always put in promiscuous mode.
*
* Input : pointer to ethernet interface network device structure and
* a pointer to the designated entry to be added to the cache.
* Output : zero upon success, negative upon failure
*/
static int mv64340_eth_set_mac_address(struct net_device *dev, void *addr)
{
int i;
for (i = 0; i < 6; i++)
/* +2 is for the offset of the HW addr type */
dev->dev_addr[i] = ((unsigned char *) addr)[i + 2];
mv64340_eth_update_mac_address(dev);
return 0;
}
/*
* mv64340_eth_tx_timeout
*
* Called upon a timeout on transmitting a packet
*
* Input : pointer to ethernet interface network device structure.
* Output : N/A
*/
static void mv64340_eth_tx_timeout(struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
printk(KERN_INFO "%s: TX timeout ", dev->name);
/* Do the reset outside of interrupt context */
schedule_work(&mp->tx_timeout_task);
}
/*
* mv64340_eth_tx_timeout_task
*
* Actual routine to reset the adapter when a timeout on Tx has occurred
*/
static void mv64340_eth_tx_timeout_task(struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
netif_device_detach(dev);
eth_port_reset(mp->port_num);
eth_port_start(mp);
netif_device_attach(dev);
}
/*
* mv64340_eth_free_tx_queue
*
* Input : dev - a pointer to the required interface
*
* Output : 0 if was able to release skb , nonzero otherwise
*/
static int mv64340_eth_free_tx_queue(struct net_device *dev,
unsigned int eth_int_cause_ext)
{
struct mv64340_private *mp = netdev_priv(dev);
struct net_device_stats *stats = &mp->stats;
struct pkt_info pkt_info;
int released = 1;
if (!(eth_int_cause_ext & (BIT0 | BIT8)))
return released;
spin_lock(&mp->lock);
/* Check only queue 0 */
while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
if (pkt_info.cmd_sts & BIT0) {
printk("%s: Error in TX\n", dev->name);
stats->tx_errors++;
}
/*
* If return_info is non-zero, release the skb. return_info is
* non-zero only for the last descriptor of a transmitted
* scatter/gather packet, so only the last skb releases the
* whole chain.
*/
if (pkt_info.return_info) {
dev_kfree_skb_irq((struct sk_buff *)
pkt_info.return_info);
released = 0;
if (skb_shinfo(pkt_info.return_info)->nr_frags)
pci_unmap_page(NULL, pkt_info.buf_ptr,
pkt_info.byte_cnt, PCI_DMA_TODEVICE);
if (mp->tx_ring_skbs != 1)
mp->tx_ring_skbs--;
} else
pci_unmap_page(NULL, pkt_info.buf_ptr,
pkt_info.byte_cnt, PCI_DMA_TODEVICE);
/*
* Decrement the number of outstanding skbs counter on
* the TX queue.
*/
if (mp->tx_ring_skbs == 0)
panic("ERROR - TX outstanding SKBs counter is corrupted");
}
spin_unlock(&mp->lock);
return released;
}
/*
* mv64340_eth_receive
*
* This function forwards packets received from the port's queues to
* the kernel core, or FastRoutes them to another interface.
*
* Input : dev - a pointer to the required interface
* max - maximum number to receive (0 means unlimited)
*
* Output : number of served packets
*/
#ifdef MV64340_NAPI
static int mv64340_eth_receive_queue(struct net_device *dev, unsigned int max,
int budget)
#else
static int mv64340_eth_receive_queue(struct net_device *dev, unsigned int max)
#endif
{
struct mv64340_private *mp = netdev_priv(dev);
struct net_device_stats *stats = &mp->stats;
unsigned int received_packets = 0;
struct sk_buff *skb;
struct pkt_info pkt_info;
#ifdef MV64340_NAPI
while (eth_port_receive(mp, &pkt_info) == ETH_OK && budget > 0) {
#else
while ((--max) && eth_port_receive(mp, &pkt_info) == ETH_OK) {
#endif
mp->rx_ring_skbs--;
received_packets++;
#ifdef MV64340_NAPI
budget--;
#endif
/* Update statistics. Note that the byte count includes the 4-byte CRC */
stats->rx_packets++;
stats->rx_bytes += pkt_info.byte_cnt;
skb = (struct sk_buff *) pkt_info.return_info;
/*
* If a packet was received without the first/last bits on, or
* the error summary bit is on, the packet needs to be dropped.
*/
if (((pkt_info.cmd_sts
& (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
|| (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
stats->rx_dropped++;
if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
ETH_RX_LAST_DESC)) !=
(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
if (net_ratelimit())
printk(KERN_ERR
"%s: Received packet spread on multiple"
" descriptors\n",
dev->name);
}
if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)
stats->rx_errors++;
dev_kfree_skb_irq(skb);
} else {
/*
* The -4 is for the CRC in the trailer of the
* received packet
*/
skb_put(skb, pkt_info.byte_cnt - 4);
skb->dev = dev;
if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum = htons((pkt_info.cmd_sts
& 0x0007fff8) >> 3);
}
skb->protocol = eth_type_trans(skb, dev);
#ifdef MV64340_NAPI
netif_receive_skb(skb);
#else
netif_rx(skb);
#endif
}
}
return received_packets;
}
/*
* mv64340_eth_int_handler
*
* Main interrupt handler for the gigabit ethernet ports
*
* Input : irq - irq number (not used)
* dev_id - a pointer to the required interface's data structure
* regs - not used
* Output : N/A
*/
static irqreturn_t mv64340_eth_int_handler(int irq, void *dev_id,
struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_id;
struct mv64340_private *mp = netdev_priv(dev);
u32 eth_int_cause, eth_int_cause_ext = 0;
unsigned int port_num = mp->port_num;
/* Read interrupt cause registers */
eth_int_cause = MV_READ(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num)) &
INT_CAUSE_UNMASK_ALL;
if (eth_int_cause & BIT1)
eth_int_cause_ext =
MV_READ(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
INT_CAUSE_UNMASK_ALL_EXT;
#ifdef MV64340_NAPI
if (!(eth_int_cause & 0x0007fffd)) {
/* Don't ack the Rx interrupt */
#endif
/*
* Clear specific ethernet port interrupt registers by
* acknowledging the relevant bits.
*/
MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num),
~eth_int_cause);
if (eth_int_cause_ext != 0x0)
MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
~eth_int_cause_ext);
/* UDP change : We may need this */
if ((eth_int_cause_ext & 0x0000ffff) &&
(mv64340_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
(MV64340_TX_QUEUE_SIZE > mp->tx_ring_skbs + 1))
netif_wake_queue(dev);
#ifdef MV64340_NAPI
} else {
if (netif_rx_schedule_prep(dev)) {
/* Mask all the interrupts */
MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),0);
MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
__netif_rx_schedule(dev);
}
#else
{
if (eth_int_cause & (BIT2 | BIT11))
mv64340_eth_receive_queue(dev, 0);
/*
* After forwarded received packets to upper layer, add a task
* in an interrupts enabled context that refills the RX ring
* with skb's.
*/
#ifdef MV64340_RX_QUEUE_FILL_ON_TASK
/* Unmask all interrupts on ethernet port */
MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
INT_CAUSE_MASK_ALL);
queue_task(&mp->rx_task, &tq_immediate);
mark_bh(IMMEDIATE_BH);
#else
mp->rx_task.func(dev);
#endif
#endif
}
/* PHY status changed */
if (eth_int_cause_ext & (BIT16 | BIT20)) {
unsigned int phy_reg_data;
/* Check Link status on ethernet port */
eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
if (!(phy_reg_data & 0x20)) {
netif_stop_queue(dev);
} else {
netif_wake_queue(dev);
/*
* Start all TX queues on the ethernet port. This is good in
* case previous packets were not transmitted due to link
* down, and this command re-enables all TX queues.
* Note that it is possible to get a TX resource error
* interrupt after issuing this, since not all TX queues
* are enabled or have anything to send.
*/
MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 1);
}
}
/*
* If no real interrupt occurred, exit.
* This can happen when using the gigE interrupt coalescing mechanism.
*/
if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
return IRQ_NONE;
return IRQ_HANDLED;
}
#ifdef MV64340_COAL
/*
* eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
*
* DESCRIPTION:
* This routine sets the RX coalescing interrupt mechanism parameter.
* This parameter is a timeout counter that counts in chunks of
* 64 t_clk; when the timeout event occurs, a maskable interrupt
* is asserted.
* The parameter is calculated using the t_clk of the MV-643xx chip
* and the required delay of the interrupt in usec.
*
* INPUT:
* unsigned int eth_port_num Ethernet port number
* unsigned int t_clk t_clk of the MV-643xx chip in HZ units
* unsigned int delay Delay in usec
*
* OUTPUT:
* Interrupt coalescing mechanism value is set in MV-643xx chip.
*
* RETURN:
* The interrupt coalescing value set in the gigE port.
*
*/
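/*
* Worked example (delay value illustrative): with t_clk = 133000000, as
* passed from mv64340_eth_real_open() below, and delay = 100 usec,
* coal = ((133000000 / 1000000) * 100) / 64 = 13300 / 64 = 207, i.e.
* 207 chunks of 64 t_clk, or roughly 100 usec.
*/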
static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
unsigned int t_clk, unsigned int delay)
{
unsigned int coal = ((t_clk / 1000000) * delay) / 64;
/* Set RX Coalescing mechanism */
MV_WRITE(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num),
((coal & 0x3fff) << 8) |
(MV_READ(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num))
& 0xffc000ff));
return coal;
}
#endif
/*
* eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
*
* DESCRIPTION:
* This routine sets the TX coalescing interrupt mechanism parameter.
* This parameter is a timeout counter that counts in chunks of
* 64 t_clk; when the timeout event occurs, a maskable interrupt
* is asserted.
* The parameter is calculated using the t_clk frequency of the
* MV-643xx chip and the required delay of the interrupt in usec.
*
* INPUT:
* unsigned int eth_port_num Ethernet port number
* unsigned int t_clk t_clk of the MV-643xx chip in HZ units
* unsigned int delay Delay in uSeconds
*
* OUTPUT:
* Interrupt coalescing mechanism value is set in MV-643xx chip.
*
* RETURN:
* The interrupt coalescing value set in the gigE port.
*
*/
static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
unsigned int t_clk, unsigned int delay)
{
unsigned int coal;
coal = ((t_clk / 1000000) * delay) / 64;
/* Set TX Coalescing mechanism */
MV_WRITE(MV64340_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),
coal << 4);
return coal;
}
/*
* mv64340_eth_open
*
* This function is called when opening the network device. The function
* should initialize all the hardware, initialize the cyclic Rx/Tx
* descriptor chains and buffers, and allocate an IRQ for the network
* device.
*
* Input : a pointer to the network device structure
*
* Output : zero on success, nonzero on failure.
*/
static int mv64340_eth_open(struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
int err;
spin_lock_irq(&mp->lock);
err = request_irq(dev->irq, mv64340_eth_int_handler,
SA_INTERRUPT | SA_SAMPLE_RANDOM, dev->name, dev);
if (err) {
printk(KERN_ERR "Can not assign IRQ number to MV64340_eth%d\n",
port_num);
err = -EAGAIN;
goto out;
}
if (mv64340_eth_real_open(dev)) {
printk("%s: Error opening interface\n", dev->name);
err = -EBUSY;
goto out_free;
}
spin_unlock_irq(&mp->lock);
return 0;
out_free:
free_irq(dev->irq, dev);
out:
spin_unlock_irq(&mp->lock);
return err;
}
/*
* ether_init_rx_desc_ring - Carve an Rx chained desc list and buffers in memory.
*
* DESCRIPTION:
* This function prepares an Rx chained list of descriptors and packet
* buffers in the form of a ring. The routine must be called after the
* port initialization routine and before the port start routine.
* The Ethernet SDMA engine uses CPU bus addresses to access the various
* devices in the system (i.e. DRAM). This function uses the ethernet
* struct 'virtual to physical' routine (set by the user) to set the ring
* with physical addresses.
*
* INPUT:
* struct mv64340_private *mp Ethernet Port Control struct.
* int rx_desc_num Number of Rx descriptors
* int rx_buff_size Size of Rx buffer
* unsigned int rx_desc_base_addr Rx descriptors memory area base addr.
* unsigned int rx_buff_base_addr Rx buffer memory area base addr.
*
* OUTPUT:
* The routine updates the Ethernet port control struct with information
* regarding the Rx descriptors and buffers.
*
* RETURN:
* false if the given descriptors memory area is not aligned according to
* Ethernet SDMA specifications.
* true otherwise.
*/
static int ether_init_rx_desc_ring(struct mv64340_private * mp,
unsigned long rx_buff_base_addr)
{
unsigned long buffer_addr = rx_buff_base_addr;
volatile struct eth_rx_desc *p_rx_desc;
int rx_desc_num = mp->rx_ring_size;
unsigned long rx_desc_base_addr = (unsigned long) mp->p_rx_desc_area;
int rx_buff_size = 1536; /* Dummy, will be replaced later */
int i;
p_rx_desc = (struct eth_rx_desc *) rx_desc_base_addr;
/* Rx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */
if (rx_buff_base_addr & 0xf)
return 0;
/* Rx buffers are limited to 64K bytes; minimum size is 8 bytes */
if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE))
return 0;
/* Rx buffers must be 64-bit aligned. */
if ((rx_buff_base_addr + rx_buff_size) & 0x7)
return 0;
/* initialize the Rx descriptors ring */
for (i = 0; i < rx_desc_num; i++) {
p_rx_desc[i].buf_size = rx_buff_size;
p_rx_desc[i].byte_cnt = 0x0000;
p_rx_desc[i].cmd_sts =
ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);
p_rx_desc[i].buf_ptr = buffer_addr;
mp->rx_skb[i] = NULL;
buffer_addr += rx_buff_size;
}
/* Save Rx desc pointer to driver struct. */
mp->rx_curr_desc_q = 0;
mp->rx_used_desc_q = 0;
mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);
mp->port_rx_queue_command |= 1;
return 1;
}
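/*
* Note on the ring wrap above: with rx_desc_num == 4 (value
* illustrative), descriptor 3 gets next_desc_ptr = rx_desc_dma +
* ((3 + 1) % 4) * sizeof(struct eth_rx_desc) == rx_desc_dma, i.e. it
* points back at descriptor 0, closing the ring.
*/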
/*
* ether_init_tx_desc_ring - Carve a Tx chained desc list and buffers in memory.
*
* DESCRIPTION:
* This function prepares a Tx chained list of descriptors and packet
* buffers in the form of a ring. The routine must be called after the
* port initialization routine and before the port start routine.
* The Ethernet SDMA engine uses CPU bus addresses to access the various
* devices in the system (i.e. DRAM). This function uses the ethernet
* struct 'virtual to physical' routine (set by the user) to set the ring
* with physical addresses.
*
* INPUT:
* struct mv64340_private *mp Ethernet Port Control struct.
* int tx_desc_num Number of Tx descriptors
* int tx_buff_size Size of Tx buffer
* unsigned int tx_desc_base_addr Tx descriptors memory area base addr.
*
* OUTPUT:
* The routine updates the Ethernet port control struct with information
* regarding the Tx descriptors and buffers.
*
* RETURN:
* false if the given descriptors memory area is not aligned according to
* Ethernet SDMA specifications.
* true otherwise.
*/
static int ether_init_tx_desc_ring(struct mv64340_private *mp)
{
unsigned long tx_desc_base_addr = (unsigned long) mp->p_tx_desc_area;
int tx_desc_num = mp->tx_ring_size;
struct eth_tx_desc *p_tx_desc;
int i;
/* Tx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */
if (tx_desc_base_addr & 0xf)
return 0;
/* save the first desc pointer to link with the last descriptor */
p_tx_desc = (struct eth_tx_desc *) tx_desc_base_addr;
/* Initialize the Tx descriptors ring */
for (i = 0; i < tx_desc_num; i++) {
p_tx_desc[i].byte_cnt = 0x0000;
p_tx_desc[i].l4i_chk = 0x0000;
p_tx_desc[i].cmd_sts = 0x00000000;
p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);
p_tx_desc[i].buf_ptr = 0x00000000;
mp->tx_skb[i] = NULL;
}
/* Set Tx desc pointer in driver struct. */
mp->tx_curr_desc_q = 0;
mp->tx_used_desc_q = 0;
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
mp->tx_first_desc_q = 0;
#endif
/* Init Tx ring base and size parameters */
mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);
/* Add the queue to the list of Tx queues of this port */
mp->port_tx_queue_command |= 1;
return 1;
}
/* Helper function for mv64340_eth_open */
static int mv64340_eth_real_open(struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
u32 phy_reg_data;
unsigned int size;
/* Stop RX Queues */
MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
0x0000ff00);
/* Clear the ethernet port interrupts */
MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
/* Unmask RX buffer and TX end interrupt */
MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
INT_CAUSE_UNMASK_ALL);
/* Unmask phy and link status changes interrupts */
MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
INT_CAUSE_UNMASK_ALL_EXT);
/* Set the MAC Address */
memcpy(mp->port_mac_addr, dev->dev_addr, 6);
eth_port_init(mp);
INIT_WORK(&mp->rx_task, (void (*)(void *)) mv64340_eth_rx_task, dev);
memset(&mp->timeout, 0, sizeof(struct timer_list));
mp->timeout.function = mv64340_eth_rx_task_timer_wrapper;
mp->timeout.data = (unsigned long) dev;
mp->rx_task_busy = 0;
mp->rx_timer_flag = 0;
/* Allocate TX ring */
mp->tx_ring_skbs = 0;
mp->tx_ring_size = MV64340_TX_QUEUE_SIZE;
size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
mp->tx_desc_area_size = size;
/* Assumes the allocated ring is 16-byte aligned */
mp->p_tx_desc_area = pci_alloc_consistent(NULL, size, &mp->tx_desc_dma);
if (!mp->p_tx_desc_area) {
printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
dev->name, size);
return -ENOMEM;
}
memset((void *) mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
/* Dummy will be replaced upon real tx */
ether_init_tx_desc_ring(mp);
/* Allocate RX ring */
/* For now the RX ring size is fixed, but it should be made configurable by the user */
mp->rx_ring_size = MV64340_RX_QUEUE_SIZE;
mp->rx_ring_skbs = 0;
size = mp->rx_ring_size * sizeof(struct eth_rx_desc);
mp->rx_desc_area_size = size;
/* Assumes allocated ring is 16 bytes aligned */
mp->p_rx_desc_area = pci_alloc_consistent(NULL, size, &mp->rx_desc_dma);
if (!mp->p_rx_desc_area) {
printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
dev->name, size);
printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
dev->name);
pci_free_consistent(0, mp->tx_desc_area_size,
(void *) mp->p_tx_desc_area,
mp->tx_desc_dma);
return -ENOMEM;
}
memset(mp->p_rx_desc_area, 0, size);
if (!(ether_init_rx_desc_ring(mp, 0)))
panic("%s: Error initializing RX Ring", dev->name);
mv64340_eth_rx_task(dev); /* Fill RX ring with skb's */
eth_port_start(mp);
/* Interrupt Coalescing */
#ifdef MV64340_COAL
mp->rx_int_coal =
eth_port_set_rx_coal(port_num, 133000000, MV64340_RX_COAL);
#endif
mp->tx_int_coal =
eth_port_set_tx_coal (port_num, 133000000, MV64340_TX_COAL);
/* Increase the Rx side buffer size */
MV_WRITE (MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num), (0x5 << 17) |
(MV_READ(MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num))
& 0xfff1ffff));
/* Check Link status on phy */
eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
if (!(phy_reg_data & 0x20))
netif_stop_queue(dev);
else
netif_start_queue(dev);
return 0;
}
static void mv64340_eth_free_tx_rings(struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
unsigned int curr;
/* Stop Tx Queues */
MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
0x0000ff00);
/* Free TX rings */
/* Free outstanding skb's on TX rings */
for (curr = 0;
(mp->tx_ring_skbs) && (curr < MV64340_TX_QUEUE_SIZE);
curr++) {
if (mp->tx_skb[curr]) {
dev_kfree_skb(mp->tx_skb[curr]);
mp->tx_ring_skbs--;
}
}
if (mp->tx_ring_skbs != 0)
printk("%s: Error on Tx descriptor free - could not free %d"
" descriptors\n", dev->name,
mp->tx_ring_skbs);
pci_free_consistent(0, mp->tx_desc_area_size,
(void *) mp->p_tx_desc_area, mp->tx_desc_dma);
}
static void mv64340_eth_free_rx_rings(struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
int curr;
/* Stop RX Queues */
MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
0x0000ff00);
/* Free RX rings */
/* Free preallocated skb's on RX rings */
for (curr = 0;
mp->rx_ring_skbs && (curr < MV64340_RX_QUEUE_SIZE);
curr++) {
if (mp->rx_skb[curr]) {
dev_kfree_skb(mp->rx_skb[curr]);
mp->rx_ring_skbs--;
}
}
if (mp->rx_ring_skbs != 0)
printk(KERN_ERR
"%s: Error in freeing Rx Ring. %d skb's still"
" stuck in RX Ring - ignoring them\n", dev->name,
mp->rx_ring_skbs);
pci_free_consistent(0, mp->rx_desc_area_size,
(void *) mp->p_rx_desc_area,
mp->rx_desc_dma);
}
/*
* mv64340_eth_stop
*
* This function is used when closing the network device.
* It updates the hardware,
* release all memory that holds buffers and descriptors and release the IRQ.
* Input : a pointer to the device structure
* Output : zero if success , nonzero if fails
*/
/* Helper function for mv64340_eth_stop */
static int mv64340_eth_real_stop(struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
netif_stop_queue(dev);
mv64340_eth_free_tx_rings(dev);
mv64340_eth_free_rx_rings(dev);
eth_port_reset(mp->port_num);
/* Disable ethernet port interrupts */
MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
/* Mask RX buffer and TX end interrupt */
MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num), 0);
/* Mask phy and link status changes interrupts */
MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
return 0;
}
static int mv64340_eth_stop(struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
spin_lock_irq(&mp->lock);
mv64340_eth_real_stop(dev);
free_irq(dev->irq, dev);
spin_unlock_irq(&mp->lock);
return 0;
}
#ifdef MV64340_NAPI
static void mv64340_tx(struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
struct pkt_info pkt_info;
while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
if (pkt_info.return_info) {
/* Unmap the fragment page before freeing the skb;
 * skb_shinfo() must not touch a freed skb. */
if (skb_shinfo(pkt_info.return_info)->nr_frags)
pci_unmap_page(NULL, pkt_info.buf_ptr,
pkt_info.byte_cnt,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq((struct sk_buff *)
pkt_info.return_info);
if (mp->tx_ring_skbs != 1)
mp->tx_ring_skbs--;
} else
pci_unmap_page(NULL, pkt_info.buf_ptr, pkt_info.byte_cnt,
PCI_DMA_TODEVICE);
}
if (netif_queue_stopped(dev) &&
MV64340_TX_QUEUE_SIZE > mp->tx_ring_skbs + 1)
netif_wake_queue(dev);
}
/*
* mv64340_poll
*
* This function is used in case of NAPI
*/
static int mv64340_poll(struct net_device *dev, int *budget)
{
struct mv64340_private *mp = netdev_priv(dev);
int done = 1, orig_budget, work_done;
unsigned int port_num = mp->port_num;
unsigned long flags;
#ifdef MV64340_TX_FAST_REFILL
if (++mp->tx_clean_threshold > 5) {
spin_lock_irqsave(&mp->lock, flags);
mv64340_tx(dev);
mp->tx_clean_threshold = 0;
spin_unlock_irqrestore(&mp->lock, flags);
}
#endif
if ((u32)(MV_READ(MV64340_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) != (u32)mp->rx_used_desc_q) {
orig_budget = *budget;
if (orig_budget > dev->quota)
orig_budget = dev->quota;
work_done = mv64340_eth_receive_queue(dev, 0, orig_budget);
mp->rx_task.func(dev);
*budget -= work_done;
dev->quota -= work_done;
if (work_done >= orig_budget)
done = 0;
}
if (done) {
spin_lock_irqsave(&mp->lock, flags);
__netif_rx_complete(dev);
MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num),0);
MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),0);
MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
INT_CAUSE_UNMASK_ALL);
MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
INT_CAUSE_UNMASK_ALL_EXT);
spin_unlock_irqrestore(&mp->lock, flags);
}
return done ? 0 : 1;
}
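/*
 * Worked example of the budget accounting above (figures assumed for
 * illustration only): with dev->weight = 64, a poll call sees *budget = 64
 * and dev->quota = 64. If mv64340_eth_receive_queue() handles 10 frames,
 * work_done = 10, *budget and dev->quota drop to 54, and since
 * work_done < orig_budget the routine completes, re-enables the port
 * interrupts and returns 0. Only when the whole budget is consumed does it
 * return 1 so that the network core keeps polling.
 */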
#endif
/*
* mv64340_eth_start_xmit
*
* This function queues a packet in the Tx descriptor ring of the
* required port.
*
* Input : skb - a pointer to socket buffer
* dev - a pointer to the required port
*
* Output : zero upon success
*/
static int mv64340_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
struct net_device_stats *stats = &mp->stats;
ETH_FUNC_RET_STATUS status;
unsigned long flags;
struct pkt_info pkt_info;
if (netif_queue_stopped(dev)) {
printk(KERN_ERR
"%s: Tried sending packet when interface is stopped\n",
dev->name);
return 1;
}
/* This is a hard error, log it. */
if ((MV64340_TX_QUEUE_SIZE - mp->tx_ring_skbs) <=
(skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
printk(KERN_ERR
"%s: Bug in mv64340_eth - Trying to transmit when"
" queue full !\n", dev->name);
return 1;
}
/* Paranoid check - this shouldn't happen */
if (skb == NULL) {
stats->tx_dropped++;
return 1;
}
spin_lock_irqsave(&mp->lock, flags);
/* Update packet info data structure -- DMA owned, first last */
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
if (!skb_shinfo(skb)->nr_frags || (skb_shinfo(skb)->nr_frags > 3)) {
#endif
pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
pkt_info.byte_cnt = skb->len;
pkt_info.buf_ptr = pci_map_single(0, skb->data, skb->len,
PCI_DMA_TODEVICE);
pkt_info.return_info = skb;
status = eth_port_send(mp, &pkt_info);
if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
printk(KERN_ERR "%s: Error on transmitting packet\n",
dev->name);
mp->tx_ring_skbs++;
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
} else {
unsigned int frag;
u32 ipheader;
/* first frag which is skb header */
pkt_info.byte_cnt = skb_headlen(skb);
pkt_info.buf_ptr = pci_map_single(0, skb->data,
skb_headlen(skb), PCI_DMA_TODEVICE);
pkt_info.return_info = 0;
ipheader = skb->nh.iph->ihl << 11;
pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
ETH_GEN_TCP_UDP_CHECKSUM |
ETH_GEN_IP_V_4_CHECKSUM |
ipheader;
/* CPU already calculated pseudo header checksum. So, use it */
pkt_info.l4i_chk = skb->h.th->check;
status = eth_port_send(mp, &pkt_info);
if (status != ETH_OK) {
if (status == ETH_ERROR)
printk(KERN_ERR "%s: Error on transmitting packet\n", dev->name);
if (status == ETH_QUEUE_FULL)
printk(KERN_ERR "%s: Tx queue is full\n", dev->name);
if (status == ETH_QUEUE_LAST_RESOURCE)
printk(KERN_ERR "%s: Tx resource error\n", dev->name);
}
/* Check for the remaining frags */
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
pkt_info.l4i_chk = 0x0000;
pkt_info.cmd_sts = 0x00000000;
/* Last Frag enables interrupt and frees the skb */
if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT |
ETH_TX_LAST_DESC;
pkt_info.return_info = skb;
mp->tx_ring_skbs++;
}
else {
pkt_info.return_info = 0;
}
pkt_info.byte_cnt = this_frag->size;
if (this_frag->size < 8)
printk(KERN_DEBUG "%s: fragment smaller than 8 bytes (nr_frags=%d)\n",
dev->name, skb_shinfo(skb)->nr_frags);
pkt_info.buf_ptr = pci_map_page(NULL, this_frag->page,
this_frag->page_offset,
this_frag->size, PCI_DMA_TODEVICE);
status = eth_port_send(mp, &pkt_info);
if (status != ETH_OK) {
if (status == ETH_ERROR)
printk(KERN_ERR "%s: Error on transmitting packet\n", dev->name);
if (status == ETH_QUEUE_LAST_RESOURCE)
printk(KERN_ERR "%s: Tx resource error\n", dev->name);
if (status == ETH_QUEUE_FULL)
printk(KERN_ERR "%s: Tx queue is full\n", dev->name);
}
}
}
#endif
/* Check if TX queue can handle another skb. If not, then
* signal higher layers to stop requesting TX
*/
if (MV64340_TX_QUEUE_SIZE <= (mp->tx_ring_skbs + 1))
/*
* Stop getting skb's from upper layers.
* Getting skb's from upper layers will be enabled again after
* packets are released.
*/
netif_stop_queue(dev);
/* Update statistics and the start-of-transmission time */
stats->tx_bytes += skb->len;
stats->tx_packets++;
dev->trans_start = jiffies;
spin_unlock_irqrestore(&mp->lock, flags);
return 0; /* success */
}
/*
* mv64340_eth_get_stats
*
* Returns a pointer to the interface statistics.
*
* Input : dev - a pointer to the required interface
*
* Output : a pointer to the interface's statistics
*/
static struct net_device_stats *mv64340_eth_get_stats(struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
return &mp->stats;
}
/*
 * mv64340_eth_init
 *
 * First function called after registering the network device.
 * Its purpose is to initialize the device as an ethernet device,
 * fill the structure that was given in registration with pointers
 * to functions, and set the MAC address of the interface.
 *
 * Input : number of the port to initialize
 * Output : pointer to the new net_device, or NULL on failure
*/
static struct net_device *mv64340_eth_init(int port_num)
{
struct mv64340_private *mp;
struct net_device *dev;
int err;
dev = alloc_etherdev(sizeof(struct mv64340_private));
if (!dev)
return NULL;
mp = netdev_priv(dev);
dev->irq = ETH_PORT0_IRQ_NUM + port_num;
dev->open = mv64340_eth_open;
dev->stop = mv64340_eth_stop;
dev->hard_start_xmit = mv64340_eth_start_xmit;
dev->get_stats = mv64340_eth_get_stats;
dev->set_mac_address = mv64340_eth_set_mac_address;
dev->set_multicast_list = mv64340_eth_set_rx_mode;
/* Tx timeout handler */
dev->tx_timeout = mv64340_eth_tx_timeout;
#ifdef MV64340_NAPI
dev->poll = mv64340_poll;
dev->weight = 64;
#endif
dev->watchdog_timeo = 2 * HZ;
dev->tx_queue_len = MV64340_TX_QUEUE_SIZE;
dev->base_addr = 0;
dev->change_mtu = mv64340_eth_change_mtu;
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
#ifdef MAX_SKB_FRAGS
#ifndef CONFIG_JAGUAR_DMALOW
/*
 * Zero copy can only work if we use Discovery II memory. Otherwise,
 * we would have to map the buffers into ISA memory, which is limited
 * to 16 MB.
 */
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM;
#endif
#endif
#endif
mp->port_num = port_num;
/* Configure the timeout task */
INIT_WORK(&mp->tx_timeout_task,
(void (*)(void *))mv64340_eth_tx_timeout_task, dev);
spin_lock_init(&mp->lock);
/* set MAC addresses */
memcpy(dev->dev_addr, prom_mac_addr_base, 6);
dev->dev_addr[5] += port_num;
err = register_netdev(dev);
if (err)
goto out_free_dev;
printk(KERN_NOTICE "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
dev->name, port_num,
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
if (dev->features & NETIF_F_SG)
printk(KERN_NOTICE "Scatter-Gather enabled\n");
if (dev->features & NETIF_F_IP_CSUM)
printk(KERN_NOTICE "TX TCP/IP checksumming supported\n");
printk(KERN_NOTICE "RX TCP/UDP checksum offload ON\n");
printk(KERN_NOTICE "TX and RX interrupt coalescing ON\n");
#ifdef MV64340_NAPI
printk("RX NAPI Enabled \n");
#endif
return dev;
out_free_dev:
free_netdev(dev);
return NULL;
}
static void mv64340_eth_remove(struct net_device *dev)
{
struct mv64340_private *mp = netdev_priv(dev);
unregister_netdev(dev);
flush_scheduled_work();
free_netdev(dev);
}
static struct net_device *mv64340_dev0;
static struct net_device *mv64340_dev1;
static struct net_device *mv64340_dev2;
/*
* mv64340_init_module
*
* Registers the network drivers into the Linux kernel
*
* Input : N/A
*
* Output : N/A
*/
static int __init mv64340_init_module(void)
{
printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
#ifdef CONFIG_MV643XX_ETH_0
mv64340_dev0 = mv64340_eth_init(0);
if (!mv64340_dev0) {
printk(KERN_ERR
"Error registering MV-64360 ethernet port 0\n");
}
#endif
#ifdef CONFIG_MV643XX_ETH_1
mv64340_dev1 = mv64340_eth_init(1);
if (!mv64340_dev1) {
printk(KERN_ERR
"Error registering MV-64360 ethernet port 1\n");
}
#endif
#ifdef CONFIG_MV643XX_ETH_2
mv64340_dev2 = mv64340_eth_init(2);
if (!mv64340_dev2) {
printk(KERN_ERR
"Error registering MV-64360 ethernet port 2\n");
}
#endif
return 0;
}
/*
* mv64340_cleanup_module
*
* Unregisters the network drivers from the Linux kernel
*
* Input : N/A
*
* Output : N/A
*/
static void __exit mv64340_cleanup_module(void)
{
if (mv64340_dev2)
mv64340_eth_remove(mv64340_dev2);
if (mv64340_dev1)
mv64340_eth_remove(mv64340_dev1);
if (mv64340_dev0)
mv64340_eth_remove(mv64340_dev0);
}
module_init(mv64340_init_module);
module_exit(mv64340_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm and Manish Lachwani");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV64340");
/*
* The second part is the low level driver of the gigE ethernet ports.
*/
/*
* Marvell's Gigabit Ethernet controller low level driver
*
* DESCRIPTION:
* This file introduces a low level API to Marvell's Gigabit Ethernet
* controller. This Gigabit Ethernet Controller driver API controls
* 1) Operations (i.e. port init, start, reset etc').
* 2) Data flow (i.e. port send, receive etc').
* Each Gigabit Ethernet port is controlled via
* struct mv64340_private.
* This struct includes user configuration information as well as
* driver internal data needed for its operations.
*
* Supported Features:
* - This low level driver is OS independent. Allocating memory for
* the descriptor rings and buffers is not within the scope of
* this driver.
* - The user is freed from managing the Rx/Tx queues.
* - This low level driver introduces a functional API that enables
* the user to operate Marvell's Gigabit Ethernet Controller in a
* convenient way.
* - Simple Gigabit Ethernet port operation API.
* - Simple Gigabit Ethernet port data flow API.
* - Data flow and operation API support per queue functionality.
* - Support cached descriptors for better performance.
* - Enable access to all four DRAM banks and internal SRAM memory
* spaces.
* - PHY access and control API.
* - Port control register configuration API.
* - Full control over Unicast and Multicast MAC configurations.
*
* Operation flow:
*
* Initialization phase
* This phase completes the initialization of the mv64340_private
* struct.
* User information regarding port configuration has to be set
* prior to calling the port initialization routine.
*
* In this phase any port Tx/Rx activity is halted, the MIB counters
* are cleared, the PHY address is set according to the user parameter,
* and access to the DRAM and internal SRAM memory spaces is enabled.
*
* Driver ring initialization
* Allocating memory for the descriptor rings and buffers is not
* within the scope of this driver. Thus, the user is required to
* allocate memory for the descriptor rings and buffers. Those
* memory parameters are used by the Rx and Tx ring initialization
* routines in order to form the descriptor linked list into a
* ring.
* Note: Pay special attention to alignment issues when using
* cached descriptors/buffers. In this phase the driver stores
* information in the mv64340_private struct regarding each queue
* ring.
*
* Driver start
* This phase prepares the Ethernet port for Rx and Tx activity.
* It uses the information stored in the mv64340_private struct to
* initialize the various port registers.
*
* Data flow:
* All packet references to/from the driver are done using
* struct pkt_info.
* This struct is a unified struct used with Rx and Tx operations.
* This way the user is not required to be familiar with either
* the Tx or the Rx descriptor structures.
* The driver's descriptor rings are managed by indexes.
* Those indexes control the ring resources and are used to indicate
* a SW resource error:
* 'current'
* This index points to the current available resource for use. For
* example in Rx process this index will point to the descriptor
* that will be passed to the user upon calling the receive routine.
* In Tx process, this index will point to the descriptor
* that will be assigned with the user packet info and transmitted.
* 'used'
* This index points to the descriptor that needs to restore its
* resources. For example in Rx process, using the Rx buffer return
* API will attach the buffer returned in packet info to the
* descriptor pointed by 'used'. In Tx process, using the Tx
* descriptor return will merely return the user packet info with
* the command status of the transmitted buffer pointed by the
* 'used' index. Nevertheless, it is essential to use this routine
* to update the 'used' index.
* 'first'
* This index supports Tx Scatter-Gather. It points to the first
* descriptor of a packet assembled of multiple buffers. For example,
* if in the middle of such a packet we have a Tx resource error, the
* 'curr' index gets the value of 'first' to indicate that the ring
* returned to its state before trying to transmit this packet.
*
* Receive operation:
* The eth_port_receive API fills the packet information struct,
* passed by the caller, with received information from the
* 'current' SDMA descriptor.
* It is the user's responsibility to return this resource back
* to the Rx descriptor ring to enable its reuse.
* Returning an Rx resource is done using the eth_rx_return_buff API.
*
* Transmit operation:
* The eth_port_send API supports Scatter-Gather, which enables
* sending a packet spanned over multiple buffers. A packet is
* transmitted only once the 'LAST' bit is set in the command
* status field of the last packet info structure put into the
* Tx descriptor ring. This API also considers restrictions
* regarding buffer alignment and sizes.
* The user must return a Tx resource after ensuring the buffer
* has been transmitted to enable the Tx ring indexes to update.
*
* BOARD LAYOUT
* This device is on-board. No jumper diagram is necessary.
*
* EXTERNAL INTERFACE
*
* Prior to calling the initialization routine eth_port_init() the user
* must set the following fields under mv64340_private struct:
* port_num User Ethernet port number.
* port_mac_addr[6] User defined port MAC address.
* port_config User port configuration value.
* port_config_extend User port config extend value.
* port_sdma_config User port SDMA config value.
* port_serial_control User port serial control value.
*
* This driver introduces a set of default values:
* PORT_CONFIG_VALUE Default port configuration value
* PORT_CONFIG_EXTEND_VALUE Default port extend configuration value
* PORT_SDMA_CONFIG_VALUE Default sdma control value
* PORT_SERIAL_CONTROL_VALUE Default port serial control value
*
* This driver data flow is done using the struct pkt_info which
* is a unified struct for Rx and Tx operations:
*
* byte_cnt Tx/Rx descriptor buffer byte count.
* l4i_chk CPU provided TCP Checksum. For Tx operation
* only.
* cmd_sts Tx/Rx descriptor command status.
* buf_ptr Tx/Rx descriptor buffer pointer.
* return_info Tx/Rx user resource return information.
*/
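/*
 * A minimal sketch (illustration only, not part of the driver) of how the
 * 'current' and 'used' indexes described above chase each other around a
 * ring of QUEUE_SIZE descriptors. The hypothetical names ring_curr and
 * ring_used stand in for the Tx and Rx index pairs kept in struct
 * mv64340_private:
 *
 *	ring_curr = (ring_curr + 1) % QUEUE_SIZE;  // claim a resource
 *	if (ring_curr == ring_used)
 *		resource_err = 1;                  // ring exhausted
 *	...
 *	ring_used = (ring_used + 1) % QUEUE_SIZE;  // return a resource
 *	resource_err = 0;                          // any return clears it
 */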
/* defines */
/* SDMA command macros */
#define ETH_ENABLE_TX_QUEUE(eth_port) \
MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1)
#define ETH_DISABLE_TX_QUEUE(eth_port) \
MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), \
(1 << 8))
#define ETH_ENABLE_RX_QUEUE(rx_queue, eth_port) \
MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), \
(1 << rx_queue))
#define ETH_DISABLE_RX_QUEUE(rx_queue, eth_port) \
MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), \
(1 << (8 + rx_queue)))
#define LINK_UP_TIMEOUT 100000
#define PHY_BUSY_TIMEOUT 10000000
/* locals */
/* PHY routines */
static int ethernet_phy_get(unsigned int eth_port_num);
/* Ethernet Port routines */
static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
int option);
/*
* eth_port_init - Initialize the Ethernet port driver
*
* DESCRIPTION:
* This function prepares the ethernet port to start its activity:
* 1) Completes the ethernet port driver struct initialization toward port
* start routine.
* 2) Resets the device to a quiescent state in case of warm reboot.
* 3) Enables SDMA access to all four DRAM banks as well as internal SRAM.
* 4) Cleans the MAC tables. The reset status of those tables is unknown.
* 5) Sets the PHY address.
* Note: Call this routine prior to eth_port_start routine and after
* setting user values in the user fields of Ethernet port control
* struct.
*
* INPUT:
* struct mv64340_private *mp Ethernet port control struct
*
* OUTPUT:
* See description.
*
* RETURN:
* None.
*/
static void eth_port_init(struct mv64340_private * mp)
{
mp->port_config = PORT_CONFIG_VALUE;
mp->port_config_extend = PORT_CONFIG_EXTEND_VALUE;
#if defined(__BIG_ENDIAN)
mp->port_sdma_config = PORT_SDMA_CONFIG_VALUE;
#elif defined(__LITTLE_ENDIAN)
mp->port_sdma_config = PORT_SDMA_CONFIG_VALUE |
ETH_BLM_RX_NO_SWAP | ETH_BLM_TX_NO_SWAP;
#else
#error One of __LITTLE_ENDIAN or __BIG_ENDIAN must be defined!
#endif
mp->port_serial_control = PORT_SERIAL_CONTROL_VALUE;
mp->port_rx_queue_command = 0;
mp->port_tx_queue_command = 0;
mp->rx_resource_err = 0;
mp->tx_resource_err = 0;
eth_port_reset(mp->port_num);
eth_port_init_mac_tables(mp->port_num);
ethernet_phy_reset(mp->port_num);
}
/*
* eth_port_start - Start the Ethernet port activity.
*
* DESCRIPTION:
* This routine prepares the Ethernet port for Rx and Tx activity:
* 1. Initialize Tx and Rx Current Descriptor Pointer for each queue that
* has been initialized a descriptor's ring (using
* ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx)
* 2. Initialize and enable the Ethernet configuration port by writing to
* the port's configuration and command registers.
* 3. Initialize and enable the SDMA by writing to the SDMA's
* configuration and command registers. After completing these steps,
* the ethernet port SDMA can start performing Rx and Tx activities.
*
* Note: Each Rx and Tx queue descriptor's list must be initialized prior
* to calling this function (use ether_init_tx_desc_ring for Tx queues
* and ether_init_rx_desc_ring for Rx queues).
*
* INPUT:
* struct mv64340_private *mp Ethernet port control struct
*
* OUTPUT:
* Ethernet port is ready to receive and transmit.
*
* RETURN:
* false if the port PHY is not up.
* true otherwise.
*/
static int eth_port_start(struct mv64340_private *mp)
{
unsigned int eth_port_num = mp->port_num;
int tx_curr_desc, rx_curr_desc;
unsigned int phy_reg_data;
/* Assignment of Tx CTRP of given queue */
tx_curr_desc = mp->tx_curr_desc_q;
MV_WRITE(MV64340_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(eth_port_num),
(struct eth_tx_desc *) mp->tx_desc_dma + tx_curr_desc);
/* Assignment of Rx CRDP of given queue */
rx_curr_desc = mp->rx_curr_desc_q;
MV_WRITE(MV64340_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(eth_port_num),
(struct eth_rx_desc *) mp->rx_desc_dma + rx_curr_desc);
/* Add the assigned Ethernet address to the port's address table */
eth_port_uc_addr_set(mp->port_num, mp->port_mac_addr);
/* Assign port configuration and command. */
MV_WRITE(MV64340_ETH_PORT_CONFIG_REG(eth_port_num),
mp->port_config);
MV_WRITE(MV64340_ETH_PORT_CONFIG_EXTEND_REG(eth_port_num),
mp->port_config_extend);
MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num),
mp->port_serial_control);
MV_SET_REG_BITS(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num),
ETH_SERIAL_PORT_ENABLE);
/* Assign port SDMA configuration */
MV_WRITE(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num),
mp->port_sdma_config);
/* Enable port Rx. */
MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port_num),
mp->port_rx_queue_command);
/* Check if link is up */
eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data);
if (!(phy_reg_data & 0x20))
return 0;
return 1;
}
/*
* eth_port_uc_addr_set - Set the port unicast address.
*
* DESCRIPTION:
* This function sets the port Ethernet MAC address.
*
* INPUT:
* unsigned int eth_port_num Port number.
* char * p_addr Address to be set
*
* OUTPUT:
* Sets the MAC address low and high registers and calls eth_port_uc_addr()
* to set the unicast table with the proper information.
*
* RETURN:
* N/A.
*
*/
static void eth_port_uc_addr_set(unsigned int eth_port_num,
unsigned char *p_addr)
{
unsigned int mac_h;
unsigned int mac_l;
mac_l = (p_addr[4] << 8) | (p_addr[5]);
mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) |
(p_addr[2] << 8) | (p_addr[3] << 0);
MV_WRITE(MV64340_ETH_MAC_ADDR_LOW(eth_port_num), mac_l);
MV_WRITE(MV64340_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h);
/* Accept frames of this address */
eth_port_uc_addr(eth_port_num, p_addr[5], ACCEPT_MAC_ADDR);
return;
}
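/*
 * Worked example of the packing above: for the (hypothetical) MAC address
 * 00:0a:0b:0c:0d:0e, mac_h = 0x000a0b0c and mac_l = 0x00000d0e, i.e. the
 * first four octets land in the high register and the last two in the low
 * register.
 */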
/*
* eth_port_uc_addr - This function Set the port unicast address table
*
* DESCRIPTION:
* This function locates the proper entry in the Unicast table for the
* specified MAC nibble and sets its properties according to function
* parameters.
*
* INPUT:
* unsigned int eth_port_num Port number.
* unsigned char uc_nibble Unicast MAC Address last nibble.
* int option 0 = Add, 1 = remove address.
*
* OUTPUT:
* This function add/removes MAC addresses from the port unicast address
* table.
*
* RETURN:
* true if the operation succeeded.
* false if option parameter is invalid.
*
*/
static int eth_port_uc_addr(unsigned int eth_port_num,
unsigned char uc_nibble, int option)
{
unsigned int unicast_reg;
unsigned int tbl_offset;
unsigned int reg_offset;
/* Locate the Unicast table entry */
uc_nibble = (0xf & uc_nibble);
tbl_offset = (uc_nibble / 4) * 4; /* Register offset from unicast table base */
reg_offset = uc_nibble % 4; /* Entry offset within the above register */
switch (option) {
case REJECT_MAC_ADDR:
/* Clear accepts frame bit at specified unicast DA table entry */
unicast_reg = MV_READ((MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
(eth_port_num) + tbl_offset));
unicast_reg &= ~(0x01 << (8 * reg_offset));
MV_WRITE(
(MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
(eth_port_num) + tbl_offset), unicast_reg);
break;
case ACCEPT_MAC_ADDR:
/* Set accepts frame bit at unicast DA filter table entry */
unicast_reg =
MV_READ(
(MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
(eth_port_num) + tbl_offset));
unicast_reg |= (0x01 << (8 * reg_offset));
MV_WRITE(
(MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
(eth_port_num) + tbl_offset), unicast_reg);
break;
default:
return 0;
}
return 1;
}
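/*
 * Worked example of the table lookup above: for uc_nibble = 0xb,
 * tbl_offset = (11 / 4) * 4 = 8 (the third 32-bit register of the table)
 * and reg_offset = 11 % 4 = 3, so ACCEPT_MAC_ADDR sets bit 24
 * (0x01 << 24) of the register at the unicast table base + 8.
 */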
/*
* eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
*
* DESCRIPTION:
* Go through all the DA filter tables (Unicast, Special Multicast &
* Other Multicast) and set each entry to 0.
*
* INPUT:
* unsigned int eth_port_num Ethernet Port number.
*
* OUTPUT:
* Multicast and Unicast packets are rejected.
*
* RETURN:
* None.
*/
static void eth_port_init_mac_tables(unsigned int eth_port_num)
{
int table_index;
/* Clear DA filter unicast table (Ex_dFUT) */
for (table_index = 0; table_index <= 0xC; table_index += 4)
MV_WRITE(
(MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
(eth_port_num) + table_index), 0);
for (table_index = 0; table_index <= 0xFC; table_index += 4) {
/* Clear DA filter special multicast table (Ex_dFSMT) */
MV_WRITE(
(MV64340_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
(eth_port_num) + table_index), 0);
/* Clear DA filter other multicast table (Ex_dFOMT) */
MV_WRITE((MV64340_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
(eth_port_num) + table_index), 0);
}
}
/*
* eth_clear_mib_counters - Clear all MIB counters
*
* DESCRIPTION:
* This function clears all MIB counters of a specific ethernet port.
* A read from the MIB counter will reset the counter.
*
* INPUT:
* unsigned int eth_port_num Ethernet Port number.
*
* OUTPUT:
* After reading all MIB counters, the counters are reset.
*
* RETURN:
* None.
*
*/
static void eth_clear_mib_counters(unsigned int eth_port_num)
{
int i;
/* Perform dummy reads from MIB counters */
for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i <= ETH_MIB_LATE_COLLISION; i += 4)
MV_READ(MV64340_ETH_MIB_COUNTERS_BASE(eth_port_num) + i);
}
/*
* ethernet_phy_get - Get the ethernet port PHY address.
*
* DESCRIPTION:
* This routine returns the given ethernet port PHY address.
*
* INPUT:
* unsigned int eth_port_num Ethernet Port number.
*
* OUTPUT:
* None.
*
* RETURN:
* PHY address.
*
*/
static int ethernet_phy_get(unsigned int eth_port_num)
{
unsigned int reg_data;
reg_data = MV_READ(MV64340_ETH_PHY_ADDR_REG);
return ((reg_data >> (5 * eth_port_num)) & 0x1f);
}
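/*
 * Worked example of the extraction above: the PHY address register packs
 * five address bits per port. For eth_port_num = 1 the shift is 5, so a
 * (hypothetical) register value of 0x00000421 yields PHY address
 * (0x421 >> 5) & 0x1f = 1.
 */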
/*
* ethernet_phy_reset - Reset Ethernet port PHY.
*
* DESCRIPTION:
* This routine utilizes the SMI interface to reset the ethernet port PHY.
* The routine then waits until the link comes up again or the link-up
* wait times out.
*
* INPUT:
* unsigned int eth_port_num Ethernet Port number.
*
* OUTPUT:
* The ethernet port PHY renews its link.
*
* RETURN:
* false if the link did not come up before the timeout.
* true otherwise.
*
*/
static int ethernet_phy_reset(unsigned int eth_port_num)
{
unsigned int time_out = 50;
unsigned int phy_reg_data;
/* Reset the PHY */
eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */
eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);
/* Poll on the PHY LINK */
do {
eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data);
if (time_out-- == 0)
return 0;
} while (!(phy_reg_data & 0x20));
return 1;
}
/*
* eth_port_reset - Reset Ethernet port
*
* DESCRIPTION:
* This routine resets the chip by aborting any SDMA engine activity and
* clearing the MIB counters. The Receiver and the Transmit unit are in
* idle state after this command is performed and the port is disabled.
*
* INPUT:
* unsigned int eth_port_num Ethernet Port number.
*
* OUTPUT:
* Channel activity is halted.
*
* RETURN:
* None.
*
*/
static void eth_port_reset(unsigned int eth_port_num)
{
unsigned int reg_data;
/* Stop Tx port activity. Check port Tx activity. */
reg_data =
MV_READ(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port_num));
if (reg_data & 0xFF) {
/* Issue stop command for active channels only */
MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG
(eth_port_num), (reg_data << 8));
/* Wait for all Tx activity to terminate. */
do {
/* Check port cause register that all Tx queues are stopped */
reg_data =
MV_READ
(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG
(eth_port_num));
}
while (reg_data & 0xFF);
}
/* Stop Rx port activity. Check port Rx activity. */
reg_data =
MV_READ(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
(eth_port_num));
if (reg_data & 0xFF) {
/* Issue stop command for active channels only */
MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
(eth_port_num), (reg_data << 8));
/* Wait for all Rx activity to terminate. */
do {
/* Check port cause register that all Rx queues are stopped */
reg_data =
MV_READ
(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
(eth_port_num));
}
while (reg_data & 0xFF);
}
/* Clear all MIB counters */
eth_clear_mib_counters(eth_port_num);
/* Reset the Enable bit in the Configuration Register */
reg_data =
MV_READ(MV64340_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num));
reg_data &= ~ETH_SERIAL_PORT_ENABLE;
MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num), reg_data);
return;
}
/*
* ethernet_set_config_reg - Set specified bits in configuration register.
*
* DESCRIPTION:
* This function sets specified bits in the given ethernet
* configuration register.
*
* INPUT:
* unsigned int eth_port_num Ethernet Port number.
* unsigned int value 32 bit value.
*
* OUTPUT:
* The set bits in the value parameter are set in the configuration
* register.
*
* RETURN:
* None.
*
*/
static void ethernet_set_config_reg(unsigned int eth_port_num,
unsigned int value)
{
unsigned int eth_config_reg;
eth_config_reg =
MV_READ(MV64340_ETH_PORT_CONFIG_REG(eth_port_num));
eth_config_reg |= value;
MV_WRITE(MV64340_ETH_PORT_CONFIG_REG(eth_port_num),
eth_config_reg);
}
/*
* ethernet_get_config_reg - Get the port configuration register
*
* DESCRIPTION:
* This function returns the configuration register value of the given
* ethernet port.
*
* INPUT:
* unsigned int eth_port_num Ethernet Port number.
*
* OUTPUT:
* None.
*
* RETURN:
* Port configuration register value.
*/
static unsigned int ethernet_get_config_reg(unsigned int eth_port_num)
{
unsigned int eth_config_reg;
eth_config_reg = MV_READ(MV64340_ETH_PORT_CONFIG_REG
(eth_port_num));
return eth_config_reg;
}
/*
* eth_port_read_smi_reg - Read PHY registers
*
* DESCRIPTION:
* This routine utilizes the SMI interface to interact with the PHY in
* order to perform a PHY register read.
*
* INPUT:
* unsigned int eth_port_num Ethernet Port number.
* unsigned int phy_reg PHY register address offset.
* unsigned int *value Register value buffer.
*
* OUTPUT:
* Write the value of a specified PHY register into given buffer.
*
* RETURN:
* false if the PHY is busy or read data is not in valid state.
* true otherwise.
*
*/
static int eth_port_read_smi_reg(unsigned int eth_port_num,
unsigned int phy_reg, unsigned int *value)
{
int phy_addr = ethernet_phy_get(eth_port_num);
unsigned int time_out = PHY_BUSY_TIMEOUT;
unsigned int reg_value;
/* first check that it is not busy */
do {
reg_value = MV_READ(MV64340_ETH_SMI_REG);
if (time_out-- == 0)
return 0;
} while (reg_value & ETH_SMI_BUSY);
/* not busy */
MV_WRITE(MV64340_ETH_SMI_REG,
(phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
time_out = PHY_BUSY_TIMEOUT; /* initialize the time out var again */
/* Wait for the read data to become valid */
do {
reg_value = MV_READ(MV64340_ETH_SMI_REG);
if (time_out-- == 0)
return 0;
} while (!(reg_value & ETH_SMI_READ_VALID));
*value = reg_value & 0xffff;
return 1;
}
/*
* eth_port_write_smi_reg - Write to PHY registers
*
* DESCRIPTION:
* This routine utilizes the SMI interface to interact with the PHY in
* order to perform writes to PHY registers.
*
* INPUT:
* unsigned int eth_port_num Ethernet Port number.
* unsigned int phy_reg PHY register address offset.
* unsigned int value Register value.
*
* OUTPUT:
* Write the given value to the specified PHY register.
*
* RETURN:
* false if the PHY is busy.
* true otherwise.
*
*/
static int eth_port_write_smi_reg(unsigned int eth_port_num,
unsigned int phy_reg, unsigned int value)
{
unsigned int time_out = PHY_BUSY_TIMEOUT;
unsigned int reg_value;
int phy_addr;
phy_addr = ethernet_phy_get(eth_port_num);
/* first check that it is not busy */
do {
reg_value = MV_READ(MV64340_ETH_SMI_REG);
if (time_out-- == 0)
return 0;
} while (reg_value & ETH_SMI_BUSY);
/* not busy */
MV_WRITE(MV64340_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
ETH_SMI_OPCODE_WRITE | (value & 0xffff));
return 1;
}
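/*
 * Layout of the SMI command word assembled by the two routines above,
 * as implied by the shifts they use: bits 15:0 carry the write data,
 * bits 20:16 the PHY address, bits 25:21 the PHY register offset, and
 * bit 26 the opcode (ETH_SMI_OPCODE_READ for reads, 0 for writes).
 * For example, reading register 1 of PHY address 8 writes
 * (8 << 16) | (1 << 21) | ETH_SMI_OPCODE_READ = 0x04280000.
 */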
/*
* eth_port_send - Send an Ethernet packet
*
* DESCRIPTION:
* This routine sends a given packet described by the p_pkt_info
* parameter. It supports transmitting a packet spanned over multiple
* buffers. The routine updates the 'curr' and 'first' indexes according
* to the packet segment passed to it. In case the packet segment is the
* first, the 'first' index is updated. In any case, the 'curr' index is
* updated. If the routine runs into a Tx resource error, it assigns the
* 'first' value to the 'curr' index. This way the function can abort the
* Tx process of a packet spanning multiple descriptors.
*
* INPUT:
* struct mv64340_private *mp Ethernet Port Control struct.
* struct pkt_info *p_pkt_info User packet buffer.
*
* OUTPUT:
* Tx ring 'curr' and 'first' indexes are updated.
*
* RETURN:
* ETH_QUEUE_FULL in case of Tx resource error.
* ETH_ERROR in case the routine can not access Tx desc ring.
* ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource.
* ETH_OK otherwise.
*
*/
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
/*
* Modified to include the first descriptor pointer in case of SG
*/
static ETH_FUNC_RET_STATUS eth_port_send(struct mv64340_private * mp,
struct pkt_info * p_pkt_info)
{
int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc;
volatile struct eth_tx_desc *current_descriptor;
volatile struct eth_tx_desc *first_descriptor;
u32 command_status, first_chip_ptr;
/* Do not process Tx ring in case of Tx ring resource error */
if (mp->tx_resource_err)
return ETH_QUEUE_FULL;
/* Get the Tx Desc ring indexes */
tx_desc_curr = mp->tx_curr_desc_q;
tx_desc_used = mp->tx_used_desc_q;
current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
if (current_descriptor == NULL)
return ETH_ERROR;
tx_next_desc = (tx_desc_curr + 1) % MV64340_TX_QUEUE_SIZE;
command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
if (command_status & ETH_TX_FIRST_DESC) {
tx_first_desc = tx_desc_curr;
mp->tx_first_desc_q = tx_first_desc;
/* fill first descriptor */
first_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
first_descriptor->l4i_chk = p_pkt_info->l4i_chk;
first_descriptor->cmd_sts = command_status;
first_descriptor->byte_cnt = p_pkt_info->byte_cnt;
first_descriptor->buf_ptr = p_pkt_info->buf_ptr;
first_descriptor->next_desc_ptr = mp->tx_desc_dma +
tx_next_desc * sizeof(struct eth_tx_desc);
wmb();
} else {
tx_first_desc = mp->tx_first_desc_q;
first_descriptor = &mp->p_tx_desc_area[tx_first_desc];
if (first_descriptor == NULL) {
printk("First desc is NULL !!\n");
return ETH_ERROR;
}
if (command_status & ETH_TX_LAST_DESC)
current_descriptor->next_desc_ptr = 0x00000000;
else {
command_status |= ETH_BUFFER_OWNED_BY_DMA;
current_descriptor->next_desc_ptr = mp->tx_desc_dma +
tx_next_desc * sizeof(struct eth_tx_desc);
}
}
if (p_pkt_info->byte_cnt < 8) {
printk(" < 8 problem \n");
return ETH_ERROR;
}
current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
current_descriptor->l4i_chk = p_pkt_info->l4i_chk;
current_descriptor->cmd_sts = command_status;
mp->tx_skb[tx_desc_curr] = (struct sk_buff*) p_pkt_info->return_info;
wmb();
/* Set last desc with DMA ownership and interrupt enable. */
if (command_status & ETH_TX_LAST_DESC) {
current_descriptor->cmd_sts = command_status |
ETH_TX_ENABLE_INTERRUPT |
ETH_BUFFER_OWNED_BY_DMA;
if (!(command_status & ETH_TX_FIRST_DESC))
first_descriptor->cmd_sts |= ETH_BUFFER_OWNED_BY_DMA;
wmb();
first_chip_ptr = MV_READ(MV64340_ETH_CURRENT_SERVED_TX_DESC_PTR(mp->port_num));
/* Apply send command */
if (first_chip_ptr == 0x00000000)
MV_WRITE(MV64340_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(mp->port_num), (struct eth_tx_desc *) mp->tx_desc_dma + tx_first_desc);
ETH_ENABLE_TX_QUEUE(mp->port_num);
/*
* Finish Tx packet. Update first desc in case of Tx resource
* error */
tx_first_desc = tx_next_desc;
mp->tx_first_desc_q = tx_first_desc;
} else {
if (! (command_status & ETH_TX_FIRST_DESC) ) {
current_descriptor->cmd_sts = command_status;
wmb();
}
}
/* Check for ring index overlap in the Tx desc ring */
if (tx_next_desc == tx_desc_used) {
mp->tx_resource_err = 1;
mp->tx_curr_desc_q = tx_first_desc;
return ETH_QUEUE_LAST_RESOURCE;
}
mp->tx_curr_desc_q = tx_next_desc;
wmb();
return ETH_OK;
}
#else
static ETH_FUNC_RET_STATUS eth_port_send(struct mv64340_private * mp,
struct pkt_info * p_pkt_info)
{
int tx_desc_curr;
int tx_desc_used;
volatile struct eth_tx_desc* current_descriptor;
unsigned int command_status;
/* Do not process Tx ring in case of Tx ring resource error */
if (mp->tx_resource_err)
return ETH_QUEUE_FULL;
/* Get the Tx Desc ring indexes */
tx_desc_curr = mp->tx_curr_desc_q;
tx_desc_used = mp->tx_used_desc_q;
current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
if (current_descriptor == NULL)
return ETH_ERROR;
command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
/* XXX Is this for real ?!?!? */
/* Buffers with a payload smaller than 8 bytes must be aligned to a
* 64-bit boundary. We use the memory allocated for Tx descriptor.
* This memory is located in TX_BUF_OFFSET_IN_DESC offset within the
* Tx descriptor. */
if (p_pkt_info->byte_cnt <= 8) {
printk(KERN_ERR
"You have failed in the < 8 bytes errata - fixme\n");
return ETH_ERROR;
}
current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
mp->tx_skb[tx_desc_curr] = (struct sk_buff *) p_pkt_info->return_info;
mb();
/* Set last desc with DMA ownership and interrupt enable. */
current_descriptor->cmd_sts = command_status |
ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;
/* Apply send command */
ETH_ENABLE_TX_QUEUE(mp->port_num);
/* Finish Tx packet. Update first desc in case of Tx resource error */
tx_desc_curr = (tx_desc_curr + 1) % MV64340_TX_QUEUE_SIZE;
/* Update the current descriptor */
mp->tx_curr_desc_q = tx_desc_curr;
/* Check for ring index overlap in the Tx desc ring */
if (tx_desc_curr == tx_desc_used) {
mp->tx_resource_err = 1;
return ETH_QUEUE_LAST_RESOURCE;
}
return ETH_OK;
}
#endif
/*
* eth_tx_return_desc - Free all used Tx descriptors
*
* DESCRIPTION:
* This routine returns the transmitted packet information to the caller.
* It uses the 'first' index to support Tx desc return in case a transmit
* of a packet spanned over multiple buffers is still in progress.
* In case the Tx queue was in "resource error" condition, where there are
* no available Tx resources, the function resets the resource error flag.
*
* INPUT:
* struct mv64340_private *mp Ethernet Port Control struct.
* struct pkt_info *p_pkt_info User packet buffer.
*
* OUTPUT:
* Tx ring 'first' and 'used' indexes are updated.
*
* RETURN:
* ETH_ERROR in case the routine can not access Tx desc ring.
* ETH_RETRY in case there is transmission in progress.
* ETH_END_OF_JOB if the routine has nothing to release.
* ETH_OK otherwise.
*
*/
static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv64340_private * mp,
struct pkt_info * p_pkt_info)
{
int tx_desc_used, tx_desc_curr;
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
int tx_first_desc;
#endif
volatile struct eth_tx_desc *p_tx_desc_used;
unsigned int command_status;
/* Get the Tx Desc ring indexes */
tx_desc_curr = mp->tx_curr_desc_q;
tx_desc_used = mp->tx_used_desc_q;
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
tx_first_desc = mp->tx_first_desc_q;
#endif
p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
/* XXX Sanity check */
if (p_tx_desc_used == NULL)
return ETH_ERROR;
command_status = p_tx_desc_used->cmd_sts;
/* Still transmitting... */
#ifndef MV64340_CHECKSUM_OFFLOAD_TX
if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
return ETH_RETRY;
#endif
/* Stop release. About to overlap the current available Tx descriptor */
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
if (tx_desc_used == tx_first_desc && !mp->tx_resource_err)
return ETH_END_OF_JOB;
#else
if (tx_desc_used == tx_desc_curr && !mp->tx_resource_err)
return ETH_END_OF_JOB;
#endif
/* Pass the packet information to the caller */
p_pkt_info->cmd_sts = command_status;
p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
mp->tx_skb[tx_desc_used] = NULL;
/* Update the next descriptor to release. */
mp->tx_used_desc_q = (tx_desc_used + 1) % MV64340_TX_QUEUE_SIZE;
/* Any Tx return cancels the Tx resource error status */
mp->tx_resource_err = 0;
return ETH_OK;
}
/*
* eth_port_receive - Get received information from Rx ring.
*
* DESCRIPTION:
* This routine returns the received data to the caller. There is no
* data copying during routine operation. All information is returned
* using pointer to packet information struct passed from the caller.
* If the routine exhausts Rx ring resources then the resource error flag
* is set.
*
* INPUT:
* struct mv64340_private *mp Ethernet Port Control struct.
* struct pkt_info *p_pkt_info User packet buffer.
*
* OUTPUT:
* Rx ring current and used indexes are updated.
*
* RETURN:
* ETH_ERROR in case the routine can not access Rx desc ring.
* ETH_QUEUE_FULL if Rx ring resources are exhausted.
* ETH_END_OF_JOB if there is no received data.
* ETH_OK otherwise.
*/
static ETH_FUNC_RET_STATUS eth_port_receive(struct mv64340_private * mp,
struct pkt_info * p_pkt_info)
{
int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
volatile struct eth_rx_desc * p_rx_desc;
unsigned int command_status;
/* Do not process Rx ring in case of Rx ring resource error */
if (mp->rx_resource_err)
return ETH_QUEUE_FULL;
/* Get the Rx Desc ring 'curr and 'used' indexes */
rx_curr_desc = mp->rx_curr_desc_q;
rx_used_desc = mp->rx_used_desc_q;
p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc];
/* The following parameters are used to save readings from memory */
command_status = p_rx_desc->cmd_sts;
/* Nothing to receive... */
if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
return ETH_END_OF_JOB;
p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
p_pkt_info->cmd_sts = command_status;
p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET;
p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
p_pkt_info->l4i_chk = p_rx_desc->buf_size;
/* Clean the return info field to indicate that the packet has been */
/* moved to the upper layers */
mp->rx_skb[rx_curr_desc] = NULL;
/* Update current index in data structure */
rx_next_curr_desc = (rx_curr_desc + 1) % MV64340_RX_QUEUE_SIZE;
mp->rx_curr_desc_q = rx_next_curr_desc;
/* Rx descriptors exhausted. Set the Rx ring resource error flag */
if (rx_next_curr_desc == rx_used_desc)
mp->rx_resource_err = 1;
mb();
return ETH_OK;
}
/*
* eth_rx_return_buff - Returns a Rx buffer back to the Rx ring.
*
* DESCRIPTION:
* This routine returns a Rx buffer back to the Rx ring. It retrieves the
* next 'used' descriptor and attaches the returned buffer to it.
* In case the Rx ring was in "resource error" condition, where there are
* no available Rx resources, the function resets the resource error flag.
*
* INPUT:
* struct mv64340_private *mp Ethernet Port Control struct.
* struct pkt_info *p_pkt_info Information on the returned buffer.
*
* OUTPUT:
* New available Rx resource in Rx descriptor ring.
*
* RETURN:
* ETH_ERROR in case the routine can not access Rx desc ring.
* ETH_OK otherwise.
*/
static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv64340_private * mp,
struct pkt_info * p_pkt_info)
{
int used_rx_desc; /* Where to return Rx resource */
volatile struct eth_rx_desc* p_used_rx_desc;
/* Get 'used' Rx descriptor */
used_rx_desc = mp->rx_used_desc_q;
p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc];
p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
p_used_rx_desc->buf_size = p_pkt_info->byte_cnt;
mp->rx_skb[used_rx_desc] = p_pkt_info->return_info;
/* Flush the write pipe */
mb();
/* Return the descriptor to DMA ownership */
p_used_rx_desc->cmd_sts =
ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
/* Flush descriptor and CPU pipe */
mb();
/* Move the used descriptor pointer to the next descriptor */
mp->rx_used_desc_q = (used_rx_desc + 1) % MV64340_RX_QUEUE_SIZE;
/* Any Rx return cancels the Rx resource error status */
mp->rx_resource_err = 0;
return ETH_OK;
}
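/*
 * Typical usage of the two Rx calls above, as a sketch only (assuming, as
 * the high level driver in the first part of this file does, that the
 * caller allocates the skb's):
 *
 *	struct pkt_info pkt_info;
 *
 *	while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
 *		// hand pkt_info.return_info (the skb) to the stack
 *	}
 *	// later, for each freshly allocated replacement buffer:
 *	//	fill pkt_info.buf_ptr, byte_cnt and return_info, then call
 *	//	eth_rx_return_buff(mp, &pkt_info);
 */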
#ifndef __MV64340_ETH_H__
#define __MV64340_ETH_H__
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mv643xx.h>
#define BIT0 0x00000001
#define BIT1 0x00000002
#define BIT2 0x00000004
#define BIT3 0x00000008
#define BIT4 0x00000010
#define BIT5 0x00000020
#define BIT6 0x00000040
#define BIT7 0x00000080
#define BIT8 0x00000100
#define BIT9 0x00000200
#define BIT10 0x00000400
#define BIT11 0x00000800
#define BIT12 0x00001000
#define BIT13 0x00002000
#define BIT14 0x00004000
#define BIT15 0x00008000
#define BIT16 0x00010000
#define BIT17 0x00020000
#define BIT18 0x00040000
#define BIT19 0x00080000
#define BIT20 0x00100000
#define BIT21 0x00200000
#define BIT22 0x00400000
#define BIT23 0x00800000
#define BIT24 0x01000000
#define BIT25 0x02000000
#define BIT26 0x04000000
#define BIT27 0x08000000
#define BIT28 0x10000000
#define BIT29 0x20000000
#define BIT30 0x40000000
#define BIT31 0x80000000
/*
* The first part is the high level driver of the gigE ethernet ports.
*/
#define ETH_PORT0_IRQ_NUM 48 /* main high register, bit0 */
#define ETH_PORT1_IRQ_NUM (ETH_PORT0_IRQ_NUM + 1) /* main high register, bit1 */
#define ETH_PORT2_IRQ_NUM (ETH_PORT0_IRQ_NUM + 2) /* main high register, bit2 */
/* Checksum offload for Tx works */
#define MV64340_CHECKSUM_OFFLOAD_TX
#define MV64340_NAPI
#define MV64340_TX_FAST_REFILL
#undef MV64340_COAL
/*
* Number of RX / TX descriptors on RX / TX rings.
* Note that allocating RX descriptors is done by allocating the RX
* ring AND a preallocated RX buffer (skb) for each descriptor.
* Allocating TX descriptors only allocates the TX descriptor ring,
* with no preallocated TX buffers (skb's are allocated by higher layers).
*/
/* Default TX ring size is 1000 descriptors */
#define MV64340_TX_QUEUE_SIZE 1000
/* Default RX ring size is 400 descriptors */
#define MV64340_RX_QUEUE_SIZE 400
#define MV64340_TX_COAL 100
#ifdef MV64340_COAL
#define MV64340_RX_COAL 100
#endif
/*
* The second part is the low level driver of the gigE ethernet ports.
*/
/*
* Header File for : MV-643xx network interface header
*
* DESCRIPTION:
* This header file contains macros, typedefs and function declarations for
* the Marvell Gigabit Ethernet Controller.
*
* DEPENDENCIES:
* None.
*
*/
/* Default port configuration value */
#define PORT_CONFIG_VALUE \
ETH_UNICAST_NORMAL_MODE | \
ETH_DEFAULT_RX_QUEUE_0 | \
ETH_DEFAULT_RX_ARP_QUEUE_0 | \
ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP | \
ETH_RECEIVE_BC_IF_IP | \
ETH_RECEIVE_BC_IF_ARP | \
ETH_CAPTURE_TCP_FRAMES_DIS | \
ETH_CAPTURE_UDP_FRAMES_DIS | \
ETH_DEFAULT_RX_TCP_QUEUE_0 | \
ETH_DEFAULT_RX_UDP_QUEUE_0 | \
ETH_DEFAULT_RX_BPDU_QUEUE_0
/* Default port extend configuration value */
#define PORT_CONFIG_EXTEND_VALUE \
ETH_SPAN_BPDU_PACKETS_AS_NORMAL | \
ETH_PARTITION_DISABLE
/* Default sdma control value */
#define PORT_SDMA_CONFIG_VALUE \
ETH_RX_BURST_SIZE_16_64BIT | \
GT_ETH_IPG_INT_RX(0) | \
ETH_TX_BURST_SIZE_16_64BIT
#define GT_ETH_IPG_INT_RX(value) \
(((value) & 0x3fff) << 8)
/* Default port serial control value */
#define PORT_SERIAL_CONTROL_VALUE \
ETH_FORCE_LINK_PASS | \
ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \
ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | \
ETH_ADV_SYMMETRIC_FLOW_CTRL | \
ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX | \
ETH_FORCE_BP_MODE_NO_JAM | \
BIT9 | \
ETH_DO_NOT_FORCE_LINK_FAIL | \
ETH_RETRANSMIT_16_ATTEMPTS | \
ETH_ENABLE_AUTO_NEG_SPEED_GMII | \
ETH_DTE_ADV_0 | \
ETH_DISABLE_AUTO_NEG_BYPASS | \
ETH_AUTO_NEG_NO_CHANGE | \
ETH_MAX_RX_PACKET_9700BYTE | \
ETH_CLR_EXT_LOOPBACK | \
ETH_SET_FULL_DUPLEX_MODE | \
ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX
#define RX_BUFFER_MAX_SIZE 0x4000000
#define TX_BUFFER_MAX_SIZE 0x4000000
/* MAC accept/reject macros */
#define ACCEPT_MAC_ADDR 0
#define REJECT_MAC_ADDR 1
/* Buffer offset from buffer pointer */
#define RX_BUF_OFFSET 0x2
/* Gigabit Ethernet Unit Global Registers */
/* MIB Counters register definitions */
#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0
#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4
#define ETH_MIB_BAD_OCTETS_RECEIVED 0x8
#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc
#define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10
#define ETH_MIB_BAD_FRAMES_RECEIVED 0x14
#define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18
#define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c
#define ETH_MIB_FRAMES_64_OCTETS 0x20
#define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24
#define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28
#define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c
#define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30
#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
#define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38
#define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c
#define ETH_MIB_GOOD_FRAMES_SENT 0x40
#define ETH_MIB_EXCESSIVE_COLLISION 0x44
#define ETH_MIB_MULTICAST_FRAMES_SENT 0x48
#define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c
#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50
#define ETH_MIB_FC_SENT 0x54
#define ETH_MIB_GOOD_FC_RECEIVED 0x58
#define ETH_MIB_BAD_FC_RECEIVED 0x5c
#define ETH_MIB_UNDERSIZE_RECEIVED 0x60
#define ETH_MIB_FRAGMENTS_RECEIVED 0x64
#define ETH_MIB_OVERSIZE_RECEIVED 0x68
#define ETH_MIB_JABBER_RECEIVED 0x6c
#define ETH_MIB_MAC_RECEIVE_ERROR 0x70
#define ETH_MIB_BAD_CRC_EVENT 0x74
#define ETH_MIB_COLLISION 0x78
#define ETH_MIB_LATE_COLLISION 0x7c
/* Port serial status reg (PSR) */
#define ETH_INTERFACE_GMII_MII 0
#define ETH_INTERFACE_PCM BIT0
#define ETH_LINK_IS_DOWN 0
#define ETH_LINK_IS_UP BIT1
#define ETH_PORT_AT_HALF_DUPLEX 0
#define ETH_PORT_AT_FULL_DUPLEX BIT2
#define ETH_RX_FLOW_CTRL_DISABLED 0
#define ETH_RX_FLOW_CTRL_ENBALED BIT3
#define ETH_GMII_SPEED_100_10 0
#define ETH_GMII_SPEED_1000 BIT4
#define ETH_MII_SPEED_10 0
#define ETH_MII_SPEED_100 BIT5
#define ETH_NO_TX 0
#define ETH_TX_IN_PROGRESS BIT7
#define ETH_BYPASS_NO_ACTIVE 0
#define ETH_BYPASS_ACTIVE BIT8
#define ETH_PORT_NOT_AT_PARTITION_STATE 0
#define ETH_PORT_AT_PARTITION_STATE BIT9
#define ETH_PORT_TX_FIFO_NOT_EMPTY 0
#define ETH_PORT_TX_FIFO_EMPTY BIT10
/* These macros describes the Port configuration reg (Px_cR) bits */
#define ETH_UNICAST_NORMAL_MODE 0
#define ETH_UNICAST_PROMISCUOUS_MODE BIT0
#define ETH_DEFAULT_RX_QUEUE_0 0
#define ETH_DEFAULT_RX_QUEUE_1 BIT1
#define ETH_DEFAULT_RX_QUEUE_2 BIT2
#define ETH_DEFAULT_RX_QUEUE_3 (BIT2 | BIT1)
#define ETH_DEFAULT_RX_QUEUE_4 BIT3
#define ETH_DEFAULT_RX_QUEUE_5 (BIT3 | BIT1)
#define ETH_DEFAULT_RX_QUEUE_6 (BIT3 | BIT2)
#define ETH_DEFAULT_RX_QUEUE_7 (BIT3 | BIT2 | BIT1)
#define ETH_DEFAULT_RX_ARP_QUEUE_0 0
#define ETH_DEFAULT_RX_ARP_QUEUE_1 BIT4
#define ETH_DEFAULT_RX_ARP_QUEUE_2 BIT5
#define ETH_DEFAULT_RX_ARP_QUEUE_3 (BIT5 | BIT4)
#define ETH_DEFAULT_RX_ARP_QUEUE_4 BIT6
#define ETH_DEFAULT_RX_ARP_QUEUE_5 (BIT6 | BIT4)
#define ETH_DEFAULT_RX_ARP_QUEUE_6 (BIT6 | BIT5)
#define ETH_DEFAULT_RX_ARP_QUEUE_7 (BIT6 | BIT5 | BIT4)
#define ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP 0
#define ETH_REJECT_BC_IF_NOT_IP_OR_ARP BIT7
#define ETH_RECEIVE_BC_IF_IP 0
#define ETH_REJECT_BC_IF_IP BIT8
#define ETH_RECEIVE_BC_IF_ARP 0
#define ETH_REJECT_BC_IF_ARP BIT9
#define ETH_TX_AM_NO_UPDATE_ERROR_SUMMARY BIT12
#define ETH_CAPTURE_TCP_FRAMES_DIS 0
#define ETH_CAPTURE_TCP_FRAMES_EN BIT14
#define ETH_CAPTURE_UDP_FRAMES_DIS 0
#define ETH_CAPTURE_UDP_FRAMES_EN BIT15
#define ETH_DEFAULT_RX_TCP_QUEUE_0 0
#define ETH_DEFAULT_RX_TCP_QUEUE_1 BIT16
#define ETH_DEFAULT_RX_TCP_QUEUE_2 BIT17
#define ETH_DEFAULT_RX_TCP_QUEUE_3 (BIT17 | BIT16)
#define ETH_DEFAULT_RX_TCP_QUEUE_4 BIT18
#define ETH_DEFAULT_RX_TCP_QUEUE_5 (BIT18 | BIT16)
#define ETH_DEFAULT_RX_TCP_QUEUE_6 (BIT18 | BIT17)
#define ETH_DEFAULT_RX_TCP_QUEUE_7 (BIT18 | BIT17 | BIT16)
#define ETH_DEFAULT_RX_UDP_QUEUE_0 0
#define ETH_DEFAULT_RX_UDP_QUEUE_1 BIT19
#define ETH_DEFAULT_RX_UDP_QUEUE_2 BIT20
#define ETH_DEFAULT_RX_UDP_QUEUE_3 (BIT20 | BIT19)
#define ETH_DEFAULT_RX_UDP_QUEUE_4 BIT21
#define ETH_DEFAULT_RX_UDP_QUEUE_5 (BIT21 | BIT19)
#define ETH_DEFAULT_RX_UDP_QUEUE_6 (BIT21 | BIT20)
#define ETH_DEFAULT_RX_UDP_QUEUE_7 (BIT21 | BIT20 | BIT19)
#define ETH_DEFAULT_RX_BPDU_QUEUE_0 0
#define ETH_DEFAULT_RX_BPDU_QUEUE_1 BIT22
#define ETH_DEFAULT_RX_BPDU_QUEUE_2 BIT23
#define ETH_DEFAULT_RX_BPDU_QUEUE_3 (BIT23 | BIT22)
#define ETH_DEFAULT_RX_BPDU_QUEUE_4 BIT24
#define ETH_DEFAULT_RX_BPDU_QUEUE_5 (BIT24 | BIT22)
#define ETH_DEFAULT_RX_BPDU_QUEUE_6 (BIT24 | BIT23)
#define ETH_DEFAULT_RX_BPDU_QUEUE_7 (BIT24 | BIT23 | BIT22)
/* These macros describes the Port configuration extend reg (Px_cXR) bits*/
#define ETH_CLASSIFY_EN BIT0
#define ETH_SPAN_BPDU_PACKETS_AS_NORMAL 0
#define ETH_SPAN_BPDU_PACKETS_TO_RX_QUEUE_7 BIT1
#define ETH_PARTITION_DISABLE 0
#define ETH_PARTITION_ENABLE BIT2
/* Tx/Rx queue command reg (RQCR/TQCR)*/
#define ETH_QUEUE_0_ENABLE BIT0
#define ETH_QUEUE_1_ENABLE BIT1
#define ETH_QUEUE_2_ENABLE BIT2
#define ETH_QUEUE_3_ENABLE BIT3
#define ETH_QUEUE_4_ENABLE BIT4
#define ETH_QUEUE_5_ENABLE BIT5
#define ETH_QUEUE_6_ENABLE BIT6
#define ETH_QUEUE_7_ENABLE BIT7
#define ETH_QUEUE_0_DISABLE BIT8
#define ETH_QUEUE_1_DISABLE BIT9
#define ETH_QUEUE_2_DISABLE BIT10
#define ETH_QUEUE_3_DISABLE BIT11
#define ETH_QUEUE_4_DISABLE BIT12
#define ETH_QUEUE_5_DISABLE BIT13
#define ETH_QUEUE_6_DISABLE BIT14
#define ETH_QUEUE_7_DISABLE BIT15
/* These macros describes the Port Sdma configuration reg (SDCR) bits */
#define ETH_RIFB BIT0
#define ETH_RX_BURST_SIZE_1_64BIT 0
#define ETH_RX_BURST_SIZE_2_64BIT BIT1
#define ETH_RX_BURST_SIZE_4_64BIT BIT2
#define ETH_RX_BURST_SIZE_8_64BIT (BIT2 | BIT1)
#define ETH_RX_BURST_SIZE_16_64BIT BIT3
#define ETH_BLM_RX_NO_SWAP BIT4
#define ETH_BLM_RX_BYTE_SWAP 0
#define ETH_BLM_TX_NO_SWAP BIT5
#define ETH_BLM_TX_BYTE_SWAP 0
#define ETH_DESCRIPTORS_BYTE_SWAP BIT6
#define ETH_DESCRIPTORS_NO_SWAP 0
#define ETH_TX_BURST_SIZE_1_64BIT 0
#define ETH_TX_BURST_SIZE_2_64BIT BIT22
#define ETH_TX_BURST_SIZE_4_64BIT BIT23
#define ETH_TX_BURST_SIZE_8_64BIT (BIT23 | BIT22)
#define ETH_TX_BURST_SIZE_16_64BIT BIT24
/* These macros describes the Port serial control reg (PSCR) bits */
#define ETH_SERIAL_PORT_DISABLE 0
#define ETH_SERIAL_PORT_ENABLE BIT0
#define ETH_FORCE_LINK_PASS BIT1
#define ETH_DO_NOT_FORCE_LINK_PASS 0
#define ETH_ENABLE_AUTO_NEG_FOR_DUPLX 0
#define ETH_DISABLE_AUTO_NEG_FOR_DUPLX BIT2
#define ETH_ENABLE_AUTO_NEG_FOR_FLOW_CTRL 0
#define ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL BIT3
#define ETH_ADV_NO_FLOW_CTRL 0
#define ETH_ADV_SYMMETRIC_FLOW_CTRL BIT4
#define ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX 0
#define ETH_FORCE_FC_MODE_TX_PAUSE_DIS BIT5
#define ETH_FORCE_BP_MODE_NO_JAM 0
#define ETH_FORCE_BP_MODE_JAM_TX BIT7
#define ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR BIT8
#define ETH_FORCE_LINK_FAIL 0
#define ETH_DO_NOT_FORCE_LINK_FAIL BIT10
#define ETH_RETRANSMIT_16_ATTEMPTS 0
#define ETH_RETRANSMIT_FOREVER BIT11
#define ETH_DISABLE_AUTO_NEG_SPEED_GMII BIT13
#define ETH_ENABLE_AUTO_NEG_SPEED_GMII 0
#define ETH_DTE_ADV_0 0
#define ETH_DTE_ADV_1 BIT14
#define ETH_DISABLE_AUTO_NEG_BYPASS 0
#define ETH_ENABLE_AUTO_NEG_BYPASS BIT15
#define ETH_AUTO_NEG_NO_CHANGE 0
#define ETH_RESTART_AUTO_NEG BIT16
#define ETH_MAX_RX_PACKET_1518BYTE 0
#define ETH_MAX_RX_PACKET_1522BYTE BIT17
#define ETH_MAX_RX_PACKET_1552BYTE BIT18
#define ETH_MAX_RX_PACKET_9022BYTE (BIT18 | BIT17)
#define ETH_MAX_RX_PACKET_9192BYTE BIT19
#define ETH_MAX_RX_PACKET_9700BYTE (BIT19 | BIT17)
#define ETH_SET_EXT_LOOPBACK BIT20
#define ETH_CLR_EXT_LOOPBACK 0
#define ETH_SET_FULL_DUPLEX_MODE BIT21
#define ETH_SET_HALF_DUPLEX_MODE 0
#define ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX BIT22
#define ETH_DISABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX 0
#define ETH_SET_GMII_SPEED_TO_10_100 0
#define ETH_SET_GMII_SPEED_TO_1000 BIT23
#define ETH_SET_MII_SPEED_TO_10 0
#define ETH_SET_MII_SPEED_TO_100 BIT24
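/*
 * Illustrative composite (an example only): a PSCR value that enables
 * the port, leaves autonegotiation active, advertises symmetric flow
 * control and accepts 1522-byte (VLAN-tagged) frames.
 */
#if 0
#define ETH_EXAMPLE_PORT_SERIAL_CONTROL		\
	(ETH_SERIAL_PORT_ENABLE |		\
	 ETH_ENABLE_AUTO_NEG_FOR_DUPLX |	\
	 ETH_ENABLE_AUTO_NEG_FOR_FLOW_CTRL |	\
	 ETH_ADV_SYMMETRIC_FLOW_CTRL |		\
	 ETH_DO_NOT_FORCE_LINK_FAIL |		\
	 ETH_MAX_RX_PACKET_1522BYTE)
#endif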
/* SMI reg */
#define ETH_SMI_BUSY BIT28 /* Operation is in progress */
#define ETH_SMI_READ_VALID BIT27 /* Read data is ready in the data field */
#define ETH_SMI_OPCODE_WRITE 0 /* Opcode: 0 - Write */
#define ETH_SMI_OPCODE_READ BIT26 /* Opcode: 1 - Read */
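/*
 * Sketch of the SMI read sequence implied by the bits above (the real
 * accessor is eth_port_read_smi_reg(), prototyped at the end of this
 * header).  MV_READ()/MV_WRITE(), the MV64340_ETH_SMI_REG offset and
 * the <phy addr in bits 16-20, reg in bits 21-25> field layout are
 * assumptions here, not guaranteed by this file.
 */
#if 0
static inline int example_smi_read(unsigned int phy_addr,
				   unsigned int phy_reg,
				   unsigned int *value)
{
	int i;

	/* Wait for the SMI master to go idle before issuing the read */
	for (i = 0; MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_BUSY; i++)
		if (i == 1000)
			return -ETIMEDOUT;

	MV_WRITE(MV64340_ETH_SMI_REG,
		 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);

	/* Data lives in the low 16 bits once READ_VALID is set */
	for (i = 0; !(MV_READ(MV64340_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++)
		if (i == 1000)
			return -ETIMEDOUT;

	*value = MV_READ(MV64340_ETH_SMI_REG) & 0xffff;
	return 0;
}
#endif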
/* SDMA command status fields macros */
/* Tx & Rx descriptors status */
#define ETH_ERROR_SUMMARY (BIT0)
/* Tx & Rx descriptors command */
#define ETH_BUFFER_OWNED_BY_DMA (BIT31)
/* Tx descriptors status */
#define ETH_LC_ERROR (0)
#define ETH_UR_ERROR (BIT1)
#define ETH_RL_ERROR (BIT2)
#define ETH_LLC_SNAP_FORMAT (BIT9)
/* Rx descriptors status */
#define ETH_CRC_ERROR (0)
#define ETH_OVERRUN_ERROR (BIT1)
#define ETH_MAX_FRAME_LENGTH_ERROR (BIT2)
#define ETH_RESOURCE_ERROR (BIT2 | BIT1)
#define ETH_VLAN_TAGGED (BIT19)
#define ETH_BPDU_FRAME (BIT20)
#define ETH_TCP_FRAME_OVER_IP_V_4 (0)
#define ETH_UDP_FRAME_OVER_IP_V_4 (BIT21)
#define ETH_OTHER_FRAME_TYPE (BIT22)
#define ETH_LAYER_2_IS_ETH_V_2 (BIT23)
#define ETH_FRAME_TYPE_IP_V_4 (BIT24)
#define ETH_FRAME_HEADER_OK (BIT25)
#define ETH_RX_LAST_DESC (BIT26)
#define ETH_RX_FIRST_DESC (BIT27)
#define ETH_UNKNOWN_DESTINATION_ADDR (BIT28)
#define ETH_RX_ENABLE_INTERRUPT (BIT29)
#define ETH_LAYER_4_CHECKSUM_OK (BIT30)
/* Rx descriptors byte count */
#define ETH_FRAME_FRAGMENTED (BIT2)
/* Tx descriptors command */
#define ETH_LAYER_4_CHECKSUM_FIRST_DESC (BIT10)
#define ETH_FRAME_SET_TO_VLAN (BIT15)
#define ETH_TCP_FRAME (0)
#define ETH_UDP_FRAME (BIT16)
#define ETH_GEN_TCP_UDP_CHECKSUM (BIT17)
#define ETH_GEN_IP_V_4_CHECKSUM (BIT18)
#define ETH_ZERO_PADDING (BIT19)
#define ETH_TX_LAST_DESC (BIT20)
#define ETH_TX_FIRST_DESC (BIT21)
#define ETH_GEN_CRC (BIT22)
#define ETH_TX_ENABLE_INTERRUPT (BIT23)
#define ETH_AUTO_MODE (BIT30)
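/*
 * Illustrative Tx command word (an example only): a single-fragment
 * frame with hardware CRC generation, short-frame padding and a
 * completion interrupt.  ETH_BUFFER_OWNED_BY_DMA is what actually
 * hands the descriptor to the SDMA engine.
 */
#if 0
#define ETH_EXAMPLE_TX_CMD			\
	(ETH_BUFFER_OWNED_BY_DMA |		\
	 ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC |	\
	 ETH_GEN_CRC | ETH_ZERO_PADDING |	\
	 ETH_TX_ENABLE_INTERRUPT)
#endif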
/* typedefs */
typedef enum _eth_func_ret_status {
ETH_OK, /* Returned as expected. */
ETH_ERROR, /* Fundamental error. */
ETH_RETRY, /* Could not process request. Try later. */
ETH_END_OF_JOB, /* Ring has nothing to process. */
ETH_QUEUE_FULL, /* Ring resource error. */
ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */
} ETH_FUNC_RET_STATUS;
typedef enum _eth_target {
ETH_TARGET_DRAM,
ETH_TARGET_DEVICE,
ETH_TARGET_CBS,
ETH_TARGET_PCI0,
ETH_TARGET_PCI1
} ETH_TARGET;
/* These are for big-endian machines. Little endian needs different
* definitions.
*/
#if defined(__BIG_ENDIAN)
struct eth_rx_desc {
u16 byte_cnt; /* Descriptor buffer byte count */
u16 buf_size; /* Buffer size */
u32 cmd_sts; /* Descriptor command status */
u32 next_desc_ptr; /* Next descriptor pointer */
u32 buf_ptr; /* Descriptor buffer pointer */
};
struct eth_tx_desc {
u16 byte_cnt; /* buffer byte count */
u16 l4i_chk; /* CPU provided TCP checksum */
u32 cmd_sts; /* Command/status field */
u32 next_desc_ptr; /* Pointer to next descriptor */
u32 buf_ptr; /* pointer to buffer for this descriptor */
};
#elif defined(__LITTLE_ENDIAN)
struct eth_rx_desc {
u32 cmd_sts; /* Descriptor command status */
u16 buf_size; /* Buffer size */
u16 byte_cnt; /* Descriptor buffer byte count */
u32 buf_ptr; /* Descriptor buffer pointer */
u32 next_desc_ptr; /* Next descriptor pointer */
};
struct eth_tx_desc {
u32 cmd_sts; /* Command/status field */
u16 l4i_chk; /* CPU provided TCP checksum */
u16 byte_cnt; /* buffer byte count */
u32 buf_ptr; /* pointer to buffer for this descriptor */
u32 next_desc_ptr; /* Pointer to next descriptor */
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
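/*
 * The SDMA engine accesses descriptors as 32-bit words, so the two
 * 16-bit fields that share a word trade places between the two layouts
 * above, while the 32-bit members keep their offsets.
 */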
/* Unified struct for Rx and Tx operations. The user is not required */
/* to be familiar with either Tx or Rx descriptors. */
struct pkt_info {
unsigned short byte_cnt; /* Descriptor buffer byte count */
unsigned short l4i_chk; /* Tx CPU provided TCP Checksum */
unsigned int cmd_sts; /* Descriptor command status */
dma_addr_t buf_ptr; /* Descriptor buffer pointer */
struct sk_buff * return_info; /* User resource return information */
};
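/*
 * Sketch (an example only) of submitting one frame through pkt_info;
 * eth_port_send() is prototyped at the end of this header.  The
 * dma_map_single(NULL, ...) call is an assumption about how buf_ptr
 * gets produced, not something this file defines.
 */
#if 0
static ETH_FUNC_RET_STATUS example_send(struct mv64340_private *mp,
					struct sk_buff *skb)
{
	struct pkt_info pkt_info;

	pkt_info.byte_cnt = skb->len;
	pkt_info.l4i_chk = 0;			/* no checksum offload */
	pkt_info.cmd_sts = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC |
			   ETH_GEN_CRC | ETH_TX_ENABLE_INTERRUPT;
	pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
					  DMA_TO_DEVICE);
	pkt_info.return_info = skb;	/* returned by eth_tx_return_desc() */

	return eth_port_send(mp, &pkt_info);
}
#endif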
/* Ethernet port specific information */
struct mv64340_private {
int port_num; /* User Ethernet port number */
u8 port_mac_addr[6]; /* User defined port MAC address. */
u32 port_config; /* User port configuration value */
u32 port_config_extend; /* User port config extend value */
u32 port_sdma_config; /* User port SDMA config value */
u32 port_serial_control; /* User port serial control value */
u32 port_tx_queue_command; /* Port active Tx queues summary */
u32 port_rx_queue_command; /* Port active Rx queues summary */
int rx_resource_err; /* Rx ring resource error flag */
int tx_resource_err; /* Tx ring resource error flag */
/* Tx/Rx ring management index fields. For driver use */
/* Next available and first returning Rx resource */
int rx_curr_desc_q, rx_used_desc_q;
/* Next available and first returning Tx resource */
int tx_curr_desc_q, tx_used_desc_q;
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
int tx_first_desc_q;
#endif
#ifdef MV64340_TX_FAST_REFILL
u32 tx_clean_threshold;
#endif
volatile struct eth_rx_desc * p_rx_desc_area;
dma_addr_t rx_desc_dma;
unsigned int rx_desc_area_size;
struct sk_buff * rx_skb[MV64340_RX_QUEUE_SIZE];
volatile struct eth_tx_desc * p_tx_desc_area;
dma_addr_t tx_desc_dma;
unsigned int tx_desc_area_size;
struct sk_buff * tx_skb[MV64340_TX_QUEUE_SIZE];
struct work_struct tx_timeout_task;
/*
* Former struct mv64340_eth_priv members start here
*/
struct net_device_stats stats;
spinlock_t lock;
/* Size of Tx Ring per queue */
unsigned int tx_ring_size;
/* Number of skbs outstanding on the Tx queue */
unsigned int tx_ring_skbs;
/* Size of Rx Ring per queue */
unsigned int rx_ring_size;
/* Number of skbs allocated to the Rx ring per queue */
unsigned int rx_ring_skbs;
/*
 * rx_task is used to refill the RX ring from outside
 * bottom-half context
 */
struct work_struct rx_task;
/*
 * Used when the RX ring is empty, which can happen if the
 * system runs out of resources (skbs)
 */
struct timer_list timeout;
long rx_task_busy __attribute__ ((aligned(SMP_CACHE_BYTES)));
unsigned rx_timer_flag;
u32 rx_int_coal;
u32 tx_int_coal;
};
/* ethernet.h API list */
/* Port operation control routines */
static void eth_port_init(struct mv64340_private *mp);
static void eth_port_reset(unsigned int eth_port_num);
static int eth_port_start(struct mv64340_private *mp);
static void ethernet_set_config_reg(unsigned int eth_port_num,
unsigned int value);
static unsigned int ethernet_get_config_reg(unsigned int eth_port_num);
/* Port MAC address routines */
static void eth_port_uc_addr_set(unsigned int eth_port_num,
unsigned char *p_addr);
/* PHY and MIB routines */
static int ethernet_phy_reset(unsigned int eth_port_num);
static int eth_port_write_smi_reg(unsigned int eth_port_num,
unsigned int phy_reg,
unsigned int value);
static int eth_port_read_smi_reg(unsigned int eth_port_num,
unsigned int phy_reg,
unsigned int *value);
static void eth_clear_mib_counters(unsigned int eth_port_num);
/* Port data flow control routines */
static ETH_FUNC_RET_STATUS eth_port_send(struct mv64340_private *mp,
struct pkt_info * p_pkt_info);
static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv64340_private *mp,
struct pkt_info * p_pkt_info);
static ETH_FUNC_RET_STATUS eth_port_receive(struct mv64340_private *mp,
struct pkt_info * p_pkt_info);
static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv64340_private *mp,
struct pkt_info * p_pkt_info);
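/*
 * Sketch (an example only) of the receive-side calling convention:
 * drain ready descriptors with eth_port_receive() until it stops
 * returning ETH_OK, with refilled buffers handed back separately
 * through eth_rx_return_buff().  Length trimming and the refill step
 * are elided here.
 */
#if 0
static void example_rx_drain(struct mv64340_private *mp)
{
	struct pkt_info pkt_info;

	while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
		struct sk_buff *skb = pkt_info.return_info;

		/* cmd_sts carries the Rx status bits defined above */
		if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)
			dev_kfree_skb_irq(skb);
		else
			netif_rx(skb);	/* hand the frame to the stack */
	}
}
#endif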
#endif /* __MV64340_ETH_H__ */