Commit 50dc2599 authored by Jeff Garzik's avatar Jeff Garzik

Merge redhat.com:/home/jgarzik/repo/linus-2.5

into redhat.com:/home/jgarzik/repo/net-drivers-2.5
parents 2622cf1d 2bf2d024
......@@ -1222,6 +1222,20 @@ config PCNET32
module, say M here and read <file:Documentation/modules.txt> as well
as <file:Documentation/networking/net-modules.txt>.
config AMD8111_ETH
tristate "AMD 8111 (new PCI lance) support"
depends on NET_PCI && PCI
help
If you have an AMD 8111-based PCI lance ethernet card,
answer Y here and read the Ethernet-HOWTO, available from
<http://www.linuxdoc.org/docs.html#howto>.
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
The module will be called amd8111e.o. If you want to compile it as a
module, say M here and read <file:Documentation/modules.txt> as well
as <file:Documentation/networking/net-modules.txt>.
config ADAPTEC_STARFIRE
tristate "Adaptec Starfire/DuraLAN support"
depends on NET_PCI && PCI
......
......@@ -183,6 +183,7 @@ obj-$(CONFIG_MAC89x0) += mac89x0.o
obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_DL2K) += dl2k.o
obj-$(CONFIG_R8169) += r8169.o
obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
# non-drivers/net drivers who want mii lib
obj-$(CONFIG_PCMCIA_SMC91C92) += mii.o
......
......@@ -37,6 +37,7 @@ obj-$(CONFIG_VIA_RHINE) += crc32.o
obj-$(CONFIG_YELLOWFIN) += crc32.o
obj-$(CONFIG_WINBOND_840) += crc32.o
obj-$(CONFIG_R8169) += crc32.o
obj-$(CONFIG_AMD8111_ETH) += crc32.o
# These rely on drivers/net/7990.o which requires crc32.o
......
/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
* Copyright (C) 2002 Advanced Micro Devices
*
*
* Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
* Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
* Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
* Derived from the lance driver written 1993,1994,1995 by Donald Becker.
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency.[ pcnet32.c ]
* Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
Module Name:
amd8111e.c
Abstract:
AMD8111 based 10/100 Ethernet Controller Driver.
Environment:
Kernel Mode
Revision History:
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ctype.h>
#include <linux/crc32.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define AMD8111E_VLAN_TAG_USED 1
#else
#define AMD8111E_VLAN_TAG_USED 0
#endif
#include "amd8111e.h"
/* Driver identity strings used by ethtool GDRVINFO */
#define MODULE_NAME	"amd8111e"
#define MODULE_VERSION	"3.0.0"

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.0");
MODULE_LICENSE("GPL");
/* Per-adapter (up to MAX_UNITS) speed/duplex selection, see the
   description string below for the meaning of values 0-4 */
MODULE_PARM(speed_duplex, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotitate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");

/* PCI device table this driver binds to; zero entry terminates */
static struct pci_device_id amd8111e_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};
/*
 * Program the external PHY speed/duplex selection.
 *
 * On the very first call (device not yet opened) the "original" link
 * parameters are initialised to 100Mbps full duplex so they can be
 * restored later.  The PHY is then reset with the currently selected
 * lp->ext_phy_option bits, bracketed by disabling and re-enabling the
 * port manager as the reset sequence requires.
 *
 * Fix: removed the local `reg_val`, which was assigned
 * lp->ext_phy_option but never read (dead code).
 */
static void amd8111e_set_ext_phy(struct net_device *dev)
{
	struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
	void * mmio = lp->mmio;
	struct amd8111e_link_config *link_config = &lp->link_config;

	if(!lp->opened){
		/* Initializing SPEED_100 and DUPLEX_FULL as original values */
		link_config->orig_speed = SPEED_100;
		link_config->orig_duplex = DUPLEX_FULL;
		link_config->orig_phy_option = XPHYSP |XPHYFD;
	}

	/* Disable port manager */
	writel((u32) EN_PMGR, mmio + CMD3 );

	/* Reset PHY with the selected speed/duplex/autoneg bits */
	writel((u32)XPHYRST | lp->ext_phy_option, mmio + CTRL2);

	/* Enable port manager (VAL1 qualifies the EN_PMGR bit write) */
	writel((u32)VAL1 | EN_PMGR, mmio + CMD3 );
}
/*
This function will unmap skb->data space and will free
all transmit and receive skbuffs.
*/
static int amd8111e_free_skbs(struct net_device *dev)
{
struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
struct sk_buff* rx_skbuff;
int i;
/* Freeing transmit skbs */
for(i = 0; i < NUM_TX_BUFFERS; i++){
if(lp->tx_skbuff[i]){
pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
dev_kfree_skb (lp->tx_skbuff[i]);
lp->tx_skbuff[i] = NULL;
lp->tx_dma_addr[i] = 0;
}
}
/* Freeing previously allocated receive buffers */
for (i = 0; i < NUM_RX_BUFFERS; i++){
rx_skbuff = lp->rx_skbuff[i];
if(rx_skbuff != NULL){
pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
dev_kfree_skb(lp->rx_skbuff[i]);
lp->rx_skbuff[i] = NULL;
lp->rx_dma_addr[i] = 0;
}
}
return 0;
}
/*
 * Derive the receive buffer length from the interface MTU, and flag
 * jumbo-frame mode when the MTU exceeds the standard ethernet payload.
 */
static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
{
	struct amd8111e_priv *lp = dev->priv;

	if (dev->mtu <= ETH_DATA_LEN) {
		lp->rx_buff_len = PKT_BUFF_SZ;
		lp->options &= ~OPTION_JUMBO_ENABLE;
	} else {
		/* MTU + ethernet header + FCS + optional VLAN tag */
		lp->rx_buff_len = dev->mtu + ETH_HLEN + 8;
		lp->options |= OPTION_JUMBO_ENABLE;
	}
}
/*
 * (Re)initialise the descriptor rings.
 *
 * Frees the buffers of a previous run (when the device was already
 * opened) or allocates the consistent-DMA descriptor rings on first
 * use, computes the receive buffer length, allocates fresh receive
 * skbs and publishes them to the hardware by setting OWN_BIT.
 * Transmit descriptors start out zeroed (driver-owned).
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 *
 * Fix: the receive descriptors now advertise cpu_to_le16(rx_buff_len-2),
 * matching both the pci_map_single() length and the replenish path in
 * amd8111e_rx(); previously the full rx_buff_len was advertised even
 * though 2 bytes are consumed by skb_reserve() for IP alignment.
 */
static int amd8111e_init_ring(struct net_device *dev)
{
	struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
	int i;

	lp->rx_idx = lp->tx_idx = 0;
	lp->tx_complete_idx = 0;
	lp->tx_ring_idx = 0;

	if(lp->opened)
		/* Free previously allocated transmit and receive skbs */
		amd8111e_free_skbs(dev);
	else{
		/* allocate the tx and rx descriptors */
		if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
			sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
			&lp->tx_ring_dma_addr)) == NULL)
			goto err_no_mem;

		if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
			sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
			&lp->rx_ring_dma_addr)) == NULL)
			goto err_free_tx_ring;
	}

	/* Set new receive buff size */
	amd8111e_set_rx_buff_len(dev);

	/* Allocating receive skbs */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
			/* Release previously allocated skbs */
			for(--i; i >= 0 ;i--)
				dev_kfree_skb(lp->rx_skbuff[i]);
			goto err_free_rx_ring;
		}
		/* 2-byte reserve keeps the IP header word-aligned */
		skb_reserve(lp->rx_skbuff[i],2);
	}

	/* Initializing receive descriptors */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
			lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);

		lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
		/* advertise only the mapped length (buffer minus the
		   2-byte alignment reserve) */
		lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
		lp->rx_ring[i].rx_dr_offset10 = cpu_to_le16(OWN_BIT);
	}

	/* Initializing transmit descriptors */
	for (i = 0; i < NUM_TX_RING_DR; i++) {
		lp->tx_ring[i].buff_phy_addr = 0;
		lp->tx_ring[i].tx_dr_offset2 = 0;
		lp->tx_ring[i].buff_count = 0;
	}

	return 0;

err_free_rx_ring:
	pci_free_consistent(lp->pci_dev,
		sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
		lp->rx_ring_dma_addr);

err_free_tx_ring:
	pci_free_consistent(lp->pci_dev,
		sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
		lp->tx_ring_dma_addr);

err_no_mem:
	return -ENOMEM;
}
/*
 * Reinitialise the controller and start it.
 *
 * Stops the chip, rebuilds the descriptor rings, programs the PHY,
 * control registers, interrupt enables, ring base addresses/lengths,
 * jumbo/VLAN options and the station MAC address, then sets RUN.
 * Caller holds lp->lock.
 *
 * Returns 0 on success or -ENOMEM if ring setup fails.
 *
 * Fix: the transmit ring length is cast to u16 (writew takes a u16),
 * consistent with the receive ring length write below.
 */
static int amd8111e_restart(struct net_device *dev)
{
	struct amd8111e_priv *lp = (struct amd8111e_priv* )dev->priv;
	void * mmio = lp->mmio;
	int i,reg_val;

	/* stop the chip */
	writel(RUN, mmio + CMD0);

	if(amd8111e_init_ring(dev))
		return -ENOMEM;

	amd8111e_set_ext_phy(dev);

	/* set control registers */
	reg_val = readl(mmio + CTRL1);
	writel( reg_val| XMTSP_128 | CACHE_ALIGN | B1_MASK, mmio + CTRL1 );

	/* enable interrupt */
	writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
		APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
		SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);

	writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);

	/* initialize tx and rx ring base addresses */
	writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
	writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);

	writew((u16)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
	writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);

	if(lp->options & OPTION_JUMBO_ENABLE){
		writel((u32)VAL2|JUMBO, mmio + CMD3);
		/* Reset REX_UFLO */
		writel( REX_UFLO, mmio + CMD2);
		/* Should not set REX_UFLO for jumbo frames */
		writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);
	}else
		writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);

#if AMD8111E_VLAN_TAG_USED
	writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
#endif
	writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );

	/* Setting the MAC address to the device */
	for(i = 0; i < ETH_ADDR_LEN; i++)
		writeb( dev->dev_addr[i], mmio + PADR + i );

	/* start receive demand polling and set the RUN bit */
	writel(VAL2 | RDMD0, mmio + CMD0);
	writel(VAL0 | INTREN | RUN, mmio + CMD0);

	return 0;
}
/*
 * Program the controller registers to a quiescent default state:
 * ring bases and lengths zeroed, command/interrupt/filter registers
 * cleared, MIB counters reset, SRAM sizing set, and the jumbo/VLAN
 * modes re-applied from lp->options.  Called before (re)starting the
 * chip.  Register order follows the hardware bring-up sequence.
 */
static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
{
	unsigned int reg_val;
	unsigned int logic_filter[2] ={0,};
	void * mmio = lp->mmio;

	/* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
	writew( 0x8101, mmio + AUTOPOLL0);

	/* Clear RCV_RING_BASE_ADDR */
	writel(0, mmio + RCV_RING_BASE_ADDR0);

	/* Clear XMT_RING_BASE_ADDR */
	writel(0, mmio + XMT_RING_BASE_ADDR0);
	writel(0, mmio + XMT_RING_BASE_ADDR1);
	writel(0, mmio + XMT_RING_BASE_ADDR2);
	writel(0, mmio + XMT_RING_BASE_ADDR3);

	/* Clear CMD0 */
	writel(CMD0_CLEAR,mmio + CMD0);

	/* Clear CMD2 */
	writel(CMD2_CLEAR, mmio +CMD2);

	/* Clear CMD7 */
	writel(CMD7_CLEAR , mmio + CMD7);

	/* Clear DLY_INT_A and DLY_INT_B */
	writel(0x0, mmio + DLY_INT_A);
	writel(0x0, mmio + DLY_INT_B);

	/* Clear FLOW_CONTROL */
	writel(0x0, mmio + FLOW_CONTROL);

	/* Clear INT0: write-1-to-clear, so write back whatever is pending */
	reg_val = readl(mmio + INT0);
	writel(reg_val, mmio + INT0);

	/* Clear STVAL */
	writel(0x0, mmio + STVAL);

	/* Clear INTEN0 */
	writel( INTEN0_CLEAR, mmio + INTEN0);

	/* Clear LADRF */
	writel(0x0 , mmio + LADRF);

	/* Set SRAM_SIZE & SRAM_BOUNDARY registers */
	writel( 0x80010,mmio + SRAM_SIZE);

	/* Clear RCV_RING0_LEN */
	writel(0x0, mmio + RCV_RING_LEN0);

	/* Clear XMT_RING0/1/2/3_LEN */
	writel(0x0, mmio + XMT_RING_LEN0);
	writel(0x0, mmio + XMT_RING_LEN1);
	writel(0x0, mmio + XMT_RING_LEN2);
	writel(0x0, mmio + XMT_RING_LEN3);

	/* Clear XMT_RING_LIMIT */
	writel(0x0, mmio + XMT_RING_LIMIT);

	/* Clear MIB */
	writew(MIB_CLEAR, mmio + MIB_ADDR);

	/* Clear the 64-bit logical address filter (LADRF) */
	AMD8111E_WRITE_REG64(mmio, LADRF,logic_filter);

	/* SRAM_SIZE register: value is discarded — presumably a dummy
	   read to flush the posted write; NOTE(review): confirm against
	   the hardware spec */
	reg_val = readl(mmio + SRAM_SIZE);

	if(lp->options & OPTION_JUMBO_ENABLE)
		writel( VAL2|JUMBO, mmio + CMD3);
#if AMD8111E_VLAN_TAG_USED
	writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
#endif
	/* CMD2 register: dummy read as above — confirm */
	reg_val = readl(mmio + CMD2);
}
/*
 * Mask chip interrupts and acknowledge everything already pending in
 * INT0 (the register is write-1-to-clear).
 */
static void amd8111e_disable_interrupt(struct amd8111e_priv *lp)
{
	u32 pending;

	/* INTREN written without the VAL0 qualifier clears the enable
	   bit (amd8111e_restart sets it with VAL0). */
	writel(INTREN, lp->mmio + CMD0);

	/* Ack all pending events by writing the status back. */
	pending = readl(lp->mmio + INT0);
	writel(pending, lp->mmio + INT0);
}
/*
This function stops the chip.
*/
static void amd8111e_stop_chip(struct amd8111e_priv* lp)
{
	/* RUN written without a VAL qualifier bit clears RUN, halting
	   the controller (amd8111e_restart sets it with VAL0). */
	writel(RUN, lp->mmio + CMD0);
}
/*
 * Release everything attached to the rings: first every skb (via
 * amd8111e_free_skbs), then the consistent-DMA memory of the receive
 * and transmit descriptor rings themselves.  The ring pointers are
 * NULLed so a second call is harmless.
 */
static void amd8111e_free_ring(struct amd8111e_priv *lp)
{
	amd8111e_free_skbs(lp->amd8111e_net_dev);

	if (lp->rx_ring != NULL) {
		pci_free_consistent(lp->pci_dev,
			sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
			lp->rx_ring, lp->rx_ring_dma_addr);
		lp->rx_ring = NULL;
	}

	if (lp->tx_ring != NULL) {
		pci_free_consistent(lp->pci_dev,
			sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
			lp->tx_ring, lp->tx_ring_dma_addr);
		lp->tx_ring = NULL;
	}
}
#if AMD8111E_VLAN_TAG_USED
/*
This is the receive indication function for packets with vlan tag.
The hardware has already stripped the tag; vlan_tag is the tag
control information taken from the receive descriptor.
*/
static int amd8111e_vlan_rx(struct amd8111e_priv *lp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_rx(skb, lp->vlgrp, vlan_tag);
}
#endif
/*
 * Reclaim completed transmit descriptors.
 *
 * Walks the ring from tx_complete_idx towards tx_idx, stopping at the
 * first descriptor the hardware still owns (OWN_BIT set).  For each
 * completed entry the DMA mapping is torn down and the skb freed; the
 * queue is woken once enough slots have drained.  Runs in interrupt
 * context (hence dev_kfree_skb_irq).  Always returns 0.
 */
static int amd8111e_tx(struct net_device *dev)
{
	struct amd8111e_priv* lp = dev->priv;
	int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
	int status;

	/* Complete all the transmit packet */
	while (lp->tx_complete_idx != lp->tx_idx){
		tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
		status = le16_to_cpu(lp->tx_ring[tx_index].tx_dr_offset2);

		if(status & OWN_BIT)
			break;	/* It still hasn't been Txed */

		lp->tx_ring[tx_index].buff_phy_addr = 0;

		/* We must free the original skb */
		if (lp->tx_skbuff[tx_index]) {
			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
					lp->tx_skbuff[tx_index]->len,
					PCI_DMA_TODEVICE);
			dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
			lp->tx_skbuff[tx_index] = 0;
			lp->tx_dma_addr[tx_index] = 0;
		}
		lp->tx_complete_idx++;

		/* wake the queue once at least two slots are free */
		if (netif_queue_stopped(dev) &&
			lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
			/* The ring is no longer full, clear tbusy. */
			netif_wake_queue (dev);
		}
	}
	return 0;
}
/*
 * Receive up to NUM_RX_BUFFERS/2 frames per invocation.
 *
 * Each driver-owned descriptor is validated (error summary bit,
 * STP/ENP, minimum length), its skb is handed to the stack and
 * replaced with a freshly allocated one, and the descriptor is then
 * returned to the hardware (OWN_BIT).  On any per-packet failure the
 * descriptor is simply recycled.  Runs in interrupt context.
 * Always returns 0.
 *
 * Fix: netif_rx() is now inside the non-VLAN arm of the tag check.
 * Previously the `else` guarded only the dev->last_rx update, so
 * VLAN-tagged frames were indicated twice (vlan_hwaccel_rx AND
 * netif_rx) and last_rx was skipped for them.
 */
static int amd8111e_rx(struct net_device *dev)
{
	struct amd8111e_priv *lp = dev->priv;
	struct sk_buff *skb,*new_skb;
	int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
	int min_pkt_len, status;
	int num_rx_pkt = 0;
	int max_rx_pkt = NUM_RX_BUFFERS/2;
	short pkt_len;
#if AMD8111E_VLAN_TAG_USED
	short vtag;
#endif

	/* If we own the next entry, it's a new packet. Send it up. */
	while(++num_rx_pkt <= max_rx_pkt){
		if(lp->rx_ring[rx_index].rx_dr_offset10 & OWN_BIT)
			return 0;

		/* check if err summary bit is set */
		if(le16_to_cpu(lp->rx_ring[rx_index].rx_dr_offset10) & ERR_BIT){
			/*
			 * There is a tricky error noted by John Murphy,
			 * <murf@perftech.com> to Russ Nelson: Even with full-sized
			 * buffers it's possible for a jabber packet to use two
			 * buffers, with only the last correctly noting the error. */
			/* reseting flags */
			lp->rx_ring[rx_index].rx_dr_offset10 &=
				cpu_to_le16(RESET_RX_FLAGS);
			goto err_next_pkt;
		}
		/* check for STP and ENP */
		status = le16_to_cpu(lp->rx_ring[rx_index].rx_dr_offset10);
		if(!((status & STP_BIT) && (status & ENP_BIT))){
			/* reseting flags */
			lp->rx_ring[rx_index].rx_dr_offset10 &=
				cpu_to_le16(RESET_RX_FLAGS);
			goto err_next_pkt;
		}
		/* msg_count includes the 4-byte FCS */
		pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;

#if AMD8111E_VLAN_TAG_USED
		vtag = le16_to_cpu(lp->rx_ring[rx_index].rx_dr_offset10) & TT_MASK;
		/*MAC will strip vlan tag*/
		if(lp->vlgrp != NULL && vtag !=0)
			min_pkt_len =MIN_PKT_LEN - 4;
		else
#endif
			min_pkt_len =MIN_PKT_LEN;

		if (pkt_len < min_pkt_len) {
			lp->rx_ring[rx_index].rx_dr_offset10 &=
				cpu_to_le16(RESET_RX_FLAGS);
			lp->stats.rx_errors++;
			goto err_next_pkt;
		}
		if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
			/* if allocation fail,
			   ignore that pkt and go to next one */
			lp->rx_ring[rx_index].rx_dr_offset10 &=
				cpu_to_le16(RESET_RX_FLAGS);
			lp->stats.rx_errors++;
			goto err_next_pkt;
		}

		skb_reserve(new_skb, 2);
		skb = lp->rx_skbuff[rx_index];
		pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
			lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
		skb_put(skb, pkt_len);
		skb->dev = dev;
		lp->rx_skbuff[rx_index] = new_skb;
		new_skb->dev = dev;
		lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
			new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE);

		skb->protocol = eth_type_trans(skb, dev);

#if AMD8111E_VLAN_TAG_USED
		vtag = lp->rx_ring[rx_index].rx_dr_offset10 & TT_MASK;
		if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
			amd8111e_vlan_rx(lp, skb,
				    lp->rx_ring[rx_index].tag_ctrl_info);
		} else
#endif
			netif_rx (skb);
		dev->last_rx = jiffies;

err_next_pkt:
		/* recycle the descriptor back to the hardware */
		lp->rx_ring[rx_index].buff_phy_addr
			= cpu_to_le32(lp->rx_dma_addr[rx_index]);
		lp->rx_ring[rx_index].buff_count =
			cpu_to_le16(lp->rx_buff_len-2);
		lp->rx_ring[rx_index].rx_dr_offset10 |= cpu_to_le16(OWN_BIT);
		rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
	}

	return 0;
}
/*
 * Handle a link-change event.
 *
 * Reads STAT0: if the link is up and autonegotiation just completed,
 * any previously forced speed/duplex are saved as the "orig" values
 * and the config switches to autoneg; if the link is up without
 * autoneg, speed and duplex are decoded from the status bits.  A down
 * link invalidates the config and drops the carrier.
 * Always returns 0.
 */
static int amd8111e_link_change(struct net_device* dev)
{
	struct amd8111e_priv *lp = dev->priv;
	int status0,speed;

	/* read the link change */
	status0 = readl(lp->mmio + STAT0);

	if(status0 & LINK_STATS){
		if(status0 & AUTONEG_COMPLETE){
			/* keeping the original speeds */
			if((lp->link_config.speed != SPEED_INVALID)&&
			   (lp->link_config.duplex != DUPLEX_INVALID)){
				lp->link_config.orig_speed = lp->link_config.speed;
				lp->link_config.orig_duplex = lp->link_config.duplex;
				lp->link_config.orig_phy_option = lp->ext_phy_option;
			}
			lp->link_config.speed = SPEED_INVALID;
			lp->link_config.duplex = DUPLEX_INVALID;
			lp->link_config.autoneg = AUTONEG_ENABLE;
			netif_carrier_on(dev);
			return 0;
		}
		if(status0 & FULL_DPLX)
			lp->link_config.duplex = DUPLEX_FULL;
		else
			lp->link_config.duplex = DUPLEX_HALF;
		/* decode the speed field (SPEED_MASK bits, shifted down) */
		speed = (status0 & SPEED_MASK) >> 7;
		if(speed == PHY_SPEED_10)
			lp->link_config.speed = SPEED_10;
		else if(speed == PHY_SPEED_100)
			lp->link_config.speed = SPEED_100;
		lp->link_config.autoneg = AUTONEG_DISABLE;
		netif_carrier_on(dev);
	}
	else{
		/* link down: invalidate the recorded configuration */
		lp->link_config.speed = SPEED_INVALID;
		lp->link_config.duplex = DUPLEX_INVALID;
		lp->link_config.autoneg = AUTONEG_INVALID;
		netif_carrier_off(dev);
	}
	return 0;
}
/*
 * Read one hardware MIB counter.
 *
 * Issues a read command through MIB_ADDR, polls (bounded by
 * REPEAT_CNT) until the controller clears MIB_CMD_ACTIVE, then
 * fetches the counter value from MIB_DATA.
 */
static int amd8111e_read_mib(void *mmio, u8 MIB_COUNTER)
{
	unsigned int busy;
	unsigned int tries = REPEAT_CNT;

	writew(MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
	do {
		busy = readw(mmio + MIB_ADDR);
		udelay(2);	/* controller takes MAX 2 us to get mib data */
	} while (--tries && (busy & MIB_CMD_ACTIVE));

	return readl(mmio + MIB_DATA);
}
/*
This function reads the mib registers and returns the hardware
statistics.  Each counter is added onto the snapshot saved when the
device was last closed (prev_stats); if the device is not open the
MIB block is not read and the saved snapshot is returned unchanged.
lp->lock guards the MIB read sequence. */
static struct net_device_stats *amd8111e_get_stats(struct net_device * dev)
{
	struct amd8111e_priv *lp = dev->priv;
	void * mmio = lp->mmio;
	unsigned long flags;
	struct net_device_stats *prev_stats = &lp->prev_stats;
	struct net_device_stats* new_stats = &lp->stats;

	if(!lp->opened)
		return prev_stats;

	spin_lock_irqsave (&lp->lock, flags);

	/* stats.rx_packets */
	new_stats->rx_packets = prev_stats->rx_packets+
		amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
		amd8111e_read_mib(mmio, rcv_multicast_pkts)+
		amd8111e_read_mib(mmio, rcv_unicast_pkts);

	/* stats.tx_packets */
	new_stats->tx_packets = prev_stats->tx_packets+
		amd8111e_read_mib(mmio, xmt_packets);

	/*stats.rx_bytes */
	new_stats->rx_bytes = prev_stats->rx_bytes+
		amd8111e_read_mib(mmio, rcv_octets);

	/* stats.tx_bytes */
	new_stats->tx_bytes = prev_stats->tx_bytes+
		amd8111e_read_mib(mmio, xmt_octets);

	/* stats.rx_errors */
	new_stats->rx_errors = prev_stats->rx_errors+
		amd8111e_read_mib(mmio, rcv_undersize_pkts)+
		amd8111e_read_mib(mmio, rcv_fragments)+
		amd8111e_read_mib(mmio, rcv_jabbers)+
		amd8111e_read_mib(mmio, rcv_alignment_errors)+
		amd8111e_read_mib(mmio, rcv_fcs_errors)+
		amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.tx_errors */
	new_stats->tx_errors = prev_stats->tx_errors+
		amd8111e_read_mib(mmio, xmt_underrun_pkts);

	/* stats.rx_dropped*/
	new_stats->rx_dropped = prev_stats->rx_dropped+
		amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.tx_dropped*/
	new_stats->tx_dropped = prev_stats->tx_dropped+
		amd8111e_read_mib(mmio, xmt_underrun_pkts);

	/* stats.multicast*/
	new_stats->multicast = prev_stats->multicast+
		amd8111e_read_mib(mmio, rcv_multicast_pkts);

	/* stats.collisions*/
	new_stats->collisions = prev_stats->collisions+
		amd8111e_read_mib(mmio, xmt_collisions);

	/* stats.rx_length_errors*/
	new_stats->rx_length_errors = prev_stats->rx_length_errors+
		amd8111e_read_mib(mmio, rcv_undersize_pkts)+
		amd8111e_read_mib(mmio, rcv_oversize_pkts);

	/* stats.rx_over_errors*/
	new_stats->rx_over_errors = prev_stats->rx_over_errors+
		amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.rx_crc_errors*/
	new_stats->rx_crc_errors = prev_stats->rx_crc_errors+
		amd8111e_read_mib(mmio, rcv_fcs_errors);

	/* stats.rx_frame_errors*/
	new_stats->rx_frame_errors = prev_stats->rx_frame_errors+
		amd8111e_read_mib(mmio, rcv_alignment_errors);

	/* stats.rx_fifo_errors */
	new_stats->rx_fifo_errors = prev_stats->rx_fifo_errors+
		amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.rx_missed_errors */
	new_stats->rx_missed_errors = prev_stats->rx_missed_errors+
		amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.tx_aborted_errors*/
	new_stats->tx_aborted_errors = prev_stats->tx_aborted_errors+
		amd8111e_read_mib(mmio, xmt_excessive_collision);

	/* stats.tx_carrier_errors*/
	new_stats->tx_carrier_errors = prev_stats->tx_carrier_errors+
		amd8111e_read_mib(mmio, xmt_loss_carrier);

	/* stats.tx_fifo_errors*/
	new_stats->tx_fifo_errors = prev_stats->tx_fifo_errors+
		amd8111e_read_mib(mmio, xmt_underrun_pkts);

	/* stats.tx_window_errors*/
	new_stats->tx_window_errors = prev_stats->tx_window_errors+
		amd8111e_read_mib(mmio, xmt_late_collision);

	spin_unlock_irqrestore (&lp->lock, flags);

	return new_stats;
}
/*
 * Device interrupt handler (shared IRQ).  Handles receive, transmit
 * and link-change events; interrupts are masked for the duration and
 * re-enabled on exit.
 *
 * Fix: the NULL check of dev_id now happens BEFORE dev->priv is
 * dereferenced — the old code read dev->priv first and would oops on
 * a spurious interrupt with a NULL cookie.
 */
static void amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device * dev = (struct net_device *) dev_id;
	struct amd8111e_priv *lp;
	void * mmio;
	unsigned int intr0;

	if(dev == NULL)
		return;

	lp = dev->priv;
	mmio = lp->mmio;

	spin_lock (&lp->lock);

	/* disabling interrupt */
	writel(INTREN, mmio + CMD0);

	/* Read interrupt status */
	intr0 = readl(mmio + INT0);

	/* Process all the INT event until INTR bit is clear. */
	if (!(intr0 & INTR))
		goto err_no_interrupt;

	/* Ack the events we are about to handle (write-1-to-clear).
	   Current driver processes 3 interrupts : RINT,TINT,LCINT */
	writel(intr0, mmio + INT0);

	/* Check if Receive Interrupt has occurred. */
	if(intr0 & RINT0){
		amd8111e_rx(dev);
		writel(VAL2 | RDMD0, mmio + CMD0);
	}

	/* Check if Transmit Interrupt has occurred. */
	if(intr0 & TINT0)
		amd8111e_tx(dev);

	/* Check if Link Change Interrupt has occurred. */
	if (intr0 & LCINT)
		amd8111e_link_change(dev);

err_no_interrupt:
	/* re-enable interrupts */
	writel( VAL0 | INTREN,mmio + CMD0);

	spin_unlock(&lp->lock);
	return;
}
/*
 * Close the interface (ifconfig down).
 *
 * Stops the queue, masks interrupts, halts the chip and frees the
 * rings under the lock, then releases the IRQ.  The final counters
 * are copied into prev_stats (lp->opened is still set at that point,
 * so amd8111e_get_stats() reads the hardware MIB one last time) so
 * statistics remain available while the device is down.
 * Always returns 0.
 */
static int amd8111e_close(struct net_device * dev)
{
	struct amd8111e_priv *lp = dev->priv;
	netif_stop_queue(dev);

	spin_lock_irq(&lp->lock);

	amd8111e_disable_interrupt(lp);
	amd8111e_stop_chip(lp);
	amd8111e_free_ring(lp);

	netif_carrier_off(lp->amd8111e_net_dev);

	spin_unlock_irq(&lp->lock);

	free_irq(dev->irq, dev);

	/* snapshot the final counters before marking the device closed */
	memcpy(&lp->prev_stats,amd8111e_get_stats(dev), sizeof(lp->prev_stats));
	lp->opened = 0;
	return 0;
}
/*
 * Open the interface.  Requests the (shared) IRQ, initialises the
 * hardware, buffers and descriptors, and starts the device.
 * Returns 0 on success, -EAGAIN if no IRQ could be obtained, or
 * -ENOMEM if ring setup fails.
 *
 * Fix: the IRQ is now released on the amd8111e_restart() failure
 * path — previously it leaked, and a later open of the same device
 * would stack a second handler.
 */
static int amd8111e_open(struct net_device * dev )
{
	struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;

	if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, SA_SHIRQ,
					 dev->name, dev))
		return -EAGAIN;

	spin_lock_irq(&lp->lock);

	amd8111e_init_hw_default(lp);

	if(amd8111e_restart(dev)){
		spin_unlock_irq(&lp->lock);
		/* don't leak the IRQ if ring setup failed */
		free_irq(dev->irq, dev);
		return -ENOMEM;
	}

	lp->opened = 1;

	spin_unlock_irq(&lp->lock);

	netif_start_queue(dev);

	return 0;
}
/*
This function checks if there is any transmit descriptors available to queue more packet.
*/
static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
{
int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
if(lp->tx_skbuff[tx_index] != 0)
return -1;
else
return 0;
}
/*
 * Queue one skb on transmit ring 0.
 *
 * Fills the next descriptor (byte count, optional VLAN tag insertion,
 * DMA address), hands it to the hardware by setting OWN_BIT last,
 * then pokes CMD0 to trigger transmit/receive demand polling.  Stops
 * the queue when the ring has no free slot left.  Always returns 0
 * (the skb is always consumed).
 */
static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev)
{
	struct amd8111e_priv *lp = dev->priv;
	int tx_index;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;

	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);

	lp->tx_skbuff[tx_index] = skb;
	lp->tx_ring[tx_index].tx_dr_offset2 = 0;

#if AMD8111E_VLAN_TAG_USED
	/* ask the MAC to insert the VLAN tag on the wire */
	if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){
		lp->tx_ring[tx_index].tag_ctrl_cmd |=
			cpu_to_le32(TCC_VLAN_INSERT);
		lp->tx_ring[tx_index].tag_ctrl_info =
			cpu_to_le16(vlan_tx_tag_get(skb));
	}
#endif
	lp->tx_dma_addr[tx_index] =
		pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	lp->tx_ring[tx_index].buff_phy_addr =
		(u32) cpu_to_le32(lp->tx_dma_addr[tx_index]);

	/* Set FCS and LTINT bits and hand ownership to the hardware */
	lp->tx_ring[tx_index].tx_dr_offset2 |=
		cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);

	lp->tx_idx++;

	/* Trigger an immediate send poll. */
	writel( VAL1 | TDMD0, lp->mmio + CMD0);
	writel( VAL2 | RDMD0,lp->mmio + CMD0);

	dev->trans_start = jiffies;

	if(amd8111e_tx_queue_avail(lp) < 0){
		netif_stop_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	return 0;
}
/*
This function returns all the memory mapped registers of the device.
*/
static char* amd8111e_read_regs(struct amd8111e_priv* lp)
{
void * mmio = lp->mmio;
unsigned char * reg_buff;
int i;
reg_buff = kmalloc( AMD8111E_REG_DUMP_LEN,GFP_KERNEL);
if(NULL == reg_buff)
return NULL;
for( i=0; i< AMD8111E_REG_DUMP_LEN;i+=4);
reg_buff[i]= readl(mmio + i);
return reg_buff;
}
/*
 * Install the receive address filter.
 *
 * IFF_PROMISC sets the chip's PROM bit; IFF_ALLMULTI (or more
 * addresses than MAX_FILTER_SIZE) opens the 64-bit logical address
 * filter completely; an empty multicast list clears it; otherwise
 * each address is hashed (top 6 bits of its ethernet CRC) into the
 * LADRF bitmap.
 */
static void amd8111e_set_multicast_list(struct net_device *dev)
{
	struct dev_mc_list* mc_ptr;
	struct amd8111e_priv *lp = dev->priv;
	u32 mc_filter[2] ;
	int i,bit_num;
	if(dev->flags & IFF_PROMISC){
		printk("%s: Setting promiscuous mode.\n",dev->name);
		writel( VAL2 | PROM, lp->mmio + CMD2);
		return;
	}
	else
		/* PROM without the VAL2 qualifier clears the bit */
		writel( PROM, lp->mmio + CMD2);
	if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
		/* get all multicast packet */
		mc_filter[1] = mc_filter[0] = 0xffffffff;
		lp->mc_list = dev->mc_list;
		lp->options |= OPTION_MULTICAST_ENABLE;
		AMD8111E_WRITE_REG64(lp->mmio, LADRF,mc_filter);
		return;
	}
	if( dev->mc_count == 0 ){
		/* get only own packets */
		mc_filter[1] = mc_filter[0] = 0;
		lp->mc_list = 0;
		lp->options &= ~OPTION_MULTICAST_ENABLE;
		AMD8111E_WRITE_REG64(lp->mmio, LADRF,mc_filter);
		/* disable promiscuous mode */
		writel(PROM, lp->mmio + CMD2);
		return;
	}
	/* load all the multicast addresses in the logic filter */
	lp->options |= OPTION_MULTICAST_ENABLE;
	lp->mc_list = dev->mc_list;
	mc_filter[1] = mc_filter[0] = 0;
	for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
		     i++, mc_ptr = mc_ptr->next) {
		/* top 6 CRC bits select one of the 64 filter bits */
		bit_num = ether_crc(ETH_ALEN, mc_ptr->dmi_addr) >> 26;
		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
	}
	AMD8111E_WRITE_REG64(lp->mmio, LADRF, mc_filter);
	return;
}
/*
 * Handle the ethtool sub-ioctls: driver info, get/set link settings,
 * register dump, autoneg restart, link status and WOL get/set.
 * Returns 0 on success or a negative errno.
 *
 * Fix: the ETHTOOL_GLINK case was missing its `return 0;` and fell
 * through into ETHTOOL_GWOL, overwriting the caller's ethtool_value
 * buffer with wake-on-lan data.
 */
static int amd8111e_ethtool_ioctl(struct net_device* dev, void* useraddr)
{
	struct amd8111e_priv *lp = dev->priv;
	struct pci_dev *pci_dev = lp->pci_dev;
	u32 ethcmd;

	if( useraddr == NULL)
		return -EINVAL;
	if(copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
		return -EFAULT;

	switch(ethcmd){
	/* driver identification */
	case ETHTOOL_GDRVINFO:{
		struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
		strcpy (info.driver, MODULE_NAME);
		strcpy (info.version, MODULE_VERSION);
		memset(&info.fw_version, 0, sizeof(info.fw_version));
		strcpy (info.bus_info, pci_dev->slot_name);
		info.eedump_len = 0;
		info.regdump_len = AMD8111E_REG_DUMP_LEN;
		if (copy_to_user (useraddr, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	/* get current link settings */
	case ETHTOOL_GSET:{
		struct ethtool_cmd cmd = { ETHTOOL_GSET };
		if (!lp->opened)
			return -EAGAIN;
		cmd.supported = SUPPORTED_Autoneg |
				SUPPORTED_100baseT_Half |
				SUPPORTED_100baseT_Full |
				SUPPORTED_10baseT_Half |
				SUPPORTED_10baseT_Full |
				SUPPORTED_MII;
		cmd.advertising = ADVERTISED_Autoneg |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_MII;
		cmd.speed = lp->link_config.speed;
		cmd.duplex = lp->link_config.duplex;
		cmd.port = 0;
		cmd.phy_address = PHY_ID;
		cmd.transceiver = XCVR_EXTERNAL;
		cmd.autoneg = lp->link_config.autoneg;
		cmd.maxtxpkt = 0; /* not implemented interrupt coalasing */
		cmd.maxrxpkt = 0; /* not implemented interrupt coalasing */
		if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
			return -EFAULT;
		return 0;
	}
	/* force link settings / enable autoneg */
	case ETHTOOL_SSET: {
		struct ethtool_cmd cmd;
		if (!lp->opened)
			return -EAGAIN;
		if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
			return -EFAULT;

		spin_lock_irq(&lp->lock);
		if(cmd.autoneg == AUTONEG_ENABLE){
			/* keeping the original speeds */
			if((lp->link_config.speed != SPEED_INVALID)&&
			   (lp->link_config.duplex != DUPLEX_INVALID)){
				lp->link_config.orig_speed = lp->link_config.speed;
				lp->link_config.orig_duplex = lp->link_config.duplex;
				lp->link_config.orig_phy_option = lp->ext_phy_option;
			}
			lp->ext_phy_option = XPHYANE;
		}
		else if(cmd.speed == SPEED_100 && cmd.duplex == DUPLEX_HALF)
			lp->ext_phy_option = XPHYSP;
		else if(cmd.speed == SPEED_100 && cmd.duplex == DUPLEX_FULL)
			lp->ext_phy_option = XPHYSP |XPHYFD;
		else if(cmd.speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
			lp->ext_phy_option = 0;
		else if(cmd.speed == SPEED_10 && cmd.duplex == DUPLEX_FULL)
			lp->ext_phy_option = XPHYFD;
		else {
			/* unsupported combination: fall back to the
			   previously saved (original) settings */
			cmd.speed = lp->link_config.orig_speed;
			cmd.duplex = lp->link_config.orig_duplex;
			lp->ext_phy_option = lp->link_config.orig_phy_option;
		}
		lp->link_config.autoneg = cmd.autoneg;
		if (cmd.autoneg == AUTONEG_ENABLE) {
			lp->link_config.speed = SPEED_INVALID;
			lp->link_config.duplex = DUPLEX_INVALID;
		} else {
			lp->link_config.speed = cmd.speed;
			lp->link_config.duplex = cmd.duplex;
		}
		amd8111e_set_ext_phy(dev);
		spin_unlock_irq(&lp->lock);
		return 0;
	}
	/* register dump (via amd8111e_read_regs) */
	case ETHTOOL_GREGS: {
		struct ethtool_regs regs;
		u8 *regbuf;
		int ret;
		if (copy_from_user(&regs, useraddr, sizeof(regs)))
			return -EFAULT;
		if (regs.len > AMD8111E_REG_DUMP_LEN)
			regs.len = AMD8111E_REG_DUMP_LEN;
		regs.version = 0;
		if (copy_to_user(useraddr, &regs, sizeof(regs)))
			return -EFAULT;
		regbuf = amd8111e_read_regs(lp);
		if (!regbuf)
			return -ENOMEM;
		useraddr += offsetof(struct ethtool_regs, data);
		ret = 0;
		if (copy_to_user(useraddr, regbuf, regs.len))
			ret = -EFAULT;
		kfree(regbuf);
		return ret;
	}
	/* restart autonegotiation */
	case ETHTOOL_NWAY_RST: {
		int ret;
		spin_lock_irq(&lp->lock);
		if(lp->link_config.autoneg == AUTONEG_ENABLE){
			lp->ext_phy_option = XPHYANE;
			amd8111e_set_ext_phy(dev);
			ret = 0;
		}else
			ret = -EINVAL;
		spin_unlock_irq(&lp->lock);
		return ret;
	}
	/* link status */
	case ETHTOOL_GLINK: {
		struct ethtool_value val = { ETHTOOL_GLINK };
		val.data = netif_carrier_ok(dev) ? 1 : 0;
		if (copy_to_user(useraddr, &val, sizeof(val)))
			return -EFAULT;
		/* was missing: without this we fell through into
		   ETHTOOL_GWOL and clobbered the user's buffer */
		return 0;
	}
	/* get wake-on-lan configuration */
	case ETHTOOL_GWOL: {
		struct ethtool_wolinfo wol_info = { ETHTOOL_GWOL };
		wol_info.supported = WAKE_MAGIC|WAKE_PHY;
		wol_info.wolopts = 0;
		if (lp->options & OPTION_WOL_ENABLE)
			wol_info.wolopts = WAKE_MAGIC;
		memset(&wol_info.sopass, 0, sizeof(wol_info.sopass));
		if (copy_to_user(useraddr, &wol_info, sizeof(wol_info)))
			return -EFAULT;
		return 0;
	}
	/* set wake-on-lan configuration */
	case ETHTOOL_SWOL: {
		struct ethtool_wolinfo wol_info;
		if (copy_from_user(&wol_info, useraddr, sizeof(wol_info)))
			return -EFAULT;
		if (wol_info.wolopts & ~(WAKE_MAGIC |WAKE_PHY))
			return -EINVAL;
		spin_lock_irq(&lp->lock);
		if(wol_info.wolopts & WAKE_MAGIC)
			lp->options |=
				(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
		else if(wol_info.wolopts & WAKE_PHY)
			lp->options |=
				(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
		else
			lp->options &= ~OPTION_WOL_ENABLE;
		spin_unlock_irq(&lp->lock);
		return 0;
	}
	default:
		break;
	}
	return -EOPNOTSUPP;
}
/*
 * Read one PHY register through the MII management (PHY_ACCESS) port.
 * Waits for any in-flight command, issues the read, then polls for
 * completion (each access takes about 30us).  On success the 16-bit
 * register value is stored in *val and 0 is returned; on a read error
 * *val is zeroed and -EINVAL is returned.  Caller holds lp->lock.
 */
static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
{
	void *mmio = lp->mmio;
	unsigned int status;
	unsigned int tries = REPEAT_CNT;

	/* Wait until any previous PHY command has completed. */
	do {
		status = readl(mmio + PHY_ACCESS);
	} while (status & PHY_CMD_ACTIVE);

	/* Issue the read: PHY address in bits 25:21, register in 20:16. */
	writel(PHY_RD_CMD | ((phy_id & 0x1f) << 21) | ((reg & 0x1f) << 16),
	       mmio + PHY_ACCESS);

	/* Poll for completion, bounded by REPEAT_CNT iterations. */
	do {
		status = readl(mmio + PHY_ACCESS);
		udelay(30);	/* It takes 30 us to read/write data */
	} while (--tries && (status & PHY_CMD_ACTIVE));

	if (status & PHY_RD_ERR) {
		*val = 0;
		return -EINVAL;
	}

	*val = status & 0xffff;
	return 0;
}
/*
 * Write one PHY register through the MII management (PHY_ACCESS) port.
 * Waits for any in-flight command, issues the write, then polls for
 * completion (each access takes about 30us).  Returns 0 on success or
 * -EINVAL if the hardware flags an error.  Caller holds lp->lock.
 *
 * Fix: the original line 'unsigned int repeat = REPEAT_CNT' had no
 * semicolon and only compiled because the REPEAT_CNT macro carries a
 * stray trailing ';'.  The statement terminator is now explicit (and
 * the declaration moved last so the macro's extra ';' stays harmless).
 */
static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
{
	void * mmio = lp->mmio;
	unsigned int reg_val;
	unsigned int repeat = REPEAT_CNT;

	/* Wait until any previous PHY command has completed. */
	reg_val = readl(mmio + PHY_ACCESS);
	while (reg_val & PHY_CMD_ACTIVE)
		reg_val = readl(mmio + PHY_ACCESS);

	/* Issue the write: PHY address in bits 25:21, register in 20:16,
	   data in 15:0. */
	writel(PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
	       ((reg & 0x1f) << 16) | val, mmio + PHY_ACCESS);

	/* Poll for completion, bounded by REPEAT_CNT iterations. */
	do {
		reg_val = readl(mmio + PHY_ACCESS);
		udelay(30);	/* It takes 30 us to read/write the data */
	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));

	/* NOTE(review): the error bit checked here is PHY_RD_ERR; the
	   hardware presumably reports write failures in the same bit —
	   confirm against the AMD 8111 datasheet. */
	if (reg_val & PHY_RD_ERR)
		return -EINVAL;

	return 0;
}
/*
 * Driver ioctl entry point: dispatches ethtool requests and MII
 * register access.  All commands require CAP_NET_ADMIN; anything
 * unrecognized gets -EOPNOTSUPP.
 */
static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr->ifr_data;
	struct amd8111e_priv *priv = dev->priv;
	u32 regval;
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case SIOCETHTOOL:
		return amd8111e_ethtool_ioctl(dev, (void *) ifr->ifr_data);

	case SIOCGMIIPHY:
		/* Report the (fixed) PHY address, then fall through to
		   read the requested register exactly like SIOCGMIIREG. */
		mii->phy_id = PHY_ID;
		/* fallthru */
	case SIOCGMIIREG:
		spin_lock_irq(&priv->lock);
		rc = amd8111e_read_phy(priv, mii->phy_id,
				       mii->reg_num & PHY_REG_ADDR_MASK,
				       &regval);
		spin_unlock_irq(&priv->lock);
		mii->val_out = regval;
		return rc;

	case SIOCSMIIREG:
		spin_lock_irq(&priv->lock);
		rc = amd8111e_write_phy(priv, mii->phy_id,
					mii->reg_num & PHY_REG_ADDR_MASK,
					mii->val_in);
		spin_unlock_irq(&priv->lock);
		return rc;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
/*
 * This function changes the MTU of the device. If the interface is
 * running, it stops the chip and restarts it so the receive descriptor
 * ring is re-initialized with buffers of the new size.
 */
int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
{
	struct amd8111e_priv *lp = dev->priv;
	int err;

	/* Hardware supports 60..9000 byte frames. */
	if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* new_mtu will be used
		   when device starts next time */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&lp->lock);

	/* stop the chip: CMD0 is a command-style register, so writing RUN
	   without its VAL0 select bit clears the RUN bit (see the
	   command-style access note in amd8111e.h). */
	writel(RUN, lp->mmio + CMD0);

	dev->mtu = new_mtu;

	/* if (new_mtu > ETH_DATA_LEN)
		lp->options |= OPTION_JUMBO_ENABLE;
	else
		lp->options &= ~OPTION_JUMBO_ENABLE;
	*/
	/* Re-allocate rings/buffers and restart the chip.  NOTE(review):
	   the queue is started even if restart failed — confirm intended. */
	err = amd8111e_restart(dev);
	spin_unlock_irq(&lp->lock);
	netif_start_queue(dev);
	return err;
}
#if AMD8111E_VLAN_TAG_USED
/* Attach the VLAN group to the device (called by the VLAN layer). */
static void amd8111e_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct amd8111e_priv *priv = dev->priv;

	spin_lock_irq(&priv->lock);
	priv->vlgrp = grp;
	spin_unlock_irq(&priv->lock);
}

/* Remove one VLAN id from the attached group, if any. */
static void amd8111e_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct amd8111e_priv *priv = dev->priv;

	spin_lock_irq(&priv->lock);
	if (priv->vlgrp != NULL)
		priv->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irq(&priv->lock);
}
#endif
/* Arm magic-packet (Wake-on-LAN) detection.  CMD3 and CMD7 are
   command-style registers: the VALx bit selects which byte's bits are
   actually written (see the command-style note in amd8111e.h). */
static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
{
	writel( VAL1|MPPLBA, lp->mmio + CMD3);
	writel( VAL0|MPEN_SW, lp->mmio + CMD7);
	return 0;
}
/* Arm wake-up on link-state change (LCMODE_SW in command-style CMD7). */
static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
{
	/* Adapter is already stopped/suspended/interrupt-disabled */
	writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
	return 0;
}
/*
 * Set the PCI power state of the device via its PM capability's
 * control register.  For D0 only the state bits are written; for
 * D1..D3 the configured wake-up sources (magic packet / link change)
 * are armed before entering the low-power state.  Returns 0 on
 * success or -EINVAL for an invalid state.
 */
static int amd8111e_set_power_state(struct amd8111e_priv* lp, u32 state)
{
	u16 power_control;
	int pm = lp->pm_cap;

	pci_read_config_word(lp->pci_dev,
			     pm + PCI_PM_CTRL,
			     &power_control);

	/* Clear any pending PME status and the current power-state bits. */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);

	if (state == 0) {
		/* D0: no wake-up setup needed, just write it back. */
		pci_write_config_word(lp->pci_dev,
				      pm + PCI_PM_CTRL,
				      power_control);
		return 0;
	}

	if (state > 3) {
		printk(KERN_WARNING "%s: Invalid power state (%d) requested.\n",
		       lp->amd8111e_net_dev->name, state);
		return -EINVAL;
	}

	/* D1..D3: encode the state in the low bits of PMCSR. */
	power_control |= state;

	/* Arm the enabled wake-up sources before going to low power. */
	if (lp->options & OPTION_WAKE_MAGIC_ENABLE)
		amd8111e_enable_magicpkt(lp);
	if (lp->options & OPTION_WAKE_PHY_ENABLE)
		amd8111e_enable_link_change(lp);

	/* Setting new power state. */
	pci_write_config_word(lp->pci_dev, pm + PCI_PM_CTRL, power_control);
	return 0;
}
/*
 * PCI suspend handler: mask interrupts, detach the interface, stop the
 * chip, then enter the requested power state.  If the power transition
 * fails, the device is restarted and re-attached so it keeps working.
 */
static int amd8111e_suspend(struct pci_dev *pci_dev, u32 state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = dev->priv;
	int err;

	if (!netif_running(dev))
		return 0;

	/* disable the interrupt */
	spin_lock_irq(&lp->lock);
	amd8111e_disable_interrupt(lp);
	spin_unlock_irq(&lp->lock);

	netif_device_detach(dev);

	/* stop chip */
	spin_lock_irq(&lp->lock);
	amd8111e_stop_chip(lp);
	spin_unlock_irq(&lp->lock);

	err = amd8111e_set_power_state(lp, state);
	if (err) {
		/* Could not enter the low-power state: bring the device
		   back up rather than leaving it detached. */
		spin_lock_irq(&lp->lock);
		amd8111e_restart(dev);
		spin_unlock_irq(&lp->lock);
		netif_device_attach(dev);
	}
	return err;
}
/*
 * PCI resume handler: return the chip to D0, re-attach the interface
 * and restart the hardware.
 */
static int amd8111e_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = dev->priv;
	int err;

	if (!netif_running(dev))
		return 0;

	/* Back to full power (D0) before touching the device. */
	err = amd8111e_set_power_state(lp, 0);
	if (err)
		return err;

	netif_device_attach(dev);

	spin_lock_irq(&lp->lock);
	amd8111e_restart(dev);
	spin_unlock_irq(&lp->lock);

	return 0;
}
/*
 * PCI remove handler: unregister the interface and release everything
 * acquired in amd8111e_probe_one(), in reverse order.
 */
static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	if (dev) {
		unregister_netdev(dev);
		/* mmio was obtained with ioremap() in probe */
		iounmap((void *) ((struct amd8111e_priv *)(dev->priv))->mmio);
		kfree(dev);	/* net_device from alloc_etherdev() */
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
/*
 * PCI probe: enable the device, map its MMIO registers, allocate and
 * initialize the net_device, and register it with the network stack.
 *
 * Fixes over the original:
 *  - 'err' was left at 0 (success) on the missing-PM-capability and
 *    DMA-not-supported failure paths, so probe reported success while
 *    bailing out; both now return -ENODEV.
 *  - the speed_duplex module-parameter check used '>' against
 *    sizeof(speed_duplex_mapping), allowing an off-by-one out-of-bounds
 *    read (index == size); it now rejects that and negative values.
 *  - the VLAN feature/hook registration appeared twice; one copy removed.
 */
static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	int err,i,pm_cap;
	unsigned long reg_addr,reg_len;
	struct amd8111e_priv* lp;
	struct net_device* dev;
	unsigned int chip_version;

	err = pci_enable_device(pdev);
	if(err){
		printk(KERN_ERR "amd8111e: Cannot enable new PCI device,"
			"exiting.\n");
		return err;
	}

	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
		printk(KERN_ERR "amd8111e: Cannot find PCI base address"
		       "exiting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, MODULE_NAME);
	if(err){
		printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
		       "exiting.\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
		printk(KERN_ERR "amd8111e: No Power Management capability, "
		       "exiting.\n");
		err = -ENODEV;	/* fix: was falling through with err == 0 */
		goto err_free_reg;
	}

	/* Initialize DMA: the chip is a 32-bit bus master. */
	if(!pci_dma_supported(pdev, 0xffffffff)){
		printk(KERN_ERR "amd8111e: DMA not supported,"
			"exiting.\n");
		err = -ENODEV;	/* fix: was falling through with err == 0 */
		goto err_free_reg;
	} else
		pdev->dma_mask = 0xffffffff;

	reg_addr = pci_resource_start(pdev, 0);
	reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(struct amd8111e_priv));
	if (!dev) {
		printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
		err = -ENOMEM;
		goto err_free_reg;
	}
	SET_MODULE_OWNER(dev);

	lp = dev->priv;
	memset (lp, 0, sizeof (*lp));
	lp->pci_dev = pdev;
	lp->amd8111e_net_dev = dev;
	lp->pm_cap = pm_cap;
	spin_lock_init(&lp->lock);

	lp->mmio = ioremap(reg_addr, reg_len);
	if (!lp->mmio) {
		printk(KERN_ERR "amd8111e: Cannot map device registers, "
		       "exiting\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	/* Initializing MAC address from the chip's PADR register. */
	for(i = 0; i < ETH_ADDR_LEN; i++)
		dev->dev_addr[i] =readb(lp->mmio + PADR + i);

	/* Setting user defined speed; fall back to auto-negotiation when
	   the module parameter is out of range (valid indices 0..4).
	   Fix: the original '>' check permitted index == array size. */
	if (speed_duplex[card_idx] < 0 ||
	    speed_duplex[card_idx] >= (int)sizeof(speed_duplex_mapping))
		lp->ext_phy_option = XPHYANE;
	else
		lp->ext_phy_option =
			speed_duplex_mapping[speed_duplex[card_idx]];

	/* Initialize driver entry points */
	dev->open = amd8111e_open;
	dev->hard_start_xmit = amd8111e_start_xmit;
	dev->stop = amd8111e_close;
	dev->get_stats = amd8111e_get_stats;
	dev->set_multicast_list = amd8111e_set_multicast_list;
	dev->do_ioctl = amd8111e_ioctl;
	dev->change_mtu = amd8111e_change_mtu;
	dev->irq =pdev->irq;

#if AMD8111E_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register =amd8111e_vlan_rx_register;
	dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
#endif

	/* Set receive buffer length and set jumbo option*/
	amd8111e_set_rx_buff_len(dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR "amd8111e: Cannot register net device, "
		       "exiting.\n");
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* display driver and device information */
	chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
	printk("%s: AMD-8111e Driver Version: %s\n",dev->name,MODULE_VERSION);
	printk("%s: [ Rev %x ] PCI 10/100BaseT Ethernet ", dev->name, chip_version);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],i == 5 ? ' ' : ':');
	printk("\n");
	return 0;

err_iounmap:
	iounmap((void *) lp->mmio);
err_free_dev:
	kfree(dev);
err_free_reg:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
/* PCI driver glue (GNU-style initializers, matching this 2.5-era file). */
static struct pci_driver amd8111e_driver = {
	name:		MODULE_NAME,
	id_table:	amd8111e_pci_tbl,
	probe:		amd8111e_probe_one,
	remove:		__devexit_p(amd8111e_remove_one),
	suspend:	amd8111e_suspend,
	resume:		amd8111e_resume
};
/* Module entry point: register the PCI driver with the core. */
static int __init amd8111e_init(void)
{
	return pci_module_init(&amd8111e_driver);
}
/* Module exit: unregister the PCI driver (detaches all devices). */
static void __exit amd8111e_cleanup(void)
{
	pci_unregister_driver(&amd8111e_driver);
}

module_init(amd8111e_init);
module_exit(amd8111e_cleanup);
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
Module Name:
amd8111e.h
Abstract:
AMD8111 based 10/100 Ethernet Controller driver definitions.
Environment:
Kernel Mode
Revision History:
*/
#ifndef _AMD811E_H
#define _AMD811E_H
/* Hardware definitions */
#define B31_MASK 0x80000000
#define B30_MASK 0X40000000
#define B29_MASK 0x20000000
#define B28_MASK 0x10000000
#define B27_MASK 0x08000000
#define B26_MASK 0x04000000
#define B25_MASK 0x02000000
#define B24_MASK 0x01000000
#define B23_MASK 0x00800000
#define B22_MASK 0x00400000
#define B21_MASK 0x00200000
#define B20_MASK 0x00100000
#define B19_MASK 0x00080000
#define B18_MASK 0x00040000
#define B17_MASK 0x00020000
#define B16_MASK 0x00010000
#define B15_MASK 0x8000
#define B14_MASK 0x4000
#define B13_MASK 0x2000
#define B12_MASK 0x1000
#define B11_MASK 0x0800
#define B10_MASK 0x0400
#define B9_MASK 0x0200
#define B8_MASK 0x0100
#define B7_MASK 0x0080
#define B6_MASK 0x0040
#define B5_MASK 0x0020
#define B4_MASK 0x0010
#define B3_MASK 0x0008
#define B2_MASK 0x0004
#define B1_MASK 0x0002
#define B0_MASK 0x0001
/* PCI register offset */
#define PCI_ID_REG 0x00
#define PCI_COMMAND_REG 0x04
/* #define MEMEN_BIT B1_MASK */
/* #define IOEN_BIT B0_MASK */
#define PCI_REV_ID_REG 0x08
#define PCI_MEM_BASE_REG 0x10
/* #define MEMBASE_MASK 0xFFFFF000 */
/* #define MEMBASE_SIZE 4096 */
#define PCI_INTR_REG 0x3C
#define PCI_STATUS_REG 0x06
#define PCI_CAP_ID_REG_OFFSET 0x34
#define PCI_PMC_REG_OFFSET 0x36
#define PCI_PMCSR_REG_OFFSET 0x38
/* #define NEW_CAP 0x0010 */
#define PME_EN 0x0100
#define PARTID_MASK 0xFFFFF000
#define PARTID_START_BIT 12
/* #define LANCE_DWIO_RESET_PORT 0x18
#define LANCE_WIO_RESET_PORT 0x14 */
#define MIB_OFFSET 0x28
/* Command style register access
Registers CMD0, CMD2, CMD3,CMD7 and INTEN0 uses a write access technique called command style access. It allows the write to selected bits of this register without altering the bits that are not selected. Command style registers are divided into 4 bytes that can be written independently. Higher order bit of each byte is the value bit that specifies the value that will be written into the selected bits of register.
eg., if the value 10011010b is written into the least significant byte of a command style register, bits 1,3 and 4 of the register will be set to 1, and the other bits will not be altered. If the value 00011010b is written into the same byte, bits 1,3 and 4 will be cleared to 0 and the other bits will not be altered.
*/
/* Offset for Memory Mapped Registers. */
/* 32 bit registers */
#define ASF_STAT 0x00 /* ASF status register */
#define CHIPID 0x04 /* Chip ID regsiter */
#define MIB_DATA 0x10 /* MIB data register */
#define MIB_ADDR 0x14 /* MIB address register */
#define STAT0 0x30 /* Status0 register */
#define INT0 0x38 /* Interrupt0 register */
#define INTEN0 0x40 /* Interrupt0 enable register*/
#define CMD0 0x48 /* Command0 register */
#define CMD2 0x50 /* Command2 register */
#define CMD3 0x54 /* Command3 resiter */
#define CMD7 0x64 /* Command7 register */
#define CTRL1 0x6C /* Control1 register */
#define CTRL2 0x70 /* Control2 register */
#define XMT_RING_LIMIT 0x7C /* Transmit ring limit register */
#define AUTOPOLL0 0x88 /* Auto-poll0 register */
#define AUTOPOLL1 0x8A /* Auto-poll1 register */
#define AUTOPOLL2 0x8C /* Auto-poll2 register */
#define AUTOPOLL3 0x8E /* Auto-poll3 register */
#define AUTOPOLL4 0x90 /* Auto-poll4 register */
#define AUTOPOLL5 0x92 /* Auto-poll5 register */
#define AP_VALUE 0x98 /* Auto-poll value register */
#define DLY_INT_A 0xA8 /* Group A delayed interrupt register */
#define DLY_INT_B 0xAC /* Group B delayed interrupt register */
#define FLOW_CONTROL 0xC8 /* Flow control register */
#define PHY_ACCESS 0xD0 /* PHY access register */
#define STVAL 0xD8 /* Software timer value register */
#define XMT_RING_BASE_ADDR0 0x100 /* Transmit ring0 base addr register */
#define XMT_RING_BASE_ADDR1 0x108 /* Transmit ring1 base addr register */
#define XMT_RING_BASE_ADDR2 0x110 /* Transmit ring2 base addr register */
#define XMT_RING_BASE_ADDR3 0x118 /* Transmit ring2 base addr register */
#define RCV_RING_BASE_ADDR0 0x120 /* Transmit ring0 base addr register */
#define PMAT0 0x190 /* OnNow pattern register0 */
#define PMAT1 0x194 /* OnNow pattern register1 */
/* 16bit registers */
#define XMT_RING_LEN0 0x140 /* Transmit Ring0 length register */
#define XMT_RING_LEN1 0x144 /* Transmit Ring1 length register */
#define XMT_RING_LEN2 0x148 /* Transmit Ring2 length register */
#define XMT_RING_LEN3 0x14C /* Transmit Ring3 length register */
#define RCV_RING_LEN0 0x150 /* Transmit Ring0 length register */
#define SRAM_SIZE 0x178 /* SRAM size register */
#define SRAM_BOUNDARY 0x17A /* SRAM boundary register */
/* 48bit register */
#define PADR 0x160 /* Physical address register */
/* 64bit register */
#define LADRF 0x168 /* Logical address filter register */
/* 8bit regsisters */
#define IFS1 0x18C /* Inter-frame spacing Part1 register */
#define IFS 0x18D /* Inter-frame spacing register */
/* Register Bit Definitions */
/* STAT_ASF 0x00, 32bit register */
#define ASF_INIT_DONE B1_MASK
#define ASF_INIT_PRESENT B0_MASK
/* MIB_ADDR 0x14, 16bit register */
#define MIB_CMD_ACTIVE B15_MASK
#define MIB_RD_CMD B13_MASK
#define MIB_CLEAR B12_MASK
#define MIB_ADDRESS 0x0000003F /* 5:0 */
/* QOS_ADDR 0x1C, 16bit register */
#define QOS_CMD_ACTIVE B15_MASK
#define QOS_WR_CMD B14_MASK
#define QOS_RD_CMD B13_MASK
#define QOS_ADDRESS 0x0000001F /* 4:0 */
/* STAT0 0x30, 32bit register */
#define PAUSE_PEND B14_MASK
#define PAUSING B13_MASK
#define PMAT_DET B12_MASK
#define MP_DET B11_MASK
#define LC_DET B10_MASK
#define SPEED_MASK 0x0380 /* 9:7 */
#define FULL_DPLX B6_MASK
#define LINK_STATS B5_MASK
#define AUTONEG_COMPLETE B4_MASK
#define MIIPD B3_MASK
#define RX_SUSPENDED B2_MASK
#define TX_SUSPENDED B1_MASK
#define RUNNING B0_MASK
#define PHY_SPEED_10 0x2
#define PHY_SPEED_100 0x3
/* INT0 0x38, 32bit register */
#define INTR B31_MASK
#define PCSINT B28_MASK
#define LCINT B27_MASK
#define APINT5 B26_MASK
#define APINT4 B25_MASK
#define APINT3 B24_MASK
#define TINT_SUM B23_MASK
#define APINT2 B22_MASK
#define APINT1 B21_MASK
#define APINT0 B20_MASK
#define MIIPDTINT B19_MASK
#define MCCIINT B18_MASK
#define MCCINT B17_MASK
#define MREINT B16_MASK
#define RINT_SUM B15_MASK
#define SPNDINT B14_MASK
#define MPINT B13_MASK
#define SINT B12_MASK
#define TINT3 B11_MASK
#define TINT2 B10_MASK
#define TINT1 B9_MASK
#define TINT0 B8_MASK
#define UINT B7_MASK
#define STINT B4_MASK
#define RINT3 B3_MASK
#define RINT2 B2_MASK
#define RINT1 B1_MASK
#define RINT0 B0_MASK
/* INTEN0 0x40, 32bit register */
#define VAL3 B31_MASK /* VAL bit for byte 3 */
#define VAL2 B23_MASK /* VAL bit for byte 2 */
#define VAL1 B15_MASK /* VAL bit for byte 1 */
#define VAL0 B7_MASK /* VAL bit for byte 0 */
/* VAL3 */
#define PSCINTEN B28_MASK
#define LCINTEN B27_MASK
#define APINT5EN B26_MASK
#define APINT4EN B25_MASK
#define APINT3EN B24_MASK
/* VAL2 */
#define APINT2EN B22_MASK
#define APINT1EN B21_MASK
#define APINT0EN B20_MASK
#define MIIPDTINTEN B19_MASK
#define MCCIINTEN B18_MASK
#define MCCINTEN B17_MASK
#define MREINTEN B16_MASK
/* VAL1 */
#define SPNDINTEN B14_MASK
#define MPINTEN B13_MASK
#define SINTEN B12_MASK
#define TINTEN3 B11_MASK
#define TINTEN2 B10_MASK
#define TINTEN1 B9_MASK
#define TINTEN0 B8_MASK
/* VAL0 */
#define STINTEN B4_MASK
#define RINTEN3 B3_MASK
#define RINTEN2 B2_MASK
#define RINTEN1 B1_MASK
#define RINTEN0 B0_MASK
#define INTEN0_CLEAR 0x1F7F7F1F /* Command style register */
/* CMD0 0x48, 32bit register */
/* VAL2 */
#define RDMD3 B19_MASK
#define RDMD2 B18_MASK
#define RDMD1 B17_MASK
#define RDMD0 B16_MASK
/* VAL1 */
#define TDMD3 B11_MASK
#define TDMD2 B10_MASK
#define TDMD1 B9_MASK
#define TDMD0 B8_MASK
/* VAL0 */
#define UINTCMD B6_MASK
#define RX_FAST_SPND B5_MASK
#define TX_FAST_SPND B4_MASK
#define RX_SPND B3_MASK
#define TX_SPND B2_MASK
#define INTREN B1_MASK
#define RUN B0_MASK
#define CMD0_CLEAR 0x000F0F7F /* Command style register */
/* CMD2 0x50, 32bit register */
/* VAL3 */
#define CONDUIT_MODE B29_MASK
/* VAL2 */
#define RPA B19_MASK
#define DRCVPA B18_MASK
#define DRCVBC B17_MASK
#define PROM B16_MASK
/* VAL1 */
#define ASTRP_RCV B13_MASK
#define FCOLL B12_MASK
#define EMBA B11_MASK
#define DXMT2PD B10_MASK
#define LTINTEN B9_MASK
#define DXMTFCS B8_MASK
/* VAL0 */
#define APAD_XMT B6_MASK
#define DRTY B5_MASK
#define INLOOP B4_MASK
#define EXLOOP B3_MASK
#define REX_RTRY B2_MASK
#define REX_UFLO B1_MASK
#define REX_LCOL B0_MASK
#define CMD2_CLEAR 0x3F7F3F7F /* Command style register */
/* CMD3 0x54, 32bit register */
/* VAL3 */
#define ASF_INIT_DONE_ALIAS B29_MASK
/* VAL2 */
#define JUMBO B21_MASK
#define VSIZE B20_MASK
#define VLONLY B19_MASK
#define VL_TAG_DEL B18_MASK
/* VAL1 */
#define EN_PMGR B14_MASK
#define INTLEVEL B13_MASK
#define FORCE_FULL_DUPLEX B12_MASK
#define FORCE_LINK_STATUS B11_MASK
#define APEP B10_MASK
#define MPPLBA B9_MASK
/* VAL0 */
#define RESET_PHY_PULSE B2_MASK
#define RESET_PHY B1_MASK
#define PHY_RST_POL B0_MASK
/* CMD7 0x64, 32bit register */
/* VAL0 */
#define PMAT_SAVE_MATCH B4_MASK
#define PMAT_MODE B3_MASK
#define MPEN_SW B1_MASK
#define LCMODE_SW B0_MASK
#define CMD7_CLEAR 0x0000001B /* Command style register */
/* CTRL0 0x68, 32bit register */
#define PHY_SEL 0x03000000 /* 25:24 */
#define RESET_PHY_WIDTH 0x00FF0000 /* 23:16 */
#define BSWP_REGS B10_MASK
#define BSWP_DESC B9_MASK
#define BSWP_DATA B8_MASK
#define CACHE_ALIGN B4_MASK
#define BURST_LIMIT 0x0000000F /* 3:0 */
/* CTRL1 0x6C, 32bit register */
#define SLOTMOD_MASK 0x03000000 /* 25:24 */
#define XMTSP_MASK 0x300 /* 17:16 */
#define XMTSP_128 0x200
#define XMTSP_64 0x100
#define CRTL1_DEFAULT 0x00000017
/* CTRL2 0x70, 32bit register */
#define FS_MASK 0x00070000 /* 18:16 */
#define FMDC_MASK 0x00000300 /* 9:8 */
#define XPHYRST B7_MASK
#define XPHYANE B6_MASK
#define XPHYFD B5_MASK
#define XPHYSP B3_MASK /* 4:3 */
#define APDW_MASK 0x00000007 /* 2:0 */
/* RCV_RING_CFG 0x78, 16bit register */
#define RCV_DROP3 B11_MASK
#define RCV_DROP2 B10_MASK
#define RCV_DROP1 B9_MASK
#define RCV_DROP0 B8_MASK
#define RCV_RING_DEFAULT 0x0030 /* 5:4 */
#define RCV_RING3_EN B3_MASK
#define RCV_RING2_EN B2_MASK
#define RCV_RING1_EN B1_MASK
#define RCV_RING0_EN B0_MASK
/* XMT_RING_LIMIT 0x7C, 32bit register */
#define XMT_RING2_LIMIT 0x00FF0000 /* 23:16 */
#define XMT_RING1_LIMIT 0x0000FF00 /* 15:8 */
#define XMT_RING0_LIMIT 0x000000FF /* 7:0 */
/* AUTOPOLL0 0x88, 16bit register */
#define AP_REG0_EN B15_MASK
#define AP_REG0_ADDR_MASK 0x1F00 /* 12:8 */
#define AP_PHY0_ADDR_MASK 0x001F /* 4:0 */
/* AUTOPOLL1 0x8A, 16bit register */
#define AP_REG1_EN B15_MASK
#define AP_REG1_ADDR_MASK 0x1F00 /* 12:8 */
#define AP_PRE_SUP1 B6_MASK
#define AP_PHY1_DFLT B5_MASK
#define AP_PHY1_ADDR_MASK 0x001F /* 4:0 */
/* AUTOPOLL2 0x8C, 16bit register */
#define AP_REG2_EN B15_MASK
#define AP_REG2_ADDR_MASK 0x1F00 /* 12:8 */
#define AP_PRE_SUP2 B6_MASK
#define AP_PHY2_DFLT B5_MASK
#define AP_PHY2_ADDR_MASK 0x001F /* 4:0 */
/* AUTOPOLL3 0x8E, 16bit register */
#define AP_REG3_EN B15_MASK
#define AP_REG3_ADDR_MASK 0x1F00 /* 12:8 */
#define AP_PRE_SUP3 B6_MASK
#define AP_PHY3_DFLT B5_MASK
#define AP_PHY3_ADDR_MASK 0x001F /* 4:0 */
/* AUTOPOLL4 0x90, 16bit register */
#define AP_REG4_EN B15_MASK
#define AP_REG4_ADDR_MASK 0x1F00 /* 12:8 */
#define AP_PRE_SUP4 B6_MASK
#define AP_PHY4_DFLT B5_MASK
#define AP_PHY4_ADDR_MASK 0x001F /* 4:0 */
/* AUTOPOLL5 0x92, 16bit register */
#define AP_REG5_EN B15_MASK
#define AP_REG5_ADDR_MASK 0x1F00 /* 12:8 */
#define AP_PRE_SUP5 B6_MASK
#define AP_PHY5_DFLT B5_MASK
#define AP_PHY5_ADDR_MASK 0x001F /* 4:0 */
/* AP_VALUE 0x98, 32bit ragister */
#define AP_VAL_ACTIVE B31_MASK
#define AP_VAL_RD_CMD B29_MASK
#define AP_ADDR 0x00070000 /* 18:16 */
#define AP_VAL 0x0000FFFF /* 15:0 */
/* PCS_ANEG 0x9C, 32bit register */
#define SYNC_LOST B10_MASK
#define IMATCH B9_MASK
#define CMATCH B8_MASK
#define PCS_AN_IDLE B1_MASK
#define PCS_AN_CFG B0_MASK
/* DLY_INT_A 0xA8, 32bit register */
#define DLY_INT_A_R3 B31_MASK
#define DLY_INT_A_R2 B30_MASK
#define DLY_INT_A_R1 B29_MASK
#define DLY_INT_A_R0 B28_MASK
#define DLY_INT_A_T3 B27_MASK
#define DLY_INT_A_T2 B26_MASK
#define DLY_INT_A_T1 B25_MASK
#define DLY_INT_A_T0 B24_MASK
#define EVENT_COUNT_A 0x00FF0000 /* 20:16 */
#define MAX_DELAY_TIME_A 0x000007FF /* 10:0 */
/* DLY_INT_B 0xAC, 32bit register */
#define DLY_INT_B_R3 B31_MASK
#define DLY_INT_B_R2 B30_MASK
#define DLY_INT_B_R1 B29_MASK
#define DLY_INT_B_R0 B28_MASK
#define DLY_INT_B_T3 B27_MASK
#define DLY_INT_B_T2 B26_MASK
#define DLY_INT_B_T1 B25_MASK
#define DLY_INT_B_T0 B24_MASK
#define EVENT_COUNT_B 0x00FF0000 /* 20:16 */
#define MAX_DELAY_TIME_B 0x000007FF /* 10:0 */
/* DFC_THRESH2 0xC0, 16bit register */
#define DFC_THRESH2_HIGH 0xFF00 /* 15:8 */
#define DFC_THRESH2_LOW 0x00FF /* 7:0 */
/* DFC_THRESH3 0xC2, 16bit register */
#define DFC_THRESH3_HIGH 0xFF00 /* 15:8 */
#define DFC_THRESH3_LOW 0x00FF /* 7:0 */
/* DFC_THRESH0 0xC4, 16bit register */
#define DFC_THRESH0_HIGH 0xFF00 /* 15:8 */
#define DFC_THRESH0_LOW 0x00FF /* 7:0 */
/* DFC_THRESH1 0xC6, 16bit register */
#define DFC_THRESH1_HIGH 0xFF00 /* 15:8 */
#define DFC_THRESH1_LOW 0x00FF /* 7:0 */
/* FLOW_CONTROL 0xC8, 32bit register */
#define PAUSE_LEN_CHG B30_MASK
#define FFC_EN B28_MASK
#define DFC_RING3_EN B27_MASK
#define DFC_RING2_EN B26_MASK
#define DFC_RING1_EN B25_MASK
#define DFC_RING0_EN B24_MASK
#define FIXP_CONGEST B21_MASK
#define FPA B20_MASK
#define NPA B19_MASK
#define FIXP B18_MASK
#define FCPEN B17_MASK
#define FCCMD B16_MASK
#define PAUSE_LEN 0x0000FFFF /* 15:0 */
/* FFC THRESH 0xCC, 32bit register */
#define FFC_HIGH 0xFFFF0000 /* 31:16 */
#define FFC_LOW 0x0000FFFF /* 15:0 */
/* PHY_ ACCESS 0xD0, 32bit register */
#define PHY_CMD_ACTIVE B31_MASK
#define PHY_WR_CMD B30_MASK
#define PHY_RD_CMD B29_MASK
#define PHY_RD_ERR B28_MASK
#define PHY_PRE_SUP B27_MASK
#define PHY_ADDR 0x03E00000 /* 25:21 */
#define PHY_REG_ADDR 0x001F0000 /* 20:16 */
#define PHY_DATA 0x0000FFFF /* 15:0 */
/* LED0..3 0xE0..0xE6, 16bit register */
#define LEDOUT B15_MASK
#define LEDPOL B14_MASK
#define LEDDIS B13_MASK
#define LEDSTRETCH B12_MASK
#define LED1000 B8_MASK
#define LED100 B7_MASK
#define LEDMP B6_MASK
#define LEDFD B5_MASK
#define LEDLINK B4_MASK
#define LEDRCVMAT B3_MASK
#define LEDXMT B2_MASK
#define LEDRCV B1_MASK
#define LEDCOLOUT B0_MASK
/* EEPROM_ACC 0x17C, 16bit register */
#define PVALID		B15_MASK
#define PREAD		B14_MASK
#define EEDET		B13_MASK
#define EEN		B4_MASK
#define ECS		B2_MASK
#define EESK		B1_MASK
/* Fix: was 'b0_MASK' (lowercase), an undefined symbol; any use of
   edi_edo would not have compiled.  Name kept as-is for compatibility. */
#define edi_edo		B0_MASK
/* PMAT0 0x190, 32bit register */
#define PMR_ACTIVE B31_MASK
#define PMR_WR_CMD B30_MASK
#define PMR_RD_CMD B29_MASK
#define PMR_BANK B28_MASK
#define PMR_ADDR 0x007F0000 /* 22:16 */
#define PMR_B4 0x000000FF /* 15:0 */
/* PMAT1 0x194, 32bit register */
#define PMR_B3 0xFF000000 /* 31:24 */
#define PMR_B2 0x00FF0000 /* 23:16 */
#define PMR_B1 0x0000FF00 /* 15:8 */
#define PMR_B0 0x000000FF /* 7:0 */
/************************************************************************/
/* */
/* MIB counter definitions */
/* */
/************************************************************************/
#define rcv_miss_pkts 0x00
#define rcv_octets 0x01
#define rcv_broadcast_pkts 0x02
#define rcv_multicast_pkts 0x03
#define rcv_undersize_pkts 0x04
#define rcv_oversize_pkts 0x05
#define rcv_fragments 0x06
#define rcv_jabbers 0x07
#define rcv_unicast_pkts 0x08
#define rcv_alignment_errors 0x09
#define rcv_fcs_errors 0x0A
#define rcv_good_octets 0x0B
#define rcv_mac_ctrl 0x0C
#define rcv_flow_ctrl 0x0D
#define rcv_pkts_64_octets 0x0E
#define rcv_pkts_65to127_octets 0x0F
#define rcv_pkts_128to255_octets 0x10
#define rcv_pkts_256to511_octets 0x11
#define rcv_pkts_512to1023_octets 0x12
#define rcv_pkts_1024to1518_octets 0x13
#define rcv_unsupported_opcode 0x14
#define rcv_symbol_errors 0x15
#define rcv_drop_pkts_ring1 0x16
#define rcv_drop_pkts_ring2 0x17
#define rcv_drop_pkts_ring3 0x18
#define rcv_drop_pkts_ring4 0x19
#define rcv_jumbo_pkts 0x1A
#define xmt_underrun_pkts 0x20
#define xmt_octets 0x21
#define xmt_packets 0x22
#define xmt_broadcast_pkts 0x23
#define xmt_multicast_pkts 0x24
#define xmt_collisions 0x25
#define xmt_unicast_pkts 0x26
#define xmt_one_collision 0x27
#define xmt_multiple_collision 0x28
#define xmt_deferred_transmit 0x29
#define xmt_late_collision 0x2A
#define xmt_excessive_defer 0x2B
#define xmt_loss_carrier 0x2C
#define xmt_excessive_collision 0x2D
#define xmt_back_pressure 0x2E
#define xmt_flow_ctrl 0x2F
#define xmt_pkts_64_octets 0x30
#define xmt_pkts_65to127_octets 0x31
#define xmt_pkts_128to255_octets 0x32
#define xmt_pkts_256to511_octets 0x33
#define xmt_pkts_512to1023_octets 0x34
#define xmt_pkts_1024to1518_octet 0x35
#define xmt_oversize_pkts 0x36
#define xmt_jumbo_pkts 0x37
/* Driver definitions */
#define PCI_VENDOR_ID_AMD 0x1022
#define PCI_DEVICE_ID_AMD8111E_7462 0x7462
#define MAX_UNITS 16 /* Maximum number of devices possible */
#define NUM_TX_BUFFERS 32 /* Number of transmit buffers */
#define NUM_RX_BUFFERS 32 /* Number of receive buffers */
#define TX_BUFF_MOD_MASK 31 /* (NUM_TX_BUFFERS -1) */
#define RX_BUFF_MOD_MASK 31 /* (NUM_RX_BUFFERS -1) */
#define NUM_TX_RING_DR 32
#define NUM_RX_RING_DR 32
#define TX_RING_DR_MOD_MASK 31 /* (NUM_TX_RING_DR -1) */
#define RX_RING_DR_MOD_MASK 31 /* (NUM_RX_RING_DR -1) */
#define MAX_FILTER_SIZE 64 /* Maximum multicast address */
#define AMD8111E_MIN_MTU 60
#define AMD8111E_MAX_MTU 9000
#define PKT_BUFF_SZ 1536
#define MIN_PKT_LEN 60
#define ETH_ADDR_LEN 6
#define OPTION_VLAN_ENABLE 0x0001
#define OPTION_JUMBO_ENABLE 0x0002
#define OPTION_MULTICAST_ENABLE 0x0004
#define OPTION_WOL_ENABLE 0x0008
#define OPTION_WAKE_MAGIC_ENABLE 0x0010
#define OPTION_WAKE_PHY_ENABLE 0x0020
#define PHY_REG_ADDR_MASK	0x1f

/* Assume controller gets data 10 times the maximum processing time */
/* NOTE(review): the trailing ';' below is expanded at every use site.
   amd8111e_write_phy() currently relies on it in place of its own
   statement terminator, so removing it here alone would break the
   build — fix both together. */
#define REPEAT_CNT 10;
/* amd8111e decriptor flag definitions */
#define OWN_BIT B15_MASK
#define ADD_FCS_BIT B13_MASK
#define LTINT_BIT B12_MASK
#define STP_BIT B9_MASK
#define ENP_BIT B8_MASK
#define KILL_BIT B6_MASK
#define TCC_MASK 0x0003
#define TCC_VLAN_INSERT B1_MASK
#define TCC_VLAN_REPLACE 0x0003
#define RESET_RX_FLAGS 0x0000
#define ERR_BIT B14_MASK
#define FRAM_BIT B13_MASK
#define OFLO_BIT B12_MASK
#define CRC_BIT B11_MASK
#define PAM_BIT B6_MASK
#define LAFM_BIT B5_MASK
#define BAM_BIT B4_MASK
#define TT_MASK 0x000c
#define TT_VLAN_TAGGED 0x000c
#define TT_PRTY_TAGGED 0x0008
/* driver ioctl parameters */
#define PHY_ID 0x01 /* currently it is fixed */
#define AMD8111E_REG_DUMP_LEN 4096 /* Memory mapped register length */
/* amd8111e descriptor format */

/* Transmit descriptor consumed by the MAC's DMA engine.
   NOTE(review): the layout must match the hardware descriptor format;
   field endianness/ordering should be confirmed against the AMD 8111
   datasheet before any change. */
struct amd8111e_tx_dr{

	u16 buff_count; /* Size of the buffer pointed by this descriptor */

	u16 tx_dr_offset2;	/* presumably the OWN/STP/ENP flag word — confirm */

	u16 tag_ctrl_info;	/* VLAN tag data */

	u16 tag_ctrl_cmd;	/* presumably TCC_* tag-control command bits — confirm */

	u32 buff_phy_addr;	/* DMA (bus) address of the data buffer */

	u32 reserved;
};
/* Receive descriptor filled in by the MAC's DMA engine.
   NOTE(review): layout is hardware-defined — confirm against the
   AMD 8111 datasheet before changing. */
struct amd8111e_rx_dr{

	u32 reserved;

	u16 msg_count;	/* Received message len */

	u16 tag_ctrl_info;	/* VLAN tag data extracted from the frame */

	u16 buff_count;	/* Len of the buffer pointed by descriptor. */

	u16 rx_dr_offset10;	/* presumably the OWN/ERR status word — confirm */

	u32 buff_phy_addr;	/* DMA (bus) address of the receive buffer */
};
/* Cached link parameters; used by the ethtool SSET / NWAY_RST paths
   to remember and restore the speed/duplex configured before
   auto-negotiation was enabled. */
struct amd8111e_link_config{

#define SPEED_INVALID		0xffff
#define DUPLEX_INVALID		0xff
#define AUTONEG_INVALID		0xff

	unsigned long		orig_phy_option; /* ext_phy_option saved before autoneg */
	u16			speed;		/* SPEED_* or SPEED_INVALID while autonegotiating */
	u8			duplex;		/* DUPLEX_* or DUPLEX_INVALID while autonegotiating */
	u8			autoneg;	/* AUTONEG_ENABLE / AUTONEG_DISABLE */
	u16			orig_speed;	/* speed saved before autoneg */
	u8			orig_duplex;	/* duplex saved before autoneg */
	u8			reserved;	/* 32bit alignment */
};
/* Per-device driver state, hung off net_device->priv and protected by
   'lock' where noted in the .c file. */
struct amd8111e_priv{

	struct amd8111e_tx_dr*  tx_ring;	/* tx descriptor ring (DMA coherent) */
	struct amd8111e_rx_dr* rx_ring;		/* rx descriptor ring (DMA coherent) */
	dma_addr_t tx_ring_dma_addr;	/* tx descriptor ring base address */
	dma_addr_t rx_ring_dma_addr;	/* rx descriptor ring base address */
	const char *name;
	struct pci_dev *pci_dev;	/* Ptr to the associated pci_dev */
	struct net_device* amd8111e_net_dev; 	/* ptr to associated net_device */
	/* Transmit and receive skbs */
	struct sk_buff *tx_skbuff[NUM_TX_BUFFERS];
	struct sk_buff *rx_skbuff[NUM_RX_BUFFERS];
	/* Transmit and receive dma mapped addr */
	dma_addr_t tx_dma_addr[NUM_TX_BUFFERS];
	dma_addr_t rx_dma_addr[NUM_RX_BUFFERS];
	/* Reg memory mapped address (from ioremap) */
	void *  mmio;

	spinlock_t lock;	/* Guard lock */
	unsigned long rx_idx, tx_idx;	/* The next free ring entry */
	unsigned long tx_complete_idx;
	unsigned long tx_ring_complete_idx;
	unsigned long tx_ring_idx;
	int rx_buff_len;	/* Buffer length of rx buffers */
	int options;	/* OPTION_* flags enabled/disabled for the device */
	unsigned long ext_phy_option;	/* XPHY* speed/duplex/autoneg bits */
	struct amd8111e_link_config link_config; /* ethtool speed/duplex cache */
	int pm_cap;	/* offset of the PCI power-management capability */

	struct net_device *next;
#if AMD8111E_VLAN_TAG_USED
	struct vlan_group *vlgrp;	/* VLAN group from the 802.1q layer */
#endif
	char opened;
	struct net_device_stats stats;
	struct net_device_stats prev_stats;
	struct dev_mc_list* mc_list;
};
/* Read a 64-bit memory-mapped register as two 32-bit reads into the buffer
 * at _pUlData (low dword first).  Original expansion had unbalanced
 * parentheses in the high-dword store; rewritten with a byte offset to
 * mirror AMD8111E_WRITE_REG64.  Wrapped in do { } while (0) so the
 * two statements behave as one inside an unbraced if/else. */
#define AMD8111E_READ_REG64(_memMapBase, _offset, _pUlData)	\
do {								\
	*(u32 *)(_pUlData) = readl(_memMapBase + (_offset));	\
	*(u32 *)((u8 *)(_pUlData) + 4) =			\
		readl(_memMapBase + ((_offset) + 4));		\
} while (0)
/* Write a 64-bit memory-mapped register as two 32-bit writes from the buffer
 * at _pUlData (low dword first).  The original's trailing backslash on the
 * last line spliced the following source line into the macro; removed.
 * Wrapped in do { } while (0) so the two statements behave as one inside
 * an unbraced if/else. */
#define AMD8111E_WRITE_REG64(_memMapBase, _offset, _pUlData)	\
do {								\
	writel(*(u32 *)(_pUlData), _memMapBase + (_offset));	\
	writel(*(u32 *)((u8 *)(_pUlData) + 4),			\
	       _memMapBase + ((_offset) + 4));			\
} while (0)
/* maps the external speed options to internal value */
/* Translation table: external speed_duplex module option (array index 0..4)
 * to the external-PHY control bits programmed into the chip. */
static unsigned char speed_duplex_mapping[] = {
	[0] = XPHYANE,		/* autonegotiation */
	[1] = 0,		/* 10M half duplex */
	[2] = XPHYFD,		/* 10M full duplex */
	[3] = XPHYSP,		/* 100M half duplex */
	[4] = XPHYFD | XPHYSP,	/* 100M full duplex */
};
static int card_idx;	/* NOTE(review): appears to count/index boards during probe; usage not visible in this chunk */
static int speed_duplex[MAX_UNITS] = { 0, };	/* per-board speed_duplex option; index meaning per speed_duplex_mapping (0 = autoneg) */
#endif /* _AMD8111E_H */
......@@ -268,6 +268,7 @@ struct driver_stats {
#define SCB_CUC_NOOP 0
#define SCB_CUC_START BIT_4 /* CU Start */
#define SCB_CUC_RESUME BIT_5 /* CU Resume */
#define SCB_CUC_UNKNOWN BIT_7 /* CU unknown command */
/* Changed for 82558 enhancements */
#define SCB_CUC_STATIC_RESUME (BIT_5 | BIT_7) /* 82558/9 Static Resume */
#define SCB_CUC_DUMP_ADDR BIT_6 /* CU Dump Counters Address */
......@@ -953,6 +954,10 @@ struct e100_private {
u32 pci_state[16];
#endif
char ifname[IFNAMSIZ];
#ifdef E100_CU_DEBUG
u8 last_cmd;
u8 last_sub_cmd;
#endif
};
#define E100_AUTONEG 0
......@@ -964,7 +969,7 @@ struct e100_private {
/********* function prototypes *************/
extern void e100_isolate_driver(struct e100_private *bdp);
extern void e100_sw_reset(struct e100_private *bdp, u32 reset_cmd);
extern void e100_start_cu(struct e100_private *bdp, tcb_t *tcb);
extern u8 e100_start_cu(struct e100_private *bdp, tcb_t *tcb);
extern void e100_free_non_tx_cmd(struct e100_private *bdp,
nxmit_cb_entry_t *non_tx_cmd);
extern nxmit_cb_entry_t *e100_alloc_non_tx_cmd(struct e100_private *bdp);
......@@ -976,8 +981,10 @@ extern unsigned char e100_get_link_state(struct e100_private *bdp);
extern unsigned char e100_wait_scb(struct e100_private *bdp);
extern void e100_deisolate_driver(struct e100_private *bdp, u8 full_reset);
extern unsigned char e100_hw_reset_recover(struct e100_private *bdp,
u32 reset_cmd);
extern unsigned char e100_configure_device(struct e100_private *bdp);
#ifdef E100_CU_DEBUG
extern unsigned char e100_cu_unknown_state(struct e100_private *bdp);
#endif
#define ROM_TEST_FAIL 0x01
#define REGISTER_TEST_FAIL 0x02
......
......@@ -494,8 +494,7 @@ e100_config_long_rx(struct e100_private *bdp, unsigned char enable)
* e100_config_wol
* @bdp: adapter's private data struct
*
* This sets configuration options for Wake On LAN functionality (WOL) in the
* config record. WOL options are retrieved from wolinfo_wolopts in @bdp
* This sets configuration options for PHY and Magic Packet WoL
*/
void
e100_config_wol(struct e100_private *bdp)
......@@ -504,14 +503,21 @@ e100_config_wol(struct e100_private *bdp)
if (bdp->wolopts & WAKE_PHY) {
bdp->config[9] |= CB_LINK_STATUS_WOL;
E100_CONFIG(bdp, 9);
}
else {
/* Disable PHY WoL */
bdp->config[9] &= ~CB_LINK_STATUS_WOL;
}
if (!(bdp->wolopts & WAKE_MAGIC)) {
if (bdp->wolopts & WAKE_MAGIC) {
bdp->config[19] &= ~CB_DISABLE_MAGPAK_WAKE;
}
else {
/* Disable Magic Packet WoL */
bdp->config[19] |= CB_DISABLE_MAGPAK_WAKE;
E100_CONFIG(bdp, 19);
}
E100_CONFIG(bdp, 19);
spin_unlock_bh(&(bdp->config_lock));
}
......
......@@ -45,6 +45,21 @@
**********************************************************************/
/* Change Log
*
* 2.1.29 12/20/02
* o Bug fix: Device command timeout due to SMBus processing during init
* o Bug fix: Not setting/clearing I (Interrupt) bit in tcb correctly
* o Bug fix: Not using EEPROM WoL setting as default in ethtool
* o Bug fix: Not able to set autoneg on using ethtool when interface down
* o Bug fix: Not able to change speed/duplex using ethtool/mii
* when interface up
* o Bug fix: Ethtool shows autoneg on when forced to 100/Full
* o Bug fix: Compiler error when CONFIG_PROC_FS not defined
* o Bug fix: 2.5.44 e100 doesn't load with preemptive kernel enabled
* (sleep while holding spinlock)
* o Bug fix: 2.1.24-k1 doesn't display complete statistics
* o Bug fix: System panic due to NULL watchdog timer dereference during
* ifconfig down, rmmod and insmod
*
* 2.1.24 10/7/02
* o Bug fix: Wrong files under /proc/net/PRO_LAN_Adapters/ when interface
......@@ -56,21 +71,6 @@
* o Removed misleading printks
*
* 2.1.12 8/2/02
* o Feature: ethtool register dump
* o Bug fix: Driver passes wrong name to /proc/interrupts
* o Bug fix: Ethernet bridging not working
* o Bug fix: Promiscuous mode is not working
* o Bug fix: Checked return value from copy_from_user (William Stinson,
* wstinson@infonie.fr)
* o Bug fix: ARP wake on LAN fails
* o Bug fix: mii-diag does not update driver level's speed, duplex and
* re-configure flow control
* o Bug fix: Ethtool shows wrong speed/duplex when not connected
* o Bug fix: Ethtool shows wrong speed/duplex when reconnected if forced
* speed/duplex
* o Bug fix: PHY loopback diagnostic fails
*
* 2.1.6 7/5/02
*/
#include <linux/config.h>
......@@ -135,7 +135,7 @@ static void e100_non_tx_background(unsigned long);
/* Global Data structures and variables */
char e100_copyright[] __devinitdata = "Copyright (c) 2002 Intel Corporation";
char e100_driver_version[]="2.1.24-k2";
char e100_driver_version[]="2.1.29-k1";
const char *e100_full_driver_name = "Intel(R) PRO/100 Network Driver";
char e100_short_driver_name[] = "e100";
static int e100nics = 0;
......@@ -224,7 +224,7 @@ static void e100_check_options(int board, struct e100_private *bdp);
static void e100_set_int_option(int *, int, int, int, int, char *);
static void e100_set_bool_option(struct e100_private *bdp, int, u32, int,
char *);
unsigned char e100_wait_exec_cmplx(struct e100_private *, u32, u8);
unsigned char e100_wait_exec_cmplx(struct e100_private *, u32, u8, u8);
void e100_exec_cmplx(struct e100_private *, u32, u8);
/**
......@@ -443,9 +443,21 @@ e100_wait_exec_simple(struct e100_private *bdp, u8 scb_cmd_low)
if (!e100_wait_scb(bdp)) {
printk(KERN_DEBUG "e100: %s: e100_wait_exec_simple: failed\n",
bdp->device->name);
#ifdef E100_CU_DEBUG
printk(KERN_ERR "e100: %s: Last command (%x/%x) "
"timeout\n", bdp->device->name,
bdp->last_cmd, bdp->last_sub_cmd);
printk(KERN_ERR "e100: %s: Current simple command (%x) "
"can't be executed\n",
bdp->device->name, scb_cmd_low);
#endif
return false;
}
e100_exec_cmd(bdp, scb_cmd_low);
#ifdef E100_CU_DEBUG
bdp->last_cmd = scb_cmd_low;
bdp->last_sub_cmd = 0;
#endif
return true;
}
......@@ -458,12 +470,24 @@ e100_exec_cmplx(struct e100_private *bdp, u32 phys_addr, u8 cmd)
}
unsigned char
e100_wait_exec_cmplx(struct e100_private *bdp, u32 phys_addr, u8 cmd)
e100_wait_exec_cmplx(struct e100_private *bdp, u32 phys_addr, u8 cmd, u8 sub_cmd)
{
if (!e100_wait_scb(bdp)) {
#ifdef E100_CU_DEBUG
printk(KERN_ERR "e100: %s: Last command (%x/%x) "
"timeout\n", bdp->device->name,
bdp->last_cmd, bdp->last_sub_cmd);
printk(KERN_ERR "e100: %s: Current complex command "
"(%x/%x) can't be executed\n",
bdp->device->name, cmd, sub_cmd);
#endif
return false;
}
e100_exec_cmplx(bdp, phys_addr, cmd);
#ifdef E100_CU_DEBUG
bdp->last_cmd = cmd;
bdp->last_sub_cmd = sub_cmd;
#endif
return true;
}
......@@ -494,18 +518,23 @@ e100_wait_cus_idle(struct e100_private *bdp)
}
/**
* e100_dis_intr - disable interrupts
* e100_disable_clear_intr - disable and clear/ack interrupts
* @bdp: adapter's private data struct
*
* This routine disables interrupts at the hardware, by setting
* the M (mask) bit in the adapter's CSR SCB command word.
* It also clear/ack interrupts.
*/
static inline void
e100_dis_intr(struct e100_private *bdp)
e100_disable_clear_intr(struct e100_private *bdp)
{
u16 intr_status;
/* Disable interrupts on our PCI board by setting the mask bit */
writeb(SCB_INT_MASK, &bdp->scb->scb_cmd_hi);
readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
intr_status = readw(&bdp->scb->scb_status);
/* ack and clear intrs */
writew(intr_status, &bdp->scb->scb_status);
readw(&bdp->scb->scb_status);
}
/**
......@@ -623,7 +652,7 @@ e100_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
cal_checksum = e100_eeprom_calculate_chksum(bdp);
read_checksum = e100_eeprom_read(bdp, (bdp->eeprom_size - 1));
if (cal_checksum != read_checksum) {
printk(KERN_ERR "e100: Corrupted EERPROM on instance #%d\n",
printk(KERN_ERR "e100: Corrupted EEPROM on instance #%d\n",
e100nics);
rc = -ENODEV;
goto err_pci;
......@@ -673,12 +702,17 @@ e100_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
bdp->device->name);
}
/* Disabling all WOLs as initialization */
bdp->wolsupported = bdp->wolopts = 0;
if (bdp->rev_id >= D101A4_REV_ID) {
bdp->wolsupported = 0;
bdp->wolopts = 0;
/* Check if WoL is enabled on EEPROM */
if (e100_eeprom_read(bdp, EEPROM_ID_WORD) & BIT_5) {
if (bdp->rev_id >= D101A4_REV_ID)
bdp->wolsupported = WAKE_PHY | WAKE_MAGIC;
if (bdp->rev_id >= D101MA_REV_ID)
bdp->wolsupported |= WAKE_UCAST | WAKE_ARP;
/* Magic Packet WoL is enabled on device by default */
/* if EEPROM WoL bit is TRUE */
bdp->wolopts = WAKE_MAGIC;
}
......@@ -953,12 +987,12 @@ e100_open(struct net_device *dev)
goto err_exit;
}
if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE)) {
if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0)) {
rc = -EAGAIN;
goto err_exit;
}
if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE)) {
if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0)) {
rc = -EAGAIN;
goto err_exit;
}
......@@ -1235,7 +1269,8 @@ e100_init(struct e100_private *bdp)
printk(KERN_ERR "e100: hw init failed\n");
return false;
}
e100_dis_intr(bdp);
/* Interrupts are enabled after device reset */
e100_disable_clear_intr(bdp);
return true;
}
......@@ -1303,10 +1338,10 @@ e100_hw_init(struct e100_private *bdp, u32 reset_cmd)
e100_sw_reset(bdp, reset_cmd);
/* Load the CU BASE (set to 0, because we use linear mode) */
if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE))
if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0))
return false;
if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE))
if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0))
return false;
/* Load interrupt microcode */
......@@ -1590,6 +1625,12 @@ e100_watchdog(struct net_device *dev)
{
struct e100_private *bdp = dev->priv;
#ifdef E100_CU_DEBUG
if (e100_cu_unknown_state(bdp)) {
printk(KERN_ERR "e100: %s: CU unknown state in e100_watchdog\n",
dev->name);
}
#endif
if (!netif_running(dev)) {
return;
}
......@@ -1646,6 +1687,7 @@ e100_watchdog(struct net_device *dev)
if (netif_running(dev))
netif_wake_queue(dev);
} else {
if (netif_running(dev))
netif_stop_queue(dev);
}
......@@ -1774,11 +1816,8 @@ e100intr(int irq, void *dev_inst, struct pt_regs *regs)
return;
}
/* disable intr before we ack & after identifying the intr as ours */
e100_dis_intr(bdp);
writew(intr_status, &bdp->scb->scb_status); /* ack intrs */
readw(&bdp->scb->scb_status);
/* disable and ack intr */
e100_disable_clear_intr(bdp);
/* the device is closed, don't continue or else bad things may happen. */
if (!netif_running(dev)) {
......@@ -2208,10 +2247,11 @@ e100_prepare_xmit_buff(struct e100_private *bdp, struct sk_buff *skb)
*
* e100_start_cu must be called while holding the tx_lock !
*/
void
u8
e100_start_cu(struct e100_private *bdp, tcb_t *tcb)
{
unsigned long lock_flag;
u8 ret = true;
spin_lock_irqsave(&(bdp->bd_lock), lock_flag);
switch (bdp->next_cu_cmd) {
......@@ -2242,12 +2282,13 @@ e100_start_cu(struct e100_private *bdp, tcb_t *tcb)
"e100: %s: cu_start: timeout waiting for cu\n",
bdp->device->name);
if (!e100_wait_exec_cmplx(bdp, (u32) (tcb->tcb_phys),
SCB_CUC_START)) {
SCB_CUC_START, CB_TRANSMIT)) {
printk(KERN_DEBUG
"e100: %s: cu_start: timeout waiting for scb\n",
bdp->device->name);
e100_exec_cmplx(bdp, (u32) (tcb->tcb_phys),
SCB_CUC_START);
ret = false;
}
bdp->next_cu_cmd = RESUME_WAIT;
......@@ -2259,6 +2300,7 @@ e100_start_cu(struct e100_private *bdp, tcb_t *tcb)
bdp->last_tcb = tcb;
spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag);
return ret;
}
/* ====================================================================== */
......@@ -2306,8 +2348,9 @@ e100_selftest(struct e100_private *bdp, u32 *st_timeout, u32 *st_result)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ / 100 + 1);
/* disable interrupts since the're now enabled */
e100_dis_intr(bdp);
/* disable interrupts since they are enabled */
/* after device reset during selftest */
e100_disable_clear_intr(bdp);
/* if The First Self Test DWORD Still Zero, We've timed out. If the
* second DWORD is not zero then we have an error. */
......@@ -2405,7 +2448,7 @@ e100_start_ru(struct e100_private *bdp)
spin_lock(&bdp->bd_lock);
if (!e100_wait_exec_cmplx(bdp, rx_struct->dma_addr, SCB_RUC_START)) {
if (!e100_wait_exec_cmplx(bdp, rx_struct->dma_addr, SCB_RUC_START, 0)) {
printk(KERN_DEBUG
"e100: %s: start_ru: wait_scb failed\n",
bdp->device->name);
......@@ -2473,7 +2516,7 @@ e100_clr_cntrs(struct e100_private *bdp)
*pcmd_complete = 0;
wmb();
if (!e100_wait_exec_cmplx(bdp, bdp->stat_cnt_phys, SCB_CUC_DUMP_ADDR))
if (!e100_wait_exec_cmplx(bdp, bdp->stat_cnt_phys, SCB_CUC_DUMP_ADDR, 0))
return false;
/* wait 10 microseconds for the command to complete */
......@@ -2595,8 +2638,10 @@ e100_exec_non_cu_cmd(struct e100_private *bdp, nxmit_cb_entry_t *command)
unsigned long lock_flag;
unsigned long expiration_time;
unsigned char rc = true;
u8 sub_cmd;
ntcb_hdr = (cb_header_t *) command->non_tx_cmd; /* get hdr of non tcb cmd */
sub_cmd = cpu_to_le16(ntcb_hdr->cb_cmd);
/* Set the Command Block to be the last command block */
ntcb_hdr->cb_cmd |= __constant_cpu_to_le16(CB_EL_BIT);
......@@ -2629,7 +2674,7 @@ e100_exec_non_cu_cmd(struct e100_private *bdp, nxmit_cb_entry_t *command)
spin_lock_irqsave(&bdp->bd_lock, lock_flag);
if (!e100_wait_exec_cmplx(bdp, command->dma_addr, SCB_CUC_START)) {
if (!e100_wait_exec_cmplx(bdp, command->dma_addr, SCB_CUC_START, sub_cmd)) {
spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag);
rc = false;
goto exit;
......@@ -2649,6 +2694,10 @@ e100_exec_non_cu_cmd(struct e100_private *bdp, nxmit_cb_entry_t *command)
yield();
spin_lock_bh(&(bdp->bd_non_tx_lock));
} else {
#ifdef E100_CU_DEBUG
printk(KERN_ERR "e100: %s: non-TX command (%x) "
"timeout\n", bdp->device->name, sub_cmd);
#endif
rc = false;
goto exit;
}
......@@ -2694,7 +2743,12 @@ e100_sw_reset(struct e100_private *bdp, u32 reset_cmd)
}
/* Mask off our interrupt line -- its unmasked after reset */
e100_dis_intr(bdp);
e100_disable_clear_intr(bdp);
#ifdef E100_CU_DEBUG
bdp->last_cmd = 0;
bdp->last_sub_cmd = 0;
#endif
}
/**
......@@ -2894,19 +2948,6 @@ e100_D101M_checksum(struct e100_private *bdp, struct sk_buff *skb)
void __devinit
e100_print_brd_conf(struct e100_private *bdp)
{
if (netif_carrier_ok(bdp->device)) {
printk(KERN_NOTICE
" Mem:0x%08lx IRQ:%d Speed:%d Mbps Dx:%s\n",
(unsigned long) bdp->device->mem_start,
bdp->device->irq, bdp->cur_line_speed,
(bdp->cur_dplx_mode == FULL_DUPLEX) ? "Full" : "Half");
} else {
printk(KERN_NOTICE
" Mem:0x%08lx IRQ:%d Speed:%d Mbps Dx:%s\n",
(unsigned long) bdp->device->mem_start,
bdp->device->irq, 0, "N/A");
}
/* Print the string if checksum Offloading was enabled */
if (bdp->flags & DF_CSUM_OFFLOAD)
printk(KERN_NOTICE " Hardware receive checksums enabled\n");
......@@ -3005,8 +3046,13 @@ e100_pci_setup(struct pci_dev *pcid, struct e100_private *bdp)
void
e100_isolate_driver(struct e100_private *bdp)
{
if (netif_running(bdp->device)) {
e100_dis_intr(bdp);
/* Check if interface is up */
/* NOTE: Can't use netif_running(bdp->device) because */
/* dev_close clears __LINK_STATE_START before calling */
/* e100_close (aka dev->stop) */
if (bdp->device->flags & IFF_UP) {
e100_disable_clear_intr(bdp);
del_timer_sync(&bdp->watchdog_timer);
del_timer_sync(&bdp->hwi_timer);
/* If in middle of cable diag, */
......@@ -3045,46 +3091,36 @@ e100_tcb_add_C_bit(struct e100_private *bdp)
}
/*
* Procedure: e100_hw_reset_recover
* Procedure: e100_configure_device
*
* Description: This routine will recover the hw after reset.
* Description: This routine will configure device
*
* Arguments:
* bdp - Ptr to this card's e100_bdconfig structure
* reset_cmd - s/w reset or selective reset.
*
* Returns:
* true upon success
* false upon failure
*/
unsigned char
e100_hw_reset_recover(struct e100_private *bdp, u32 reset_cmd)
e100_configure_device(struct e100_private *bdp)
{
bdp->last_tcb = NULL;
if (reset_cmd == PORT_SOFTWARE_RESET) {
/*load CU & RU base */
if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE)) {
if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0))
return false;
}
if (e100_load_microcode(bdp)) {
if (e100_load_microcode(bdp))
bdp->flags |= DF_UCODE_LOADED;
}
if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE)) {
if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0))
return false;
}
/* Issue the load dump counters address command */
if (!e100_wait_exec_cmplx(bdp, bdp->stat_cnt_phys,
SCB_CUC_DUMP_ADDR)) {
if (!e100_wait_exec_cmplx(bdp, bdp->stat_cnt_phys, SCB_CUC_DUMP_ADDR, 0))
return false;
}
if (!e100_setup_iaaddr(bdp, bdp->device->dev_addr)) {
printk(KERN_ERR
"e100: e100_hw_reset_recover: "
printk(KERN_ERR "e100: e100_configure_device: "
"setup iaaddr failed\n");
return false;
}
......@@ -3092,8 +3128,8 @@ e100_hw_reset_recover(struct e100_private *bdp, u32 reset_cmd)
e100_set_multi_exec(bdp->device);
/* Change for 82558 enhancement */
/* If 82558/9 and if the user has enabled flow control, set up * the
* Flow Control Reg. in the CSR */
/* If 82558/9 and if the user has enabled flow control, set up */
/* flow Control Reg. in the CSR */
if ((bdp->flags & IS_BACHELOR)
&& (bdp->params.b_params & PRM_FC)) {
writeb(DFLT_FC_THLD,
......@@ -3102,8 +3138,6 @@ e100_hw_reset_recover(struct e100_private *bdp, u32 reset_cmd)
&bdp->scb->scb_ext.d101_scb.scb_fc_xon_xoff);
}
}
e100_force_config(bdp);
return true;
......@@ -3115,7 +3149,7 @@ e100_deisolate_driver(struct e100_private *bdp, u8 full_reset)
u32 cmd = full_reset ? PORT_SOFTWARE_RESET : PORT_SELECTIVE_RESET;
e100_sw_reset(bdp, cmd);
if (cmd == PORT_SOFTWARE_RESET) {
if (!e100_hw_reset_recover(bdp, cmd))
if (!e100_configure_device(bdp))
printk(KERN_ERR "e100: e100_deisolate_driver:"
" device configuration failed\n");
}
......@@ -3253,10 +3287,8 @@ static int
e100_ethtool_set_settings(struct net_device *dev, struct ifreq *ifr)
{
struct e100_private *bdp;
int current_duplex;
int e100_new_speed_duplex;
int ethtool_new_speed_duplex;
int speed_duplex_change_required;
struct ethtool_cmd ecmd;
if (!capable(CAP_NET_ADMIN)) {
......@@ -3264,48 +3296,33 @@ e100_ethtool_set_settings(struct net_device *dev, struct ifreq *ifr)
}
bdp = dev->priv;
if (netif_running(dev)) {
return -EBUSY;
}
if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd))) {
return -EFAULT;
}
current_duplex =
(bdp->cur_dplx_mode == HALF_DUPLEX) ? DUPLEX_HALF : DUPLEX_FULL;
speed_duplex_change_required = (ecmd.speed != bdp->cur_line_speed)
|| (ecmd.duplex != current_duplex);
if ((ecmd.autoneg == AUTONEG_ENABLE) && speed_duplex_change_required) {
return -EINVAL;
}
if ((ecmd.autoneg == AUTONEG_ENABLE)
&& (bdp->speed_duplex_caps & SUPPORTED_Autoneg)) {
bdp->params.e100_speed_duplex = E100_AUTONEG;
e100_set_speed_duplex(bdp);
} else {
if (speed_duplex_change_required) {
if (ecmd.speed == SPEED_10) {
if (ecmd.duplex == DUPLEX_HALF) {
e100_new_speed_duplex =
E100_SPEED_10_HALF;
ethtool_new_speed_duplex =
SUPPORTED_10baseT_Half;
} else {
e100_new_speed_duplex =
E100_SPEED_10_FULL;
ethtool_new_speed_duplex =
SUPPORTED_10baseT_Full;
}
} else {
if (ecmd.duplex == DUPLEX_HALF) {
e100_new_speed_duplex =
E100_SPEED_100_HALF;
ethtool_new_speed_duplex =
SUPPORTED_100baseT_Half;
} else {
e100_new_speed_duplex =
E100_SPEED_100_FULL;
......@@ -3322,7 +3339,6 @@ e100_ethtool_set_settings(struct net_device *dev, struct ifreq *ifr)
return -EOPNOTSUPP;
}
}
}
return 0;
}
......@@ -3838,9 +3854,6 @@ e100_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCSMIIREG:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (netif_running(dev)) {
return -EBUSY;
}
/* If reg = 0 && change speed/duplex */
if (data_ptr->reg_num == 0 &&
(data_ptr->val_in == (BMCR_ANENABLE | BMCR_ANRESTART) /* restart cmd */
......@@ -3860,10 +3873,9 @@ e100_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
bdp->params.e100_speed_duplex = E100_SPEED_10_HALF;
e100_set_speed_duplex(bdp);
}
else {
e100_mdi_write(bdp, data_ptr->reg_num, bdp->phy_addr,
data_ptr->val_in);
}
else
/* Only allows changing speed/duplex */
return -EINVAL;
break;
......@@ -3960,6 +3972,8 @@ e100_non_tx_background(unsigned long ptr)
struct e100_private *bdp = (struct e100_private *) ptr;
nxmit_cb_entry_t *active_command;
int restart = true;
cb_header_t *non_tx_cmd;
u8 sub_cmd;
spin_lock_bh(&(bdp->bd_non_tx_lock));
......@@ -3987,6 +4001,15 @@ e100_non_tx_background(unsigned long ptr)
&& time_before(jiffies, active_command->expiration_time)) {
goto exit;
} else {
non_tx_cmd = (cb_header_t *) active_command->non_tx_cmd;
sub_cmd = CB_CMD_MASK & le16_to_cpu(non_tx_cmd->cb_cmd);
#ifdef E100_CU_DEBUG
if (!(non_tx_cmd->cb_status
& __constant_cpu_to_le16(CB_STATUS_COMPLETE)))
printk(KERN_ERR "e100: %s: Queued "
"command (%x) timeout\n",
bdp->device->name, sub_cmd);
#endif
list_del(&(active_command->list_elem));
e100_free_non_tx_cmd(bdp, active_command);
}
......@@ -4009,9 +4032,10 @@ e100_non_tx_background(unsigned long ptr)
bdp->non_tx_command_state = E100_WAIT_NON_TX_FINISH;
active_command = list_entry(bdp->non_tx_cmd_list.next,
nxmit_cb_entry_t, list_elem);
sub_cmd = ((cb_header_t *) active_command->non_tx_cmd)->cb_cmd;
spin_lock_irq(&(bdp->bd_lock));
e100_wait_exec_cmplx(bdp, active_command->dma_addr,
SCB_CUC_START);
SCB_CUC_START, sub_cmd);
spin_unlock_irq(&(bdp->bd_lock));
active_command->expiration_time = jiffies + HZ;
cmd_type = CB_CMD_MASK &
......@@ -4084,9 +4108,11 @@ e100_suspend(struct pci_dev *pcid, u32 state)
e100_isolate_driver(bdp);
pci_save_state(pcid, bdp->pci_state);
/* Enable or disable WoL */
e100_do_wol(pcid, bdp);
/* If wol is enabled */
if (bdp->wolopts) {
e100_do_wol(pcid, bdp);
pci_enable_wake(pcid, 3, 1); /* Enable PME for power state D3 */
pci_set_power_state(pcid, 3); /* Set power state to D3. */
} else {
......@@ -4102,17 +4128,13 @@ e100_resume(struct pci_dev *pcid)
{
struct net_device *netdev = pci_get_drvdata(pcid);
struct e100_private *bdp = netdev->priv;
u8 full_reset = false;
pci_set_power_state(pcid, 0);
pci_enable_wake(pcid, 0, 0); /* Clear PME status and disable PME */
pci_restore_state(pcid, bdp->pci_state);
if (bdp->wolopts & (WAKE_UCAST | WAKE_ARP)) {
full_reset = true;
}
e100_deisolate_driver(bdp, full_reset);
/* Also do device full reset because device was in D3 state */
e100_deisolate_driver(bdp, true);
return 0;
}
......@@ -4239,3 +4261,20 @@ static void e100_hwi_restore(struct e100_private *bdp)
e100_mdi_write(bdp, MII_BMCR, bdp->phy_addr, control);
return;
}
#ifdef E100_CU_DEBUG
/* Debug helper: report whether the Command Unit is active while the SCB
 * command byte shows the "unknown command" bit set. */
unsigned char
e100_cu_unknown_state(struct e100_private *bdp)
{
	u8 cmd_low = bdp->scb->scb_cmd_low;
	u16 status = le16_to_cpu(bdp->scb->scb_status);

	return ((status & SCB_CUS_ACTIVE) && (cmd_low & SCB_CUC_UNKNOWN))
		? true : false;
}
#endif
......@@ -29,7 +29,7 @@
#include "e100_config.h"
extern u16 e100_eeprom_read(struct e100_private *, u16);
extern int e100_wait_exec_cmplx(struct e100_private *, u32,u8);
extern int e100_wait_exec_cmplx(struct e100_private *, u32,u8, u8);
extern void e100_phy_reset(struct e100_private *bdp);
extern void e100_phy_autoneg(struct e100_private *bdp);
extern void e100_phy_set_loopback(struct e100_private *bdp);
......@@ -95,11 +95,9 @@ e100_run_diag(struct net_device *dev, u64 *test_info, u32 flags)
test_info [E100_EEPROM_TEST_FAIL] = true;
}
e100_deisolate_driver(bdp, false);
/*Let card recover from the test*/
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ * 2);
e100_deisolate_driver(bdp, false);
return flags | (test_result ? 0 : ETH_TEST_FL_FAILED);
}
......@@ -128,7 +126,7 @@ e100_diag_selftest(struct net_device *dev)
}
}
e100_hw_reset_recover(bdp,PORT_SOFTWARE_RESET);
e100_configure_device(bdp);
return retval;
}
......@@ -166,13 +164,19 @@ e100_diag_loopback (struct net_device *dev)
{
u8 rc = 0;
printk(KERN_DEBUG "%s: PHY loopback test starts\n", dev->name);
e100_sw_reset(dev->priv, PORT_SELECTIVE_RESET);
if (!e100_diag_one_loopback(dev, PHY_LOOPBACK)) {
rc |= PHY_LOOPBACK;
}
printk(KERN_DEBUG "%s: PHY loopback test ends\n", dev->name);
printk(KERN_DEBUG "%s: MAC loopback test starts\n", dev->name);
e100_sw_reset(dev->priv, PORT_SELECTIVE_RESET);
if (!e100_diag_one_loopback(dev, MAC_LOOPBACK)) {
rc |= MAC_LOOPBACK;
}
printk(KERN_DEBUG "%s: MAC loopback test ends\n", dev->name);
return rc;
}
......@@ -341,12 +345,12 @@ static void
e100_diag_loopback_cu_ru_exec(struct e100_private *bdp)
{
/*load CU & RU base */
if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE))
printk("e100: SCB_CUC_LOAD_BASE failed\n");
if(!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE))
printk("e100: SCB_RUC_LOAD_BASE failed!\n");
if(!e100_wait_exec_cmplx(bdp, bdp->loopback.dma_handle, SCB_RUC_START))
printk("e100: SCB_RUC_START failed!\n");
if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0))
printk(KERN_ERR "e100: SCB_CUC_LOAD_BASE failed\n");
if(!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0))
printk(KERN_ERR "e100: SCB_RUC_LOAD_BASE failed!\n");
if(!e100_wait_exec_cmplx(bdp, bdp->loopback.dma_handle, SCB_RUC_START, 0))
printk(KERN_ERR "e100: SCB_RUC_START failed!\n");
bdp->next_cu_cmd = START_WAIT;
e100_start_cu(bdp, bdp->loopback.tcb);
......
......@@ -113,6 +113,7 @@ struct e1000_adapter;
#define E1000_DEFAULT_PBA 0x00000030
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_APME 4
/* only works for sizes that are powers of 2 */
#define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
......
......@@ -306,12 +306,10 @@ e1000_ethtool_gwol(struct e1000_adapter *adapter, struct ethtool_wolinfo *wol)
/* Fall Through */
default:
wol->supported = WAKE_PHY | WAKE_UCAST |
WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
wol->supported = WAKE_UCAST | WAKE_MCAST
| WAKE_BCAST | WAKE_MAGIC;
wol->wolopts = 0;
if(adapter->wol & E1000_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
if(adapter->wol & E1000_WUFC_EX)
wol->wolopts |= WAKE_UCAST;
if(adapter->wol & E1000_WUFC_MC)
......@@ -343,13 +341,11 @@ e1000_ethtool_swol(struct e1000_adapter *adapter, struct ethtool_wolinfo *wol)
/* Fall Through */
default:
if(wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
if(wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY))
return -EOPNOTSUPP;
adapter->wol = 0;
if(wol->wolopts & WAKE_PHY)
adapter->wol |= E1000_WUFC_LNKC;
if(wol->wolopts & WAKE_UCAST)
adapter->wol |= E1000_WUFC_EX;
if(wol->wolopts & WAKE_MCAST)
......
......@@ -26,10 +26,20 @@
*******************************************************************************/
#define __E1000_MAIN__
#include "e1000.h"
/* Change Log
*
* 4.4.19 11/27/02
* o Feature: Added user-settable knob for interrupt throttle rate (ITR).
* o Cleanup: removed large static array allocations.
* o Cleanup: C99 struct initializer format.
* o Bug fix: restore VLAN settings when interface is brought up.
* o Bug fix: return cleanly in probe if error in detecting MAC type.
* o Bug fix: Wake up on magic packet by default only if enabled in eeprom.
* o Bug fix: Validate MAC address in set_mac.
* o Bug fix: Throw away zero-length Tx skbs.
* o Bug fix: Make ethtool EEPROM acceses work on older versions of ethtool.
*
* 4.4.12 10/15/02
* o Clean up: use members of pci_device rather than direct calls to
......@@ -44,29 +54,12 @@
* o Clean up: Moved tx_timeout from interrupt context to process context
* using schedule_task.
*
* o Feature: merged in modified NAPI patch from Robert Olsson
* <Robert.Olsson@its.uu.se> Uppsala Univeristy, Sweden.
*
* 4.3.15 8/9/02
* o Converted from Dual BSD/GPL license to GPL license.
* o Clean up: use pci_[clear|set]_mwi rather than direct calls to
* pci_write_config_word.
* o Bug fix: added read-behind-write calls to post writes before delays.
* o Bug fix: removed mdelay busy-waits in interrupt context.
* o Clean up: direct clear of descriptor bits rather than using memset.
* o Bug fix: added wmb() for ia-64 between descritor writes and advancing
* descriptor tail.
* o Feature: added locking mechanism for asf functionality.
* o Feature: exposed two Tx and one Rx interrupt delay knobs for finer
* control over interurpt rate tuning.
* o Misc ethtool bug fixes.
*
* 4.3.2 7/5/02
*/
char e1000_driver_name[] = "e1000";
char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
char e1000_driver_version[] = "4.4.12-k1";
char e1000_driver_version[] = "4.4.19-k1";
char e1000_copyright[] = "Copyright (c) 1999-2002 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
......@@ -175,6 +168,7 @@ static void e1000_tx_timeout_task(struct net_device *dev);
static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
static int e1000_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
static int e1000_notify_netdev(struct notifier_block *, unsigned long event, void *ptr);
......@@ -274,6 +268,7 @@ e1000_up(struct e1000_adapter *adapter)
/* hardware has been reset, we need to reload some things */
e1000_set_multi(netdev);
e1000_restore_vlan(adapter);
e1000_configure_tx(adapter);
e1000_setup_rctl(adapter);
......@@ -349,6 +344,7 @@ e1000_probe(struct pci_dev *pdev,
int mmio_len;
int pci_using_dac;
int i;
uint16_t eeprom_data;
if((i = pci_enable_device(pdev)))
return i;
......@@ -501,8 +497,9 @@ e1000_probe(struct pci_dev *pdev,
* enable the ACPI Magic Packet filter
*/
e1000_read_eeprom(&adapter->hw, EEPROM_INIT_CONTROL2_REG, &eeprom_data);
if((adapter->hw.mac_type >= e1000_82544) &&
(E1000_READ_REG(&adapter->hw, WUC) & E1000_WUC_APME))
(eeprom_data & E1000_EEPROM_APME))
adapter->wol |= E1000_WUFC_MAG;
/* reset the hardware with the new settings */
......@@ -583,6 +580,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
hw->subsystem_id = pdev->subsystem_device;
pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
......@@ -1144,6 +1142,9 @@ e1000_set_mac(struct net_device *netdev, void *p)
struct e1000_adapter *adapter = netdev->priv;
struct sockaddr *addr = p;
if(!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
/* 82542 2.0 needs to be in reset to write receive address registers */
if(adapter->hw.mac_type == e1000_82542_rev2_0)
......@@ -1400,7 +1401,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb)
int f;
len = skb->len - skb->data_len;
i = (tx_ring->next_to_use + tx_ring->count - 1) % tx_ring->count;
count = 0;
......@@ -1507,11 +1507,16 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev->priv;
int tx_flags = 0, count;
int f;
count = TXD_USE_COUNT(skb->len - skb->data_len,
adapter->max_data_per_txd);
if(count == 0) {
dev_kfree_skb_any(skb);
return 0;
}
for(f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
adapter->max_data_per_txd);
......@@ -2392,6 +2397,21 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
e1000_write_vfta(&adapter->hw, index, vfta);
}
/* Re-register the VLAN group with the hardware and re-add every VLAN id
 * currently present in the group's device table (used after a reset). */
static void
e1000_restore_vlan(struct e1000_adapter *adapter)
{
	uint16_t vid;

	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if(!adapter->vlgrp)
		return;

	for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
		if(adapter->vlgrp->vlan_devices[vid])
			e1000_vlan_rx_add_vid(adapter->netdev, vid);
	}
}
static int
e1000_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
{
......@@ -2437,14 +2457,19 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev->priv;
uint32_t ctrl, ctrl_ext, rctl, manc;
uint32_t ctrl, ctrl_ext, rctl, manc, status;
uint32_t wufc = adapter->wol;
netif_device_detach(netdev);
if(netif_running(netdev))
e1000_down(adapter);
if(adapter->wol) {
status = E1000_READ_REG(&adapter->hw, STATUS);
if(status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
if(wufc) {
e1000_setup_rctl(adapter);
e1000_set_multi(netdev);
......@@ -2474,7 +2499,7 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
}
E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
E1000_WRITE_REG(&adapter->hw, WUFC, adapter->wol);
E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
pci_enable_wake(pdev, 3, 1);
pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
} else {
......
......@@ -230,7 +230,6 @@ struct e1000_option {
} arg;
};
static int __devinit
e1000_validate_option(int *value, struct e1000_option *opt)
{
......@@ -481,6 +480,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
{ SPEED_10, "" },
{ SPEED_100, "" },
{ SPEED_1000, "" }};
struct e1000_option opt = {
.type = list_option,
.name = "Speed",
......@@ -496,6 +496,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
struct e1000_opt_list dplx_list[] = {{ 0, "" },
{ HALF_DUPLEX, "" },
{ FULL_DUPLEX, "" }};
struct e1000_option opt = {
.type = list_option,
.name = "Duplex",
......
......@@ -2383,6 +2383,7 @@ static struct pci_device_id eepro100_pci_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x1228, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
......
......@@ -124,13 +124,13 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
/* advertise only what has been requested */
advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
if (ADVERTISED_10baseT_Half)
if (ecmd->advertising & ADVERTISED_10baseT_Half)
tmp |= ADVERTISE_10HALF;
if (ADVERTISED_10baseT_Full)
if (ecmd->advertising & ADVERTISED_10baseT_Full)
tmp |= ADVERTISE_10FULL;
if (ADVERTISED_100baseT_Half)
if (ecmd->advertising & ADVERTISED_100baseT_Half)
tmp |= ADVERTISE_100HALF;
if (ADVERTISED_100baseT_Full)
if (ecmd->advertising & ADVERTISED_100baseT_Full)
tmp |= ADVERTISE_100FULL;
if (advert != tmp) {
mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
......
......@@ -164,6 +164,7 @@
#include <linux/delay.h>
#include <linux/rtnetlink.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
......@@ -1898,44 +1899,6 @@ static struct net_device_stats *get_stats(struct net_device *dev)
return &np->stats;
}
/**
 * dp83815_crc - compute the DP83815's 9-bit multicast hash for @length
 * bytes at @data.
 *
 * Note - this is deliberately *not* ether_crc() or ether_crc_le():
 * the chip's variant shifts a 32-bit register left (big-endian
 * polynomial 0x04C11DB7) while consuming each data byte LSB first,
 * forces the register's low bit on every feedback step, and returns
 * only the top 9 bits with no final inversion.
 */
#define DP_POLYNOMIAL 0x04C11DB7
static unsigned dp83815_crc(int length, unsigned char *data)
{
	unsigned reg = ~0;	/* shift register, preset to all ones; 32-bit on all supported arches */
	unsigned char octet;	/* data byte currently being folded in */
	unsigned char i, j;	/* byte/bit counters (8-bit, as the original used) */

	for (i = 0; i < length; i++) {
		octet = *data++;
		for (j = 0; j < 8; j++) {
			unsigned char feedback = (reg >> 31) ^ (octet & 1);

			reg <<= 1;
			if (feedback) {
				reg ^= DP_POLYNOMIAL;
				reg |= 1;	/* hardware quirk; redundant since the polynomial is odd */
			}
			octet >>= 1;
		}
	}
	/* Top 9 bits of the register index the 512-entry hash table. */
	return reg >> 23;
}
/* Set bit number @offset in the byte array @data, numbering bits
 * little-endian: bit k lives in byte k/8 at in-byte position k%8. */
void set_bit_le(int offset, unsigned char * data)
{
	unsigned char mask = 1u << (offset & 0x07);

	data[offset >> 3] |= mask;
}
#define HASH_TABLE 0x200
static void __set_rx_mode(struct net_device *dev)
{
......@@ -1960,9 +1923,8 @@ static void __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
i++, mclist = mclist->next) {
set_bit_le(
dp83815_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff,
mc_filter);
int i = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
mc_filter[i/8] |= (1 << (i & 0x07));
}
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptMulticast | AcceptMyPhys;
......
......@@ -1223,6 +1223,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct pci_dev *pdev)
lp->pdev = pdev;
memcpy((char *)&lp->srom,(char *)&bus.srom,sizeof(struct de4x5_srom));
lp->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
init_timer(&lp->timer);
de4x5_parse_params(dev);
/*
......
......@@ -9,9 +9,19 @@
extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len);
extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len);
extern u32 bitreverse(u32 in);
#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)data, length)
/*
* Helpers for hash table generation of ethernet nics:
*
* Ethernet sends the least significant bit of a byte first, thus crc32_le
* is used. The output of crc32_le is bit reversed [most significant bit
* is in bit nr 0], thus it must be reversed before use. Except for
* nics that bit swap the result internally...
*/
#define ether_crc(length, data) bitreverse(crc32_le(~0, data, length))
#define ether_crc_le(length, data) crc32_le(~0, data, length)
#define ether_crc(length, data) crc32_be(~0, data, length)
#endif /* _LINUX_CRC32_H */
......@@ -265,8 +265,19 @@ u32 attribute((pure)) crc32_be(u32 crc, unsigned char const *p, size_t len)
}
#endif
/*
 * bitreverse - reverse the order of the 32 bits in @x
 * (bit 0 <-> bit 31, bit 1 <-> bit 30, ...).
 *
 * Classic O(log n) swap network.  Each stage swaps pairs of bits
 * whose positions differ in exactly one index bit, so the five
 * stages commute; they are applied here from single bits upward.
 */
u32 bitreverse(u32 x)
{
	/* swap adjacent single bits */
	x = ((x & 0x55555555) << 1) | ((x >> 1) & 0x55555555);
	/* swap adjacent 2-bit groups */
	x = ((x & 0x33333333) << 2) | ((x >> 2) & 0x33333333);
	/* swap adjacent nibbles */
	x = ((x & 0x0f0f0f0f) << 4) | ((x >> 4) & 0x0f0f0f0f);
	/* swap adjacent bytes */
	x = ((x & 0x00ff00ff) << 8) | ((x >> 8) & 0x00ff00ff);
	/* swap the 16-bit halves */
	x = (x << 16) | (x >> 16);
	return x;
}
EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(crc32_be);
EXPORT_SYMBOL(bitreverse);
/*
* A brief CRC tutorial.
......@@ -412,16 +423,6 @@ buf_dump(char const *prefix, unsigned char const *buf, size_t len)
}
#endif
/*
 * Reverse the order of the 32 bits in x (bit 0 <-> bit 31, ...) using
 * five pair-swap stages: 16-bit halves, bytes, nibbles, 2-bit groups,
 * then single bits.  Pure function of its argument.
 *
 * NOTE(review): "attribute((const))" looks like extraction-mangled
 * "__attribute__((const))" -- confirm against the original source.
 */
static u32 attribute((const)) bitreverse(u32 x)
{
	x = (x >> 16) | (x << 16);
	x = (x >> 8 & 0x00ff00ff) | (x << 8 & 0xff00ff00);
	x = (x >> 4 & 0x0f0f0f0f) | (x << 4 & 0xf0f0f0f0);
	x = (x >> 2 & 0x33333333) | (x << 2 & 0xcccccccc);
	x = (x >> 1 & 0x55555555) | (x << 1 & 0xaaaaaaaa);
	return x;
}
static void bytereverse(unsigned char *buf, size_t len)
{
while (len--) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment