Commit ee86da9d authored May 27, 2004 by Jeff Garzik

Merge redhat.com:/spare/repo/netdev-2.6/misc
into redhat.com:/spare/repo/net-drivers-2.6

Parents: a0e8d714 8a6d6a46

Showing 26 changed files with 6965 additions and 2315 deletions (+6965 -2315)
drivers/net/8139too.c                  +4     -0
drivers/net/Kconfig                    +33    -0
drivers/net/Makefile                   +1     -0
drivers/net/acenic.c                   +244   -521
drivers/net/e1000/e1000.h              +8     -0
drivers/net/e1000/e1000_ethtool.c      +304   -414
drivers/net/e1000/e1000_hw.c           +22    -11
drivers/net/e1000/e1000_hw.h           +9     -7
drivers/net/e1000/e1000_main.c         +106   -134
drivers/net/e1000/e1000_param.c        +56    -45
drivers/net/ibm_emac/Makefile          +12    -0
drivers/net/ibm_emac/ibm_emac.h        +263   -0
drivers/net/ibm_emac/ibm_emac_core.c   +1968  -0
drivers/net/ibm_emac/ibm_emac_core.h   +146   -0
drivers/net/ibm_emac/ibm_emac_debug.c  +224   -0
drivers/net/ibm_emac/ibm_emac_mal.c    +467   -0
drivers/net/ibm_emac/ibm_emac_mal.h    +130   -0
drivers/net/ibm_emac/ibm_emac_phy.c    +297   -0
drivers/net/ibm_emac/ibm_emac_phy.h    +137   -0
drivers/net/ibm_emac/ibm_emac_rgmii.h  +65    -0
drivers/net/ibm_emac/ibm_emac_tah.h    +48    -0
drivers/net/ibm_emac/ibm_emac_zmii.h   +93    -0
drivers/net/wan/farsync.c              +2179  -1181
drivers/net/wan/farsync.h              +138   -2
include/linux/if.h                     +2     -0
include/linux/pci_ids.h                +9     -0
drivers/net/8139too.c

...
@@ -171,7 +171,11 @@ static int debug = -1;
  * Receive ring size
  * Warning: 64K ring has hardware issues and may lock up.
  */
+#if defined(CONFIG_SH_DREAMCAST) || defined(CONFIG_EMBEDDED)
+#define RX_BUF_IDX	1	/* 16K ring */
+#else
 #define RX_BUF_IDX	2	/* 32K ring */
+#endif
 #define RX_BUF_LEN	(8192 << RX_BUF_IDX)
 #define RX_BUF_PAD	16
 #define RX_BUF_WRAP_PAD 2048	/* spare padding to handle lack of packet wrap */
...
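For reference, the arithmetic behind these constants: RX_BUF_LEN is 8192 << RX_BUF_IDX, so the new RX_BUF_IDX of 1 halves the receive ring from 32K to 16K bytes on Dreamcast/embedded configurations. A minimal userspace sketch of the resulting sizes (constants copied from the hunk above; the "padded allocation" sum is only an assumption about how the driver pads the ring, not a line from the patch):

#include <stdio.h>

#define RX_BUF_PAD      16
#define RX_BUF_WRAP_PAD 2048   /* spare padding to handle lack of packet wrap */

int main(void)
{
	/* RX_BUF_IDX 1 = Dreamcast/embedded case, 2 = default */
	for (int rx_buf_idx = 1; rx_buf_idx <= 2; rx_buf_idx++) {
		unsigned long rx_buf_len = 8192UL << rx_buf_idx;
		unsigned long total = rx_buf_len + RX_BUF_PAD + RX_BUF_WRAP_PAD;
		printf("RX_BUF_IDX=%d: ring %lu bytes, padded allocation %lu bytes\n",
		       rx_buf_idx, rx_buf_len, total);
	}
	return 0;
}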
drivers/net/Kconfig

...
@@ -1210,6 +1210,39 @@ config IBMVETH
 	  <file:Documentation/networking/net-modules.txt>. The module will
 	  be called ibmveth.
 
+config IBM_EMAC
+	tristate "IBM PPC4xx EMAC driver support"
+	depends on 4xx
+	---help---
+	  This driver supports the IBM PPC4xx EMAC family of on-chip
+	  Ethernet controllers.
+
+config IBM_EMAC_ERRMSG
+	bool "Verbose error messages"
+	depends on IBM_EMAC
+
+config IBM_EMAC_RXB
+	int "Number of receive buffers"
+	depends on IBM_EMAC
+	default "128" if IBM_EMAC4
+	default "64"
+
+config IBM_EMAC_TXB
+	int "Number of transmit buffers"
+	depends on IBM_EMAC
+	default "128" if IBM_EMAC4
+	default "8"
+
+config IBM_EMAC_FGAP
+	int "Frame gap"
+	depends on IBM_EMAC
+	default "8"
+
+config IBM_EMAC_SKBRES
+	int "Skb reserve amount"
+	depends on IBM_EMAC
+	default "0"
+
 config NET_PCI
 	bool "EISA, VLB, PCI and on board controllers"
 	depends on NET_ETHERNET && (ISA || EISA || PCI)
...
drivers/net/Makefile

...
@@ -7,6 +7,7 @@ ifeq ($(CONFIG_ISDN_PPP),y)
 endif
 
 obj-$(CONFIG_E1000) += e1000/
+obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IXGB) += ixgb/
 obj-$(CONFIG_BONDING) += bonding/
...
drivers/net/acenic.c

...
@@ -131,7 +131,6 @@
 #define PCI_DEVICE_ID_SGI_ACENIC	0x0009
 #endif
 
-#if LINUX_VERSION_CODE >= 0x20400
 static struct pci_device_id acenic_pci_tbl[] = {
 	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
 	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
...
@@ -156,37 +155,6 @@ static struct pci_device_id acenic_pci_tbl[] = {
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
-#endif
-
-#ifndef MODULE_LICENSE
-#define MODULE_LICENSE(a)
-#endif
-
-#ifndef wmb
-#define wmb()	mb()
-#endif
-
-#ifndef __exit
-#define __exit
-#endif
-
-#ifndef __devinit
-#define __devinit	__init
-#endif
-
-#ifndef SMP_CACHE_BYTES
-#define SMP_CACHE_BYTES	L1_CACHE_BYTES
-#endif
-
-#ifndef SET_MODULE_OWNER
-#define SET_MODULE_OWNER(dev)		do{} while(0)
-#define ACE_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
-#define ACE_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
-#else
-#define ACE_MOD_INC_USE_COUNT		do{} while(0)
-#define ACE_MOD_DEC_USE_COUNT		do{} while(0)
-#endif
 
 #ifndef SET_NETDEV_DEV
 #define SET_NETDEV_DEV(net, pdev)	do{} while(0)
...
@@ -198,151 +166,8 @@ MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
 #define ace_sync_irq(irq)	synchronize_irq()
 #endif
 
-#if LINUX_VERSION_CODE < 0x2051e
-#define local_irq_save(flags)		do{__save_flags(flags) ; \
-						__cli();} while(0)
-#define local_irq_restore(flags)	__restore_flags(flags)
-#endif
-
-#if (LINUX_VERSION_CODE < 0x02030d)
-#define pci_resource_start(dev, bar)	dev->base_address[bar]
-#elif (LINUX_VERSION_CODE < 0x02032c)
-#define pci_resource_start(dev, bar)	dev->resource[bar].start
-#endif
-
-#if (LINUX_VERSION_CODE < 0x02030e)
-#define net_device device
-#endif
-
-#if (LINUX_VERSION_CODE < 0x02032a)
-typedef u32 dma_addr_t;
-
-static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
-					 dma_addr_t *dma_handle)
-{
-	void *virt_ptr;
-
-	virt_ptr = kmalloc(size, GFP_KERNEL);
-	if (!virt_ptr)
-		return NULL;
-	*dma_handle = virt_to_bus(virt_ptr);
-	return virt_ptr;
-}
-
-#define pci_free_consistent(cookie, size, ptr, dma_ptr)	kfree(ptr)
-#define pci_map_page(cookie, page, off, size, dir)	\
-	virt_to_bus(page_address(page)+(off))
-#define pci_unmap_page(cookie, address, size, dir)
-#define pci_set_dma_mask(dev, mask)		\
-	(((u64)(mask) & 0xffffffff00000000) == 0 ? 0 : -EIO)
-#define pci_dma_supported(dev, mask)		\
-	(((u64)(mask) & 0xffffffff00000000) == 0 ? 1 : 0)
-
-#elif (LINUX_VERSION_CODE < 0x02040d)
-
-/*
- * 2.4.13 introduced pci_map_page()/pci_unmap_page() - for 2.4.12 and prior,
- * fall back on pci_map_single()/pci_unnmap_single().
- *
- * We are guaranteed that the page is mapped at this point since
- * pci_map_page() is only used upon valid struct skb's.
- */
-static inline dma_addr_t
-pci_map_page(struct pci_dev *cookie, struct page *page, unsigned long off,
-	     size_t size, int dir)
-{
-	void *page_virt;
-
-	page_virt = page_address(page);
-	if (!page_virt)
-		BUG();
-	return pci_map_single(cookie, (page_virt + off), size, dir);
-}
-#define pci_unmap_page(cookie, dma_addr, size, dir)	\
-	pci_unmap_single(cookie, dma_addr, size, dir)
-#endif
-
-#if (LINUX_VERSION_CODE < 0x020412)
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME)		0
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do{} while(0)
-#define pci_unmap_len(PTR, LEN_NAME)		0
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do{} while(0)
-#endif
-
-#if (LINUX_VERSION_CODE < 0x02032b)
-/*
- * SoftNet
- *
- * For pre-softnet kernels we need to tell the upper layer not to
- * re-enter start_xmit() while we are in there. However softnet
- * guarantees not to enter while we are in there so there is no need
- * to do the netif_stop_queue() dance unless the transmit queue really
- * gets stuck. This should also improve performance according to tests
- * done by Aman Singla.
- */
-#define dev_kfree_skb_irq(a)			dev_kfree_skb(a)
-#define netif_wake_queue(dev)			clear_bit(0, &dev->tbusy)
-#define netif_stop_queue(dev)			set_bit(0, &dev->tbusy)
-#define late_stop_netif_stop_queue(dev)		do{} while(0)
-#define early_stop_netif_stop_queue(dev)	test_and_set_bit(0,&dev->tbusy)
-#define early_stop_netif_wake_queue(dev)	netif_wake_queue(dev)
-
-static inline void netif_start_queue(struct net_device *dev)
-{
-	dev->tbusy = 0;
-	dev->interrupt = 0;
-	dev->start = 1;
-}
-
-#define ace_mark_net_bh()			mark_bh(NET_BH)
-#define netif_queue_stopped(dev)		dev->tbusy
-#define netif_running(dev)			dev->start
-#define ace_if_down(dev)			do{dev->start = 0;} while(0)
-
-#define tasklet_struct				tq_struct
-static inline void tasklet_schedule(struct tasklet_struct *tasklet)
-{
-	queue_task(tasklet, &tq_immediate);
-	mark_bh(IMMEDIATE_BH);
-}
-
-static inline void tasklet_init(struct tasklet_struct *tasklet,
-				void (*func)(unsigned long),
-				unsigned long data)
-{
-	tasklet->next = NULL;
-	tasklet->sync = 0;
-	tasklet->routine = (void (*)(void *))func;
-	tasklet->data = (void *)data;
-}
-#define tasklet_kill(tasklet)			do{} while(0)
-#else
-#define late_stop_netif_stop_queue(dev)		netif_stop_queue(dev)
-#define early_stop_netif_stop_queue(dev)	0
-#define early_stop_netif_wake_queue(dev)	do{} while(0)
-#define ace_mark_net_bh()			do{} while(0)
-#define ace_if_down(dev)			do{} while(0)
-#endif
-
-#if (LINUX_VERSION_CODE >= 0x02031b)
-#define NEW_NETINIT
-#define ACE_PROBE_ARG				void
-#else
-#define ACE_PROBE_ARG				struct net_device *dev
-#endif
-
-#ifndef min_t
-#define min_t(type,a,b)	(((a)<(b))?(a):(b))
-#endif
-
-#ifndef ARCH_HAS_PREFETCHW
-#ifndef prefetchw
-#define prefetchw(x)				do{} while(0)
-#endif
-#endif
-
-#endif
+#ifndef offset_in_page
+#define offset_in_page(ptr)	((unsigned long)(ptr) & ~PAGE_MASK)
+#endif
 
 #define ACE_MAX_MOD_PARMS	8
...
@@ -595,58 +420,41 @@ static int max_rx_desc[ACE_MAX_MOD_PARMS];
 static int tx_ratio[ACE_MAX_MOD_PARMS];
 static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
 
+MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
+MODULE_PARM(link, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(trace, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(tx_coal_tick, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(max_tx_desc, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(rx_coal_tick, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(max_rx_desc, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(tx_ratio, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
+MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
+MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives");
+MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait");
+MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives");
+MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait");
+MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
+
 static char version[] __initdata = 
   "acenic.c: v0.92 08/05/2002  Jes Sorensen, linux-acenic@SunSITE.dk\n"
   "                            http://home.cern.ch/~jes/gige/acenic.html\n";
 
-static struct net_device *root_dev;
-
-static int probed __initdata = 0;
-
-int __devinit acenic_probe (ACE_PROBE_ARG)
+static int __devinit acenic_probe_one(struct pci_dev *pdev,
+		const struct pci_device_id *id)
 {
-#ifdef NEW_NETINIT
 	struct net_device *dev;
-#endif
 	struct ace_private *ap;
-	struct pci_dev *pdev = NULL;
-	int boards_found = 0;
-	int version_disp;
-
-	if (probed)
-		return -ENODEV;
-	probed++;
-
-	version_disp = 0;
-
-	while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET<<8, pdev))) {
-
-		if (!((pdev->vendor == PCI_VENDOR_ID_ALTEON) &&
-		      ((pdev->device == PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE) ||
-		       (pdev->device == PCI_DEVICE_ID_ALTEON_ACENIC_COPPER))) &&
-		    !((pdev->vendor == PCI_VENDOR_ID_3COM) &&
-		      (pdev->device == PCI_DEVICE_ID_3COM_3C985)) &&
-		    !((pdev->vendor == PCI_VENDOR_ID_NETGEAR) &&
-		      ((pdev->device == PCI_DEVICE_ID_NETGEAR_GA620) ||
-		       (pdev->device == PCI_DEVICE_ID_NETGEAR_GA620T))) &&
-		/*
-		 * Farallon used the DEC vendor ID on their cards by
-		 * mistake for a while
-		 */
-		    !((pdev->vendor == PCI_VENDOR_ID_DEC) &&
-		      (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX)) &&
-		    !((pdev->vendor == PCI_VENDOR_ID_ALTEON) &&
-		      (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T)) &&
-		    !((pdev->vendor == PCI_VENDOR_ID_SGI) &&
-		      (pdev->device == PCI_DEVICE_ID_SGI_ACENIC)))
-			continue;
-
-		dev = alloc_etherdev(sizeof(struct ace_private));
-		if (dev == NULL) {
-			printk(KERN_ERR "acenic: Unable to allocate "
-			       "net_device structure!\n");
-			break;
-		}
+	static int boards_found;
+
+	dev = alloc_etherdev(sizeof(struct ace_private));
+	if (dev == NULL) {
+		printk(KERN_ERR "acenic: Unable to allocate "
+		       "net_device structure!\n");
+		return -ENOMEM;
+	}
 
-		SET_MODULE_OWNER(dev);
+	SET_MODULE_OWNER(dev);
...
@@ -655,8 +463,6 @@ int __devinit acenic_probe (ACE_PROBE_ARG)
 	ap = dev->priv;
 	ap->pdev = pdev;
 
-	dev->open = &ace_open;
-	dev->hard_start_xmit = &ace_start_xmit;
 	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
 #if ACENIC_DO_VLAN
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
...
@@ -668,26 +474,22 @@ int __devinit acenic_probe (ACE_PROBE_ARG)
 		dev->tx_timeout = &ace_watchdog;
 		dev->watchdog_timeo = 5*HZ;
 	}
 
+	dev->open = &ace_open;
 	dev->stop = &ace_close;
+	dev->hard_start_xmit = &ace_start_xmit;
 	dev->get_stats = &ace_get_stats;
 	dev->set_multicast_list = &ace_set_multicast_list;
 	dev->do_ioctl = &ace_ioctl;
 	dev->set_mac_address = &ace_set_mac_addr;
 	dev->change_mtu = &ace_change_mtu;
 
-	/* display version info if adapter is found */
-	if (!version_disp) {
-		/* set display flag to TRUE so that */
-		/* we only display this string ONCE */
-		version_disp = 1;
+	/* we only display this string ONCE */
+	if (!boards_found)
 		printk(version);
-	}
 
-	if (pci_enable_device(pdev)) {
-		free_netdev(dev);
-		continue;
-	}
+	if (pci_enable_device(pdev))
+		goto fail_free_netdev;
 
 	/*
 	 * Enable master mode before we start playing with the
...
@@ -709,12 +511,10 @@ int __devinit acenic_probe (ACE_PROBE_ARG)
 		wmb();
 	}
 
 	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
 	if (ap->pci_latency <= 0x40) {
 		ap->pci_latency = 0x40;
 		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
 	}
 
 	/*
...
@@ -728,7 +528,7 @@ int __devinit acenic_probe (ACE_PROBE_ARG)
 		printk(KERN_ERR "%s: Unable to map I/O register, "
 		       "AceNIC %i will be disabled.\n",
 		       dev->name, boards_found);
-		break;
+		goto fail_free_netdev;
 	}
 
 	switch(pdev->vendor) {
...
@@ -774,6 +574,7 @@ int __devinit acenic_probe (ACE_PROBE_ARG)
 		printk(KERN_INFO "%s: Unknown AceNIC ", dev->name);
 		break;
 	}
 
 	ap->name [sizeof(ap->name)-1] = '\0';
 	printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
 #ifdef __sparc__
...
@@ -786,20 +587,12 @@ int __devinit acenic_probe (ACE_PROBE_ARG)
 	if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
 		printk(KERN_ERR "%s: Driver compiled without Tigon I"
 		       " support - NIC disabled\n", dev->name);
-		ace_init_cleanup(dev);
-		free_netdev(dev);
-		continue;
+		goto fail_uninit;
 	}
 #endif
 
-	if (ace_allocate_descriptors(dev)) {
-		/*
-		 * ace_allocate_descriptors() calls
-		 * ace_init_cleanup() on error.
-		 */
-		free_netdev(dev);
-		continue;
-	}
+	if (ace_allocate_descriptors(dev))
+		goto fail_free_netdev;
 
 #ifdef MODULE
 	if (boards_found >= ACE_MAX_MOD_PARMS)
...
@@ -810,79 +603,42 @@ int __devinit acenic_probe (ACE_PROBE_ARG)
 	ap->board_idx = BOARD_IDX_STATIC;
 #endif
 
-	if (ace_init(dev)) {
-		/*
-		 * ace_init() calls ace_init_cleanup() on error.
-		 */
-		free_netdev(dev);
-		continue;
-	}
+	if (ace_init(dev))
+		goto fail_free_netdev;
 
 	if (register_netdev(dev)) {
 		printk(KERN_ERR "acenic: device registration failed\n");
-		ace_init_cleanup(dev);
-		free_netdev(dev);
-		continue;
+		goto fail_uninit;
 	}
 
 	if (ap->pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;
 
-		boards_found++;
-	}
-
-	/*
-	 * If we're at this point we're going through ace_probe() for
-	 * the first time.  Return success (0) if we've initialized 1
-	 * or more boards. Otherwise, return failure (-ENODEV).
-	 */
-	if (boards_found > 0)
-		return 0;
-	else
-		return -ENODEV;
+	pci_set_drvdata(pdev, dev);
+	boards_found++;
+	return 0;
+
+ fail_uninit:
+	ace_init_cleanup(dev);
+ fail_free_netdev:
+	free_netdev(dev);
+	return -ENODEV;
 }
 
-#ifdef MODULE
-MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
-MODULE_PARM(link, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(trace, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(tx_coal_tick, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(max_tx_desc, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(rx_coal_tick, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(max_rx_desc, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(tx_ratio, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
-MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
-MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives");
-MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait");
-MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives");
-MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait");
-MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
-#endif
-
-static void __exit ace_module_cleanup(void)
+static void __devexit acenic_remove_one(struct pci_dev *pdev)
 {
-	struct ace_private *ap;
-	struct ace_regs *regs;
-	struct net_device *next;
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct ace_private *ap = dev->priv;
+	struct ace_regs *regs = ap->regs;
 	short i;
 
-	while (root_dev) {
-		ap = root_dev->priv;
-		next = ap->next;
-		unregister_netdev(root_dev);
-		regs = ap->regs;
+	unregister_netdev(dev);
 
 	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
 	if (ap->version >= 2)
 		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
 
 	/*
 	 * This clears any pending interrupts
 	 */
...
@@ -898,7 +654,7 @@ static void __exit ace_module_cleanup(void)
 	 * Then release the RX buffers - jumbo buffers were
 	 * already released in ace_close().
 	 */
-	ace_sync_irq(root_dev->irq);
+	ace_sync_irq(dev->irq);
 
 	for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
 		struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
...
@@ -918,6 +674,7 @@ static void __exit ace_module_cleanup(void)
 			dev_kfree_skb(skb);
 		}
 	}
 
 	if (ap->version >= 2) {
 		for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
 			struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
...
@@ -938,6 +695,7 @@ static void __exit ace_module_cleanup(void)
 		}
 	}
 
 	for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
 		struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
 		if (skb) {
...
@@ -956,46 +714,29 @@ static void __exit ace_module_cleanup(void)
 		}
 	}
 
-		ace_init_cleanup(root_dev);
-		free_netdev(root_dev);
-		root_dev = next;
-	}
+	ace_init_cleanup(dev);
+	free_netdev(dev);
 }
 
-int __init ace_module_init(void)
-{
-	int status;
-
-	root_dev = NULL;
-
-#ifdef NEW_NETINIT
-	status = acenic_probe();
-#else
-	status = acenic_probe(NULL);
-#endif
-	return status;
-}
-
-#if (LINUX_VERSION_CODE < 0x02032a)
-#ifdef MODULE
-int init_module(void)
+static struct pci_driver acenic_pci_driver = {
+	.name		= "acenic",
+	.id_table	= acenic_pci_tbl,
+	.probe		= acenic_probe_one,
+	.remove		= __devexit_p(acenic_remove_one),
+};
+
+static int __init acenic_init(void)
 {
-	return ace_module_init();
+	return pci_module_init(&acenic_pci_driver);
 }
 
-void cleanup_module(void)
+static void __exit acenic_exit(void)
 {
-	ace_module_cleanup();
+	pci_unregister_driver(&acenic_pci_driver);
 }
-#endif
-#else
-module_init(ace_module_init);
-module_exit(ace_module_cleanup);
-#endif
+
+module_init(acenic_init);
+module_exit(acenic_exit);
 
 static void ace_free_descriptors(struct net_device *dev)
 {
...
@@ -1462,13 +1203,6 @@ static int __init ace_init(struct net_device *dev)
 	} else
 		dev->irq = pdev->irq;
 
-	/*
-	 * Register the device here to be able to catch allocated
-	 * interrupt handlers in case the firmware doesn't come up.
-	 */
-	ap->next = root_dev;
-	root_dev = dev;
-
 #ifdef INDEX_DEBUG
 	spin_lock_init(&ap->debug_lock);
 	ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
...
@@ -2642,8 +2376,6 @@ static int ace_open(struct net_device *dev)
 	netif_start_queue(dev);
 
-	ACE_MOD_INC_USE_COUNT;
-
 	/*
 	 * Setup the bottom half rx ring refill handler
 	 */
...
@@ -2660,8 +2392,6 @@ static int ace_close(struct net_device *dev)
 	unsigned long flags;
 	short i;
 
-	ace_if_down(dev);
-
 	/*
 	 * Without (or before) releasing irq and stopping hardware, this
 	 * is an absolute non-sense, by the way. It will be reset instantly
...
@@ -2733,7 +2463,6 @@ static int ace_close(struct net_device *dev)
 	ace_unmask_irq(dev);
 	local_irq_restore(flags);
 
-	ACE_MOD_DEC_USE_COUNT;
 	return 0;
 }
...
@@ -2790,12 +2519,6 @@ static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct tx_desc *desc;
 	u32 idx, flagsize;
 
-	/*
-	 * This only happens with pre-softnet, ie. 2.2.x kernels.
-	 */
-	if (early_stop_netif_stop_queue(dev))
-		return 1;
-
 restart:
 	idx = ap->tx_prd;
...
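The acenic hunks above drop the 2.4-era compatibility layer and replace the hand-rolled pci_find_class() scan loop (and the root_dev list) with the hotplug-style PCI driver model. A stripped-down sketch of that 2.6-era registration pattern is shown below; the example_* names are placeholders and the body is an illustration of the shape of the conversion, not the acenic driver itself:

#include <linux/module.h>
#include <linux/pci.h>

static int __devinit example_probe_one(struct pci_dev *pdev,
				       const struct pci_device_id *id)
{
	/* per-device setup: enable the device, map BARs, register a netdev */
	return pci_enable_device(pdev);
}

static void __devexit example_remove_one(struct pci_dev *pdev)
{
	/* undo everything done in probe for this one device */
	pci_disable_device(pdev);
}

static struct pci_device_id example_pci_tbl[] = {
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
	  PCI_ANY_ID, PCI_ANY_ID, },
	{ }
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.id_table	= example_pci_tbl,
	.probe		= example_probe_one,
	.remove		= __devexit_p(example_remove_one),
};

static int __init example_init(void)
{
	return pci_module_init(&example_pci_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The PCI core calls the probe routine once per matching device, which is why the per-board boards_found/root_dev bookkeeping of the old scan loop can be dropped.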
drivers/net/e1000/e1000.h

...
@@ -71,6 +71,7 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/moduleparam.h>
 
 #define BAR_0		0
 #define BAR_1		1
...
@@ -89,6 +90,12 @@ struct e1000_adapter;
 #define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
 
+#define PFX "e1000: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+		__FUNCTION__ , ## args))
+
 #define E1000_MAX_INTR 10
 
 /* How many descriptors for TX and RX ? */
...
@@ -245,5 +252,6 @@ struct e1000_adapter {
 	uint32_t pci_state[16];
+	int msg_enable;
 };
 
 #endif /* _E1000_H_ */
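The new DPRINTK macro gates each message class on adapter->msg_enable, which the msglevel ethtool hooks added in e1000_ethtool.c read and write. A small userspace sketch of the same filtering idea (the struct and constants here are stand-ins, not the kernel's):

#include <stdio.h>

#define NETIF_MSG_PROBE 0x0002
#define NETIF_MSG_LINK  0x0004

struct adapter { unsigned int msg_enable; const char *name; };

/* Same shape as the kernel macro: a message is emitted only when its
 * class bit is set in msg_enable. */
#define DPRINTK(ad, nlevel, fmt, ...) \
	do { if (NETIF_MSG_##nlevel & (ad)->msg_enable) \
		printf("e1000: %s: " fmt, (ad)->name, ##__VA_ARGS__); } while (0)

int main(void)
{
	struct adapter ad = { .msg_enable = NETIF_MSG_LINK, .name = "eth0" };

	DPRINTK(&ad, LINK,  "link is up, %d Mbps\n", 1000); /* printed */
	DPRINTK(&ad, PROBE, "probing...\n");                /* filtered out */
	return 0;
}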
drivers/net/e1000/e1000_ethtool.c

...
@@ -53,7 +53,7 @@ struct e1000_stats {
 #define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \
 		      offsetof(struct e1000_adapter, m)
 
-static struct e1000_stats e1000_gstrings_stats[] = {
+static const struct e1000_stats e1000_gstrings_stats[] = {
 	{ "rx_packets", E1000_STAT(net_stats.rx_packets) },
 	{ "tx_packets", E1000_STAT(net_stats.tx_packets) },
 	{ "rx_bytes", E1000_STAT(net_stats.rx_bytes) },
...
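E1000_STAT(m) expands to a size/offset pair, which is what lets this single table drive both the string list and the value extraction later in the file (see e1000_get_ethtool_stats, where each entry is read back through its recorded offset and width). A generic userspace sketch of that size/offset technique, with made-up field names:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct stats { uint64_t rx_packets; uint32_t small_counter; };

struct stat_desc { const char *name; size_t size; size_t offset; };

#define STAT(m) sizeof(((struct stats *)0)->m), offsetof(struct stats, m)

static const struct stat_desc table[] = {
	{ "rx_packets",    STAT(rx_packets) },
	{ "small_counter", STAT(small_counter) },
};

int main(void)
{
	struct stats s = { .rx_packets = 1234, .small_counter = 7 };

	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		/* read the member back through its recorded offset and width */
		char *p = (char *)&s + table[i].offset;
		uint64_t v = (table[i].size == sizeof(uint64_t))
				? *(uint64_t *)p : *(uint32_t *)p;
		printf("%s = %llu\n", table[i].name, (unsigned long long)v);
	}
	return 0;
}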
@@ -89,20 +89,22 @@ static struct e1000_stats e1000_gstrings_stats[] = {
 	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
 	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
 	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
-	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }
+	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+	{ "rx_long_byte_count", E1000_STAT(stats.gorcl) }
 };
 #define E1000_STATS_LEN \
 	sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
-static char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register test (offline)", "Eeprom test (offline)",
 	"Interrupt test (offline)", "Loopback test (offline)",
 	"Link test (on/offline)"
 };
 #define E1000_TEST_LEN sizeof(e1000_gstrings_test) / ETH_GSTRING_LEN
 
-static void
-e1000_ethtool_gset(struct e1000_adapter *adapter, struct ethtool_cmd *ecmd)
+static int
+e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
+	struct e1000_adapter *adapter = netdev->priv;
 	struct e1000_hw *hw = &adapter->hw;
 
 	if(hw->media_type == e1000_media_type_copper) {
...
@@ -169,11 +171,13 @@ e1000_ethtool_gset(struct e1000_adapter *adapter, struct ethtool_cmd *ecmd)
 	}
 
 	ecmd->autoneg = (hw->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+	return 0;
 }
 
 static int
-e1000_ethtool_sset(struct e1000_adapter *adapter, struct ethtool_cmd *ecmd)
+e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
+	struct e1000_adapter *adapter = netdev->priv;
 	struct e1000_hw *hw = &adapter->hw;
 
 	if(ecmd->autoneg == AUTONEG_ENABLE) {
...
@@ -195,42 +199,41 @@ e1000_ethtool_sset(struct e1000_adapter *adapter, struct ethtool_cmd *ecmd)
 	return 0;
 }
 
-static int
-e1000_ethtool_gpause(struct e1000_adapter *adapter,
-                     struct ethtool_pauseparam *epause)
+static void
+e1000_get_pauseparam(struct net_device *netdev,
+                     struct ethtool_pauseparam *pause)
 {
+	struct e1000_adapter *adapter = netdev->priv;
 	struct e1000_hw *hw = &adapter->hw;
 
-	epause->autoneg =
+	pause->autoneg =
 		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
 
 	if(hw->fc == e1000_fc_rx_pause)
-		epause->rx_pause = 1;
+		pause->rx_pause = 1;
 	else if(hw->fc == e1000_fc_tx_pause)
-		epause->tx_pause = 1;
+		pause->tx_pause = 1;
 	else if(hw->fc == e1000_fc_full) {
-		epause->rx_pause = 1;
-		epause->tx_pause = 1;
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
 	}
-
-	return 0;
 }
 
 static int
-e1000_ethtool_spause(struct e1000_adapter *adapter,
-                     struct ethtool_pauseparam *epause)
+e1000_set_pauseparam(struct net_device *netdev,
+                     struct ethtool_pauseparam *pause)
 {
+	struct e1000_adapter *adapter = netdev->priv;
 	struct e1000_hw *hw = &adapter->hw;
 
-	adapter->fc_autoneg = epause->autoneg;
+	adapter->fc_autoneg = pause->autoneg;
 
-	if(epause->rx_pause && epause->tx_pause)
+	if(pause->rx_pause && pause->tx_pause)
 		hw->fc = e1000_fc_full;
-	else if(epause->rx_pause && !epause->tx_pause)
+	else if(pause->rx_pause && !pause->tx_pause)
 		hw->fc = e1000_fc_rx_pause;
-	else if(!epause->rx_pause && epause->tx_pause)
+	else if(!pause->rx_pause && pause->tx_pause)
 		hw->fc = e1000_fc_tx_pause;
-	else if(!epause->rx_pause && !epause->tx_pause)
+	else if(!pause->rx_pause && !pause->tx_pause)
 		hw->fc = e1000_fc_none;
 
 	hw->original_fc = hw->fc;
...
@@ -248,28 +251,124 @@ e1000_ethtool_spause(struct e1000_adapter *adapter,
 	return 0;
 }
 
+static uint32_t
+e1000_get_rx_csum(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	return adapter->rx_csum;
+}
+
+static int
+e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	adapter->rx_csum = data;
+
+	if(netif_running(netdev)) {
+		e1000_down(adapter);
+		e1000_up(adapter);
+	} else
+		e1000_reset(adapter);
+	return 0;
+}
+
+static uint32_t
+e1000_get_tx_csum(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int
+e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+
+	if(adapter->hw.mac_type < e1000_82543) {
+		if (!data)
+			return -EINVAL;
+		return 0;
+	}
+
+	if (data)
+		netdev->features |= NETIF_F_HW_CSUM;
+	else
+		netdev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
+
+static uint32_t
+e1000_get_sg(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_SG) != 0;
+}
+
+static int
+e1000_set_sg(struct net_device *netdev, uint32_t data)
+{
+	if (data)
+		netdev->features |= NETIF_F_SG;
+	else
+		netdev->features &= ~NETIF_F_SG;
+
+	return 0;
+}
+
+#ifdef NETIF_F_TSO
+static uint32_t
+e1000_get_tso(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_TSO) != 0;
+}
+
+static int
+e1000_set_tso(struct net_device *netdev, uint32_t data)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	if ((adapter->hw.mac_type < e1000_82544) ||
+	    (adapter->hw.mac_type == e1000_82547))
+		return data ? -EINVAL : 0;
+
+	if (data)
+		netdev->features |= NETIF_F_TSO;
+	else
+		netdev->features &= ~NETIF_F_TSO;
+	return 0;
+}
+#endif /* NETIF_F_TSO */
+
+static uint32_t
+e1000_get_msglevel(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	return adapter->msg_enable;
+}
+
 static void
-e1000_ethtool_gdrvinfo(struct e1000_adapter *adapter,
-                       struct ethtool_drvinfo *drvinfo)
+e1000_set_msglevel(struct net_device *netdev, uint32_t data)
 {
-	strncpy(drvinfo->driver,  e1000_driver_name, 32);
-	strncpy(drvinfo->version, e1000_driver_version, 32);
-	strncpy(drvinfo->fw_version, "N/A", 32);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
-	drvinfo->n_stats = E1000_STATS_LEN;
-	drvinfo->testinfo_len = E1000_TEST_LEN;
+	struct e1000_adapter *adapter = netdev->priv;
+	adapter->msg_enable = data;
+}
+
+static int
+e1000_get_regs_len(struct net_device *netdev)
+{
 #define E1000_REGS_LEN 32
-	drvinfo->regdump_len  = E1000_REGS_LEN * sizeof(uint32_t);
-	drvinfo->eedump_len = adapter->hw.eeprom.word_size * 2;
+	return E1000_REGS_LEN * sizeof(uint32_t);
 }
 
 static void
-e1000_ethtool_gregs(struct e1000_adapter *adapter,
-                    struct ethtool_regs *regs, uint32_t *regs_buff)
+e1000_get_regs(struct net_device *netdev,
+	       struct ethtool_regs *regs, void *p)
 {
+	struct e1000_adapter *adapter = netdev->priv;
 	struct e1000_hw *hw = &adapter->hw;
+	uint32_t *regs_buff = p;
 	uint16_t phy_data;
 
+	memset(p, 0, E1000_REGS_LEN * sizeof(uint32_t));
+
 	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
 
 	regs_buff[0]  = E1000_READ_REG(hw, CTRL);
...
@@ -342,37 +441,39 @@ e1000_ethtool_gregs(struct e1000_adapter *adapter,
 	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
 	regs_buff[24] = (uint32_t)phy_data;  /* phy local receiver status */
 	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
 	}
-	return;
+}
+
+static int
+e1000_get_eeprom_len(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	return adapter->hw.eeprom.word_size * 2;
 }
 
 static int
-e1000_ethtool_geeprom(struct e1000_adapter *adapter,
-                      struct ethtool_eeprom *eeprom, uint16_t *eeprom_buff)
+e1000_get_eeprom(struct net_device *netdev,
+                 struct ethtool_eeprom *eeprom, uint8_t *bytes)
 {
+	struct e1000_adapter *adapter = netdev->priv;
 	struct e1000_hw *hw = &adapter->hw;
+	uint16_t *eeprom_buff;
 	int first_word, last_word;
 	int ret_val = 0;
 	uint16_t i;
 
-	if(eeprom->len == 0) {
-		ret_val = -EINVAL;
-		goto geeprom_error;
-	}
+	if(eeprom->len == 0)
+		return -EINVAL;
 
 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
 
-	if(eeprom->offset > eeprom->offset + eeprom->len) {
-		ret_val = -EINVAL;
-		goto geeprom_error;
-	}
-
-	if((eeprom->offset + eeprom->len) > (hw->eeprom.word_size * 2))
-		eeprom->len = ((hw->eeprom.word_size * 2) - eeprom->offset);
-
 	first_word = eeprom->offset >> 1;
 	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
 
+	eeprom_buff = kmalloc(sizeof(uint16_t) *
+			(last_word - first_word + 1), GFP_KERNEL);
+	if(!eeprom_buff)
+		return -ENOMEM;
+
 	if(hw->eeprom.type == e1000_eeprom_spi)
 		ret_val = e1000_read_eeprom(hw, first_word,
 					    last_word - first_word + 1,
...
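The new e1000_get_eeprom converts the byte-oriented ethtool request into the 16-bit word range the EEPROM is actually read in, and the continuation of this hunk copies the result back starting at offset % 2. The index arithmetic on its own, as a userspace sketch with an example request:

#include <stdio.h>

int main(void)
{
	unsigned offset = 5, len = 6;   /* example byte-oriented request */

	unsigned first_word = offset >> 1;                 /* 2 */
	unsigned last_word  = (offset + len - 1) >> 1;     /* 5 */
	unsigned words      = last_word - first_word + 1;  /* 4 words read */

	printf("read words %u..%u (%u words), return %u bytes starting at "
	       "byte %u of the word buffer\n",
	       first_word, last_word, words, len, offset % 2);
	return 0;
}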
@@ -388,14 +489,19 @@ e1000_ethtool_geeprom(struct e1000_adapter *adapter,
 	for (i = 0; i < last_word - first_word + 1; i++)
 		le16_to_cpus(&eeprom_buff[i]);
 
-geeprom_error:
+	memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset % 2),
+	       eeprom->len);
+	kfree(eeprom_buff);
+
 	return ret_val;
 }
 
 static int
-e1000_ethtool_seeprom(struct e1000_adapter *adapter,
-                      struct ethtool_eeprom *eeprom, void *user_data)
+e1000_set_eeprom(struct net_device *netdev,
+                 struct ethtool_eeprom *eeprom, uint8_t *bytes)
 {
+	struct e1000_adapter *adapter = netdev->priv;
 	struct e1000_hw *hw = &adapter->hw;
 	uint16_t *eeprom_buff;
 	void *ptr;
...
@@ -410,9 +516,6 @@ e1000_ethtool_seeprom(struct e1000_adapter *adapter,
 	max_len = hw->eeprom.word_size * 2;
 
-	if((eeprom->offset + eeprom->len) > max_len)
-		eeprom->len = (max_len - eeprom->offset);
-
 	first_word = eeprom->offset >> 1;
 	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
 	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
...
@@ -439,11 +542,7 @@ e1000_ethtool_seeprom(struct e1000_adapter *adapter,
 	for (i = 0; i < last_word - first_word + 1; i++)
 		le16_to_cpus(&eeprom_buff[i]);
 
-	if((ret_val != 0) || copy_from_user(ptr, user_data, eeprom->len)) {
-		ret_val = -EFAULT;
-		goto seeprom_error;
-	}
+	memcpy(ptr, bytes, eeprom->len);
 
 	for (i = 0; i < last_word - first_word + 1; i++)
 		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
...
@@ -454,15 +553,31 @@ e1000_ethtool_seeprom(struct e1000_adapter *adapter,
 	if((ret_val == 0) && first_word <= EEPROM_CHECKSUM_REG)
 		e1000_update_eeprom_checksum(hw);
 
-seeprom_error:
 	kfree(eeprom_buff);
 	return ret_val;
 }
 
-static int
-e1000_ethtool_gring(struct e1000_adapter *adapter,
-                    struct ethtool_ringparam *ring)
+static void
+e1000_get_drvinfo(struct net_device *netdev,
+                  struct ethtool_drvinfo *drvinfo)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+
+	strncpy(drvinfo->driver,  e1000_driver_name, 32);
+	strncpy(drvinfo->version, e1000_driver_version, 32);
+	strncpy(drvinfo->fw_version, "N/A", 32);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->n_stats = E1000_STATS_LEN;
+	drvinfo->testinfo_len = E1000_TEST_LEN;
+	drvinfo->regdump_len = e1000_get_regs_len(netdev);
+	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
+}
+
+static void
+e1000_get_ringparam(struct net_device *netdev,
+                    struct ethtool_ringparam *ring)
 {
+	struct e1000_adapter *adapter = netdev->priv;
 	e1000_mac_type mac_type = adapter->hw.mac_type;
 	struct e1000_desc_ring *txdr = &adapter->tx_ring;
 	struct e1000_desc_ring *rxdr = &adapter->rx_ring;
...
@@ -477,14 +592,14 @@ e1000_ethtool_gring(struct e1000_adapter *adapter,
 	ring->tx_pending = txdr->count;
 	ring->rx_mini_pending = 0;
 	ring->rx_jumbo_pending = 0;
-
-	return 0;
 }
 
 static int
-e1000_ethtool_sring(struct e1000_adapter *adapter,
-                    struct ethtool_ringparam *ring)
+e1000_set_ringparam(struct net_device *netdev,
+                    struct ethtool_ringparam *ring)
 {
 	int err;
+	struct e1000_adapter *adapter = netdev->priv;
 	e1000_mac_type mac_type = adapter->hw.mac_type;
 	struct e1000_desc_ring *txdr = &adapter->tx_ring;
 	struct e1000_desc_ring *rxdr = &adapter->rx_ring;
...
@@ -538,6 +653,7 @@ e1000_ethtool_sring(struct e1000_adapter *adapter,
 	return err;
 }
 
 #define REG_PATTERN_TEST(R, M, W) \
 { \
 	uint32_t pat, value; \
...
@@ -628,6 +744,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 	for(i = 0; i < E1000_MC_TBL_SIZE; i++)
 		REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
 
+	*data = 0;
 	return 0;
 }
...
@@ -939,8 +1056,6 @@ e1000_phy_disable_receiver(struct e1000_adapter *adapter)
 	e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC);
 	e1000_write_phy_reg(&adapter->hw, 29, 0x001A);
 	e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0);
-
-	return;
 }
 
 static void
...
@@ -1219,7 +1334,7 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
 	for(i = 0; i < 64; i++) {
 		e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024);
-		pci_dma_sync_single(pdev, txdr->buffer_info[i].dma,
+		pci_dma_sync_single_for_device(pdev, txdr->buffer_info[i].dma,
 				    txdr->buffer_info[i].length,
 				    PCI_DMA_TODEVICE);
 	}
...
@@ -1227,7 +1342,7 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
 	msec_delay(200);
 
-	pci_dma_sync_single(pdev, rxdr->buffer_info[0].dma,
+	pci_dma_sync_single_for_cpu(pdev, rxdr->buffer_info[0].dma,
 			    rxdr->buffer_info[0].length, PCI_DMA_FROMDEVICE);
 
 	return e1000_check_lbtest_frame(rxdr->buffer_info[0].skb, 1024);
...
@@ -1258,14 +1373,26 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
 }
 
 static int
-e1000_ethtool_test(struct e1000_adapter *adapter,
+e1000_diag_test_count(struct net_device *netdev)
+{
+	return E1000_TEST_LEN;
+}
+
+static void
+e1000_diag_test(struct net_device *netdev,
 		   struct ethtool_test *eth_test, uint64_t *data)
 {
-	boolean_t if_running = netif_running(adapter->netdev);
+	struct e1000_adapter *adapter = netdev->priv;
+	boolean_t if_running = netif_running(netdev);
 
 	if(eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 
+		/* save speed, duplex, autoneg settings */
+		uint16_t autoneg_advertised = adapter->hw.autoneg_advertised;
+		uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
+		uint8_t autoneg = adapter->hw.autoneg;
+
 		/* Link test performed before hardware reset so autoneg doesn't
 		 * interfere with test result */
 		if(e1000_link_test(adapter, &data[4]))
...
@@ -1291,6 +1418,10 @@ e1000_ethtool_test(struct e1000_adapter *adapter,
 		if(e1000_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
+		/* restore Autoneg/speed/duplex settings */
+		adapter->hw.autoneg_advertised = autoneg_advertised;
+		adapter->hw.forced_speed_duplex = forced_speed_duplex;
+		adapter->hw.autoneg = autoneg;
+
 		e1000_reset(adapter);
 		if(if_running)
 			e1000_up(adapter);
...
@@ -1305,12 +1436,12 @@ e1000_ethtool_test(struct e1000_adapter *adapter,
 		data[2] = 0;
 		data[3] = 0;
 	}
-	return 0;
 }
 
 static void
-e1000_ethtool_gwol(struct e1000_adapter *adapter, struct ethtool_wolinfo *wol)
+e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
+	struct e1000_adapter *adapter = netdev->priv;
 	struct e1000_hw *hw = &adapter->hw;
 
 	switch(adapter->hw.device_id) {
...
@@ -1350,8 +1481,9 @@ e1000_ethtool_gwol(struct e1000_adapter *adapter, struct ethtool_wolinfo *wol)
 }
 
 static int
-e1000_ethtool_swol(struct e1000_adapter *adapter, struct ethtool_wolinfo *wol)
+e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
+	struct e1000_adapter *adapter = netdev->priv;
 	struct e1000_hw *hw = &adapter->hw;
 
 	switch(adapter->hw.device_id) {
...
@@ -1387,7 +1519,6 @@ e1000_ethtool_swol(struct e1000_adapter *adapter, struct ethtool_wolinfo *wol)
 	return 0;
 }
 
 /* toggle LED 4 times per second = 2 "blinks" per second */
 #define E1000_ID_INTERVAL	(HZ/4)
...
@@ -1408,8 +1539,13 @@ e1000_led_blink_callback(unsigned long data)
 }
 
 static int
-e1000_ethtool_led_blink(struct e1000_adapter *adapter, struct ethtool_value *id)
+e1000_phys_id(struct net_device *netdev, uint32_t data)
 {
+	struct e1000_adapter *adapter = netdev->priv;
+
+	if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
+		data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
+
 	if(!adapter->blink_timer.function) {
 		init_timer(&adapter->blink_timer);
 		adapter->blink_timer.function = e1000_led_blink_callback;
...
@@ -1420,11 +1556,8 @@ e1000_ethtool_led_blink(struct e1000_adapter *adapter, struct ethtool_value *id)
 	mod_timer(&adapter->blink_timer, jiffies);
 
 	set_current_state(TASK_INTERRUPTIBLE);
-	if(id->data)
-		schedule_timeout(id->data * HZ);
-	else
-		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+	schedule_timeout(data * HZ);
 
 	del_timer_sync(&adapter->blink_timer);
 	e1000_led_off(&adapter->hw);
 	clear_bit(E1000_LED_ON, &adapter->led_status);
...
@@ -1433,345 +1566,102 @@ e1000_ethtool_led_blink(struct e1000_adapter *adapter, struct ethtool_value *id)
 	return 0;
 }
 
+static int
+e1000_nway_reset(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	if(netif_running(netdev)) {
+		e1000_down(adapter);
+		e1000_up(adapter);
+	}
+	return 0;
+}
+
+static uint32_t
+e1000_get_link(struct net_device *netdev)
+{
+	return netif_carrier_ok(netdev);
+}
+
+static int
+e1000_get_stats_count(struct net_device *netdev)
+{
+	return E1000_STATS_LEN;
+}
+
+static void
+e1000_get_ethtool_stats(struct net_device *netdev,
+		struct ethtool_stats *stats, uint64_t *data)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	int i;
+
+	e1000_update_stats(adapter);
+	for(i = 0; i < E1000_STATS_LEN; i++) {
+		char *p = (char *)adapter + e1000_gstrings_stats[i].stat_offset;
+		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
+	}
 
-int
-e1000_ethtool_ioctl(struct net_device *netdev, struct ifreq *ifr)
-{
-	struct e1000_adapter *adapter = netdev->priv;
-	void *addr = ifr->ifr_data;
-	uint32_t cmd;
-
-	if(get_user(cmd, (uint32_t *) addr))
-		return -EFAULT;
-
-	switch(cmd) {
-	case ETHTOOL_GSET: {
-		struct ethtool_cmd ecmd = {ETHTOOL_GSET};
-		e1000_ethtool_gset(adapter, &ecmd);
-		if(copy_to_user(addr, &ecmd, sizeof(ecmd)))
-			return -EFAULT;
-		return 0;
-	}
-	case ETHTOOL_SSET: {
-		struct ethtool_cmd ecmd;
-		if(copy_from_user(&ecmd, addr, sizeof(ecmd)))
-			return -EFAULT;
-		return e1000_ethtool_sset(adapter, &ecmd);
-	}
-	case ETHTOOL_GDRVINFO: {
-		struct ethtool_drvinfo drvinfo = {ETHTOOL_GDRVINFO};
-		e1000_ethtool_gdrvinfo(adapter, &drvinfo);
-		if(copy_to_user(addr, &drvinfo, sizeof(drvinfo)))
-			return -EFAULT;
-		return 0;
-	}
-	case ETHTOOL_GSTRINGS: {
-		struct ethtool_gstrings gstrings = {ETHTOOL_GSTRINGS};
-		char *strings = NULL;
-		int err = 0;
-
-		if(copy_from_user(&gstrings, addr, sizeof(gstrings)))
-			return -EFAULT;
-		switch(gstrings.string_set) {
-		case ETH_SS_TEST:
-			gstrings.len = E1000_TEST_LEN;
-			strings = kmalloc(E1000_TEST_LEN * ETH_GSTRING_LEN,
-					  GFP_KERNEL);
-			if(!strings)
-				return -ENOMEM;
-			memcpy(strings, e1000_gstrings_test,
-			       E1000_TEST_LEN * ETH_GSTRING_LEN);
-			break;
-		case ETH_SS_STATS: {
-			int i;
-			gstrings.len = E1000_STATS_LEN;
-			strings = kmalloc(E1000_STATS_LEN * ETH_GSTRING_LEN,
-					  GFP_KERNEL);
-			if(!strings)
-				return -ENOMEM;
-			for(i = 0; i < E1000_STATS_LEN; i++) {
-				memcpy(&strings[i * ETH_GSTRING_LEN],
-				       e1000_gstrings_stats[i].stat_string,
-				       ETH_GSTRING_LEN);
-			}
-			break;
-		}
-		default:
-			return -EOPNOTSUPP;
-		}
-		if(copy_to_user(addr, &gstrings, sizeof(gstrings)))
-			err = -EFAULT;
-		addr += offsetof(struct ethtool_gstrings, data);
-		if(!err && copy_to_user(addr, strings,
-					gstrings.len * ETH_GSTRING_LEN))
-			err = -EFAULT;
-
-		kfree(strings);
-		return err;
-	}
-	case ETHTOOL_GREGS: {
-		struct ethtool_regs regs = {ETHTOOL_GREGS};
-		uint32_t regs_buff[E1000_REGS_LEN];
-
-		if(copy_from_user(&regs, addr, sizeof(regs)))
-			return -EFAULT;
-		memset(regs_buff, 0, sizeof(regs_buff));
-		if (regs.len > E1000_REGS_LEN)
-			regs.len = E1000_REGS_LEN;
-		e1000_ethtool_gregs(adapter, &regs, regs_buff);
-		if(copy_to_user(addr, &regs, sizeof(regs)))
-			return -EFAULT;
-		addr += offsetof(struct ethtool_regs, data);
-		if(copy_to_user(addr, regs_buff, regs.len))
-			return -EFAULT;
-		return 0;
-	}
-	case ETHTOOL_NWAY_RST: {
-		if(netif_running(netdev)) {
-			e1000_down(adapter);
-			e1000_up(adapter);
-		}
-		return 0;
-	}
-	case ETHTOOL_PHYS_ID: {
-		struct ethtool_value id;
-		if(copy_from_user(&id, addr, sizeof(id)))
-			return -EFAULT;
-		return e1000_ethtool_led_blink(adapter, &id);
-	}
-	case ETHTOOL_GLINK: {
-		struct ethtool_value link = {ETHTOOL_GLINK};
-		link.data = netif_carrier_ok(netdev);
-		if(copy_to_user(addr, &link, sizeof(link)))
-			return -EFAULT;
-		return 0;
-	}
-	case ETHTOOL_GWOL: {
-		struct ethtool_wolinfo wol = {ETHTOOL_GWOL};
-		e1000_ethtool_gwol(adapter, &wol);
-		if(copy_to_user(addr, &wol, sizeof(wol)) != 0)
-			return -EFAULT;
-		return 0;
-	}
-	case ETHTOOL_SWOL: {
-		struct ethtool_wolinfo wol;
-		if(copy_from_user(&wol, addr, sizeof(wol)) != 0)
-			return -EFAULT;
-		return e1000_ethtool_swol(adapter, &wol);
-	}
-	case ETHTOOL_GEEPROM: {
-		struct ethtool_eeprom eeprom = {ETHTOOL_GEEPROM};
-		struct e1000_hw *hw = &adapter->hw;
-		uint16_t *eeprom_buff;
-		void *ptr;
-		int err = 0;
-
-		if(copy_from_user(&eeprom, addr, sizeof(eeprom)))
-			return -EFAULT;
-
-		eeprom_buff = kmalloc(hw->eeprom.word_size * 2, GFP_KERNEL);
-		if(!eeprom_buff)
-			return -ENOMEM;
-
-		if((err = e1000_ethtool_geeprom(adapter, &eeprom, eeprom_buff)))
-			goto err_geeprom_ioctl;
-
-		if(copy_to_user(addr, &eeprom, sizeof(eeprom))) {
-			err = -EFAULT;
-			goto err_geeprom_ioctl;
-		}
-
-		addr += offsetof(struct ethtool_eeprom, data);
-		ptr = ((void *)eeprom_buff) + (eeprom.offset & 1);
-
-		if(copy_to_user(addr, ptr, eeprom.len))
-			err = -EFAULT;
-err_geeprom_ioctl:
-		kfree(eeprom_buff);
-		return err;
-	}
-	case ETHTOOL_SEEPROM: {
-		struct ethtool_eeprom eeprom;
-
-		if(copy_from_user(&eeprom, addr, sizeof(eeprom)))
-			return -EFAULT;
-
-		addr += offsetof(struct ethtool_eeprom, data);
-		return e1000_ethtool_seeprom(adapter, &eeprom, addr);
-	}
-	case ETHTOOL_GRINGPARAM: {
-		struct ethtool_ringparam ering = {ETHTOOL_GRINGPARAM};
-		e1000_ethtool_gring(adapter, &ering);
-		if(copy_to_user(addr, &ering, sizeof(ering)))
-			return -EFAULT;
-		return 0;
-	}
-	case ETHTOOL_SRINGPARAM: {
-		struct ethtool_ringparam ering;
-		if(copy_from_user(&ering, addr, sizeof(ering)))
-			return -EFAULT;
-		return e1000_ethtool_sring(adapter, &ering);
-	}
-	case ETHTOOL_GPAUSEPARAM: {
-		struct ethtool_pauseparam epause = {ETHTOOL_GPAUSEPARAM};
-		e1000_ethtool_gpause(adapter, &epause);
-		if(copy_to_user(addr, &epause, sizeof(epause)))
-			return -EFAULT;
-		return 0;
-	}
-	case ETHTOOL_SPAUSEPARAM: {
-		struct ethtool_pauseparam epause;
-		if(copy_from_user(&epause, addr, sizeof(epause)))
-			return -EFAULT;
-		return e1000_ethtool_spause(adapter, &epause);
-	}
-	case ETHTOOL_GSTATS: {
-		struct {
-			struct ethtool_stats eth_stats;
-			uint64_t data[E1000_STATS_LEN];
-		} stats = { {ETHTOOL_GSTATS, E1000_STATS_LEN} };
-		int i;
-
-		e1000_update_stats(adapter);
-		for(i = 0; i < E1000_STATS_LEN; i++)
-			stats.data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
-					sizeof(uint64_t)) ?
-				*(uint64_t *)((char *)adapter +
-					e1000_gstrings_stats[i].stat_offset) :
-				*(uint32_t *)((char *)adapter +
-					e1000_gstrings_stats[i].stat_offset);
-		if(copy_to_user(addr, &stats, sizeof(stats)))
-			return -EFAULT;
-		return 0;
-	}
-	case ETHTOOL_TEST: {
-		struct {
-			struct ethtool_test eth_test;
-			uint64_t data[E1000_TEST_LEN];
-		} test = { {ETHTOOL_TEST} };
-		int err;
-
-		if(copy_from_user(&test.eth_test, addr, sizeof(test.eth_test)))
-			return -EFAULT;
-		test.eth_test.len = E1000_TEST_LEN;
-		if((err = e1000_ethtool_test(adapter, &test.eth_test,
-					     test.data)))
-			return err;
-		if(copy_to_user(addr, &test, sizeof(test)) != 0)
-			return -EFAULT;
-		return 0;
-	}
-	case ETHTOOL_GRXCSUM: {
-		struct ethtool_value
edata
=
{
ETHTOOL_GRXCSUM
};
edata
.
data
=
adapter
->
rx_csum
;
if
(
copy_to_user
(
addr
,
&
edata
,
sizeof
(
edata
)))
return
-
EFAULT
;
return
0
;
}
case
ETHTOOL_SRXCSUM
:
{
struct
ethtool_value
edata
;
if
(
copy_from_user
(
&
edata
,
addr
,
sizeof
(
edata
)))
return
-
EFAULT
;
adapter
->
rx_csum
=
edata
.
data
;
if
(
netif_running
(
netdev
))
{
e1000_down
(
adapter
);
e1000_up
(
adapter
);
}
else
e1000_reset
(
adapter
);
return
0
;
}
case
ETHTOOL_GTXCSUM
:
{
struct
ethtool_value
edata
=
{
ETHTOOL_GTXCSUM
};
edata
.
data
=
(
netdev
->
features
&
NETIF_F_HW_CSUM
)
!=
0
;
if
(
copy_to_user
(
addr
,
&
edata
,
sizeof
(
edata
)))
return
-
EFAULT
;
return
0
;
}
case
ETHTOOL_STXCSUM
:
{
struct
ethtool_value
edata
;
if
(
copy_from_user
(
&
edata
,
addr
,
sizeof
(
edata
)))
return
-
EFAULT
;
if
(
adapter
->
hw
.
mac_type
<
e1000_82543
)
{
if
(
edata
.
data
!=
0
)
return
-
EINVAL
;
return
0
;
}
}
}
if
(
edata
.
data
)
static
void
netdev
->
features
|=
NETIF_F_HW_CSUM
;
e1000_get_strings
(
struct
net_device
*
netdev
,
uint32_t
stringset
,
uint8_t
*
data
)
else
{
netdev
->
features
&=
~
NETIF_F_HW_CSUM
;
int
i
;
return
0
;
switch
(
stringset
)
{
case
ETH_SS_TEST
:
memcpy
(
data
,
*
e1000_gstrings_test
,
E1000_TEST_LEN
*
ETH_GSTRING_LEN
);
break
;
case
ETH_SS_STATS
:
for
(
i
=
0
;
i
<
E1000_STATS_LEN
;
i
++
)
{
memcpy
(
data
+
i
*
ETH_GSTRING_LEN
,
e1000_gstrings_stats
[
i
].
stat_string
,
ETH_GSTRING_LEN
);
}
}
case
ETHTOOL_GSG
:
{
break
;
struct
ethtool_value
edata
=
{
ETHTOOL_GSG
};
edata
.
data
=
(
netdev
->
features
&
NETIF_F_SG
)
!=
0
;
if
(
copy_to_user
(
addr
,
&
edata
,
sizeof
(
edata
)))
return
-
EFAULT
;
return
0
;
}
}
case
ETHTOOL_SSG
:
{
}
struct
ethtool_value
edata
;
if
(
copy_from_user
(
&
edata
,
addr
,
sizeof
(
edata
)))
return
-
EFAULT
;
if
(
edata
.
data
)
netdev
->
features
|=
NETIF_F_SG
;
else
netdev
->
features
&=
~
NETIF_F_SG
;
return
0
;
struct
ethtool_ops
e1000_ethtool_ops
=
{
}
.
get_settings
=
e1000_get_settings
,
.
set_settings
=
e1000_set_settings
,
.
get_drvinfo
=
e1000_get_drvinfo
,
.
get_regs_len
=
e1000_get_regs_len
,
.
get_regs
=
e1000_get_regs
,
.
get_wol
=
e1000_get_wol
,
.
set_wol
=
e1000_set_wol
,
.
get_msglevel
=
e1000_get_msglevel
,
.
set_msglevel
=
e1000_set_msglevel
,
.
nway_reset
=
e1000_nway_reset
,
.
get_link
=
e1000_get_link
,
.
get_eeprom_len
=
e1000_get_eeprom_len
,
.
get_eeprom
=
e1000_get_eeprom
,
.
set_eeprom
=
e1000_set_eeprom
,
.
get_ringparam
=
e1000_get_ringparam
,
.
set_ringparam
=
e1000_set_ringparam
,
.
get_pauseparam
=
e1000_get_pauseparam
,
.
set_pauseparam
=
e1000_set_pauseparam
,
.
get_rx_csum
=
e1000_get_rx_csum
,
.
set_rx_csum
=
e1000_set_rx_csum
,
.
get_tx_csum
=
e1000_get_tx_csum
,
.
set_tx_csum
=
e1000_set_tx_csum
,
.
get_sg
=
e1000_get_sg
,
.
set_sg
=
e1000_set_sg
,
#ifdef NETIF_F_TSO
#ifdef NETIF_F_TSO
case
ETHTOOL_GTSO
:
{
.
get_tso
=
e1000_get_tso
,
struct
ethtool_value
edata
=
{
ETHTOOL_GTSO
};
.
set_tso
=
e1000_set_tso
,
edata
.
data
=
(
netdev
->
features
&
NETIF_F_TSO
)
!=
0
;
if
(
copy_to_user
(
addr
,
&
edata
,
sizeof
(
edata
)))
return
-
EFAULT
;
return
0
;
}
case
ETHTOOL_STSO
:
{
struct
ethtool_value
edata
;
if
(
copy_from_user
(
&
edata
,
addr
,
sizeof
(
edata
)))
return
-
EFAULT
;
if
((
adapter
->
hw
.
mac_type
<
e1000_82544
)
||
(
adapter
->
hw
.
mac_type
==
e1000_82547
))
{
if
(
edata
.
data
!=
0
)
return
-
EINVAL
;
return
0
;
}
if
(
edata
.
data
)
netdev
->
features
|=
NETIF_F_TSO
;
else
netdev
->
features
&=
~
NETIF_F_TSO
;
return
0
;
}
#endif
#endif
default:
.
self_test_count
=
e1000_diag_test_count
,
return
-
EOPNOTSUPP
;
.
self_test
=
e1000_diag_test
,
}
.
get_strings
=
e1000_get_strings
,
}
.
phys_id
=
e1000_phys_id
,
.
get_stats_count
=
e1000_get_stats_count
,
.
get_ethtool_stats
=
e1000_get_ethtool_stats
,
};
void
set_ethtool_ops
(
struct
net_device
*
netdev
)
{
SET_ETHTOOL_OPS
(
netdev
,
&
e1000_ethtool_ops
);
}
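The new get_ethtool_stats hook works off a small description table: each entry names a statistic, records how wide the counter is, and records its byte offset inside the adapter structure, so a single generic loop can export every counter. A minimal sketch of that pattern follows; the structure and field names here are hypothetical stand-ins (the real table, E1000_STATS_LEN and the adapter layout are defined earlier in this file and in e1000.h).

	#include <stddef.h>	/* offsetof */
	#include <stdint.h>

	/* hypothetical stand-ins for the driver's private counters */
	struct demo_adapter {
		uint64_t rx_packets;
		uint64_t tx_packets;
		uint32_t rx_errors;
	};

	struct demo_stats_entry {
		char name[32];
		int sizeof_stat;	/* width of the counter */
		int stat_offset;	/* byte offset inside struct demo_adapter */
	};

	static const struct demo_stats_entry demo_stats[] = {
		{ "rx_packets", sizeof(uint64_t), offsetof(struct demo_adapter, rx_packets) },
		{ "tx_packets", sizeof(uint64_t), offsetof(struct demo_adapter, tx_packets) },
		{ "rx_errors",  sizeof(uint32_t), offsetof(struct demo_adapter, rx_errors)  },
	};

	#define DEMO_STATS_LEN (sizeof(demo_stats) / sizeof(demo_stats[0]))

	/* the same copy loop as e1000_get_ethtool_stats(), against the demo types */
	static void demo_get_stats(struct demo_adapter *adapter, uint64_t *data)
	{
		size_t i;
		for (i = 0; i < DEMO_STATS_LEN; i++) {
			char *p = (char *)adapter + demo_stats[i].stat_offset;
			data[i] = (demo_stats[i].sizeof_stat == sizeof(uint64_t)) ?
				*(uint64_t *)p : *(uint32_t *)p;
		}
	}

Adding a statistic then means adding one table entry rather than another line of copy code, which is what keeps the ethtool_ops version so much shorter than the removed ioctl handler.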
drivers/net/e1000/e1000_hw.c
View file @
ee86da9d
...
@@ -470,7 +470,6 @@ e1000_init_hw(struct e1000_hw *hw)

One line is dropped from the top of e1000_init_hw(); the rendered context is unchanged:

	uint16_t pcix_stat_hi_word;
	uint16_t cmd_mmrbc;
	uint16_t stat_mmrbc;

	DEBUGFUNC("e1000_init_hw");

	/* Initialize Identification LED */

@@ -910,6 +909,12 @@ e1000_setup_copper_link(struct e1000_hw *hw)

A PHY workaround for the 82545 rev 3 is inserted ahead of the existing mac_type checks:

	if(ret_val)
		return ret_val;

+	if(hw->mac_type == e1000_82545_rev_3) {
+		ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+		phy_data |= 0x00000008;
+		ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	}
+
	if(hw->mac_type <= e1000_82543 ||
	   hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
	   hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)

@@ -1961,7 +1966,7 @@ e1000_check_for_link(struct e1000_hw *hw)

	int32_t
	e1000_check_for_link(struct e1000_hw *hw)
	{
-		uint32_t rxcw;
+		uint32_t rxcw = 0;
		uint32_t ctrl;
		uint32_t status;
		uint32_t rctl;

@@ -1971,17 +1976,24 @@ e1000_check_for_link(struct e1000_hw *hw)

The CTRL/STATUS/RXCW reads and the fiber signal setup are now performed only for fiber and internal-serdes media, and a link-up shortcut clears get_link_status inside that block:

	DEBUGFUNC("e1000_check_for_link");

-	ctrl = E1000_READ_REG(hw, CTRL);
-	status = E1000_READ_REG(hw, STATUS);

	/* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be
	 * set when the optics detect a signal. On older adapters, it will be
	 * cleared when there is a signal.  This applies to fiber media only.
	 */
-	if(hw->media_type == e1000_media_type_fiber)
-		signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
-	rxcw = E1000_READ_REG(hw, RXCW);
+	if((hw->media_type == e1000_media_type_fiber) ||
+	   (hw->media_type == e1000_media_type_internal_serdes)) {
+		ctrl = E1000_READ_REG(hw, CTRL);
+		status = E1000_READ_REG(hw, STATUS);
+		rxcw = E1000_READ_REG(hw, RXCW);
+		if(hw->media_type == e1000_media_type_fiber) {
+			signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+			if(status & E1000_STATUS_LU)
+				hw->get_link_status = FALSE;
+		}
+	}

	/* If we have a copper PHY then we only want to go out to the PHY
	 * registers to see if Auto-Neg has completed and/or if our link
	 * status has changed.  The get_link_status flag will be set if we ...

@@ -2125,8 +2137,7 @@ e1000_check_for_link(struct e1000_hw *hw)

One line is dropped from this block; the rendered context is unchanged:

	else if(((hw->media_type == e1000_media_type_fiber) ||
	         (hw->media_type == e1000_media_type_internal_serdes)) &&
	        (ctrl & E1000_CTRL_SLU) &&
	        (rxcw & E1000_RXCW_C)) {
		DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link. \r\n");
		E1000_WRITE_REG(hw, TXCW, hw->txcw);
		E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
...
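The 82545 rev 3 workaround above is the usual read-modify-write idiom for PHY registers. A hedged sketch of that idiom with the return values actually checked; the register index and bit are taken from the hunk, but the error-handling shape here is only illustrative, not the committed code.

	/* Illustrative only: read-modify-write of a PHY register with the
	 * return codes checked, using the same accessors as the hunk above. */
	static int32_t demo_set_phy_bit(struct e1000_hw *hw)
	{
		uint16_t phy_data;
		int32_t ret_val;

		ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
		if (ret_val)
			return ret_val;

		phy_data |= 0x00000008;		/* same bit as the 82545 rev 3 workaround */

		return e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
	}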
drivers/net/e1000/e1000_hw.h
View file @
ee86da9d
...
@@ -2019,7 +2019,7 @@ struct e1000_hw {

	#define IGP01E1000_PSSR_MDIX_SHIFT        0x000B /* shift right 11 */

	/* IGP01E1000 Specific Port Control Register - R/W */
-	#define IGP01E1000_PSCR_TP_LOOPBACK       0x0001
+	#define IGP01E1000_PSCR_TP_LOOPBACK       0x0010
	#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200
	#define IGP01E1000_PSCR_TEN_CRS_SELECT    0x0400
	#define IGP01E1000_PSCR_FLIP_CHIP         0x0800

@@ -2029,16 +2029,18 @@ struct e1000_hw {

Two new bits are added to the IGP01E1000 Port Link Health Register defines and the low-order values are renumbered:

	/* IGP01E1000 Specific Port Link Health Register */
	#define IGP01E1000_PLHR_SS_DOWNGRADE         0x8000
	#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR  0x4000
+	#define IGP01E1000_PLHR_MASTER_FAULT         0x2000
+	#define IGP01E1000_PLHR_MASTER_RESOLUTION    0x1000
	#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK     0x0800 /* LH */
	#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */
	#define IGP01E1000_PLHR_DATA_ERR_1           0x0200 /* LH */
	#define IGP01E1000_PLHR_DATA_ERR_0           0x0100
-	#define IGP01E1000_PLHR_AUTONEG_FAULT        0x0010
+	#define IGP01E1000_PLHR_AUTONEG_FAULT        0x0040
-	#define IGP01E1000_PLHR_AUTONEG_ACTIVE       0x0008
+	#define IGP01E1000_PLHR_AUTONEG_ACTIVE       0x0010
-	#define IGP01E1000_PLHR_VALID_CHANNEL_D      0x0004
+	#define IGP01E1000_PLHR_VALID_CHANNEL_D      0x0008
-	#define IGP01E1000_PLHR_VALID_CHANNEL_C      0x0002
+	#define IGP01E1000_PLHR_VALID_CHANNEL_C      0x0004
-	#define IGP01E1000_PLHR_VALID_CHANNEL_B      0x0001
+	#define IGP01E1000_PLHR_VALID_CHANNEL_B      0x0002
-	#define IGP01E1000_PLHR_VALID_CHANNEL_A      0x0000
+	#define IGP01E1000_PLHR_VALID_CHANNEL_A      0x0001

	/* IGP01E1000 Channel Quality Register */
	#define IGP01E1000_MSE_CHANNEL_D             0x000F
...
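These masks are meant to be tested against the raw value read back from the PHY's link-health register. A hedged fragment of how a caller would consume the renumbered bits; the register index constant IGP01E1000_PHY_LINK_HEALTH is assumed here (its definition lives elsewhere in e1000_hw.h), everything else is taken from the surrounding code.

	/* Illustrative fragment only, not part of this commit. */
	static void demo_report_link_health(struct e1000_hw *hw)
	{
		uint16_t phy_data;

		if (e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, &phy_data) != E1000_SUCCESS)
			return;

		if (phy_data & IGP01E1000_PLHR_AUTONEG_FAULT)
			DEBUGOUT("PHY reports an auto-negotiation fault\n");
		if (phy_data & IGP01E1000_PLHR_MASTER_FAULT)
			DEBUGOUT("PHY reports a master/slave resolution fault\n");
		if (phy_data & IGP01E1000_PLHR_VALID_CHANNEL_A)
			DEBUGOUT("Channel A diagnostics valid\n");
	}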
drivers/net/e1000/e1000_main.c
View file @
ee86da9d
...
@@ -27,55 +27,32 @@

	#include "e1000.h"
+	#include <linux/rtnetlink.h>

The in-file Change Log gains entries for 5.2.51, 5.2.47, 5.2.45 and 5.2.42 ahead of the existing 5.2.39 heading, and the detailed 5.2.39 notes plus the older 5.2.30 and 5.2.22 entries are dropped:

	/* Change Log
	 *
	 * 5.2.51	5/14/04
	 *   o set default configuration to 'NAPI disabled'. NAPI enabled driver
	 *     causes kernel panic when the interface is shutdown while data is being
	 *     transferred.
	 * 5.2.47	5/04/04
	 *   o fixed ethtool -t implementation
	 * 5.2.45	4/29/04
	 *   o fixed ethtool -e implementation
	 *   o Support for ethtool ops [Stephen Hemminger (shemminger@osdl.org)]
	 * 5.2.42	4/26/04
	 *   o Added support for the DPRINTK macro for enhanced error logging.  Some
	 *     parts of the patch were supplied by Jon Mason.
	 *   o Move the register_netdevice() down in the probe routine due to a
	 *     loading/unloading test issue.
	 *   o Added a long RX byte count to the extra ethtool data members for BER
	 *     testing purposes.
	 * 5.2.39	3/12/04
	 */

The removed 5.2.39 notes covered: eeprom read/write in proper order (device eeprom is little-endian, word addressable), TSO disabled by default until hangs on non-IA archs are root-caused, the 82547 CSA fix backed out, FC high/low water marks corrected to lie in the Rx FIFO range [Dainis Jonitis], allocate-new-then-free-old handling for ethtool ring parameter changes, collision threshold changed from 16 to 15 per the IEEE spec, chip-select toggling for SPI eeprom ready checks, PHY class A mode for designs with EEPROM word 0x7 bit 15 set, register_netdev failure handling [Stephen Hemminger], and README/man page updates. The removed 5.2.30 (1/14/03) notes covered: VLAN filtering set to IEEE 802.1Q after reset for SoL connections, 1000/Full AutoNeg allowed for fiber [Jon D Mason], the Tx queue vs Tx clean race fixed with a spin lock, netpoll support, an endianness fix for ethtool loopback diags on ppc, pdev->irq used instead of netdev->irq in preparation for MSI, a driver message on InterruptThrottleRate override, I/O address storage changed from uint32_t to unsigned long, and ethtool RINGPARAM support. The 5.2.22 (10/15/03) heading is removed as well.

	char e1000_driver_name[] = "e1000";
	char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-	char e1000_driver_version[] = "5.2.39-k2";
+	char e1000_driver_version[] = "5.2.52-k2";
	char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";

	/* e1000_pci_tbl - PCI Device ID Table
...
@@ -170,6 +147,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);

	static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
	static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
	                           int cmd);
+	void set_ethtool_ops(struct net_device *netdev);
	static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
	static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
	static inline void e1000_rx_checksum(struct e1000_adapter *adapter, ...

@@ -206,7 +184,7 @@ struct notifier_block e1000_notifier_reboot = {

	/* Exported from other modules */
	extern void e1000_check_options(struct e1000_adapter *adapter);
-	extern int e1000_ethtool_ioctl(struct net_device *netdev, struct ifreq *ifr);

	static struct pci_driver e1000_driver = {
		.name = e1000_driver_name,

@@ -224,6 +202,10 @@ MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");

	MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
	MODULE_LICENSE("GPL");

+	static int debug = 3;
+	module_param(debug, int, 0);
+	MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

	/**
	 * e1000_init_module - Driver Registration Routine
	 *
...
@@ -419,6 +401,12 @@ e1000_probe(struct pci_dev *pdev,

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
+	adapter->msg_enable = (1 << debug) - 1;
+
+	rtnl_lock();
+	/* we need to set the name early since the DPRINTK macro needs it set */
+	if (dev_alloc_name(netdev, netdev->name) < 0)
+		goto err_free_unlock;

	mmio_start = pci_resource_start(pdev, BAR_0);
	mmio_len = pci_resource_len(pdev, BAR_0);

@@ -446,6 +434,7 @@ e1000_probe(struct pci_dev *pdev,

	netdev->set_mac_address = &e1000_set_mac;
	netdev->change_mtu = &e1000_change_mtu;
	netdev->do_ioctl = &e1000_ioctl;
+	set_ethtool_ops(netdev);
	netdev->tx_timeout = &e1000_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
	#ifdef CONFIG_E1000_NAPI
...
@@ -502,7 +491,7 @@ e1000_probe(struct pci_dev *pdev,

	/* make sure the EEPROM is good */
	if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
-		printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
+		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}
...
@@ -536,16 +525,12 @@ e1000_probe(struct pci_dev *pdev,

	INIT_WORK(&adapter->tx_timeout_task,
		(void (*)(void *))e1000_tx_timeout_task, netdev);
-	if((err = register_netdev(netdev)))
-		goto err_register;

	/* we're going to reset, so assume we have no link for now */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

-	printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Connection\n",
-	       netdev->name);
+	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
...
@@ -579,7 +564,12 @@ e1000_probe(struct pci_dev *pdev,

	e1000_reset(adapter);
+
+	/* since we are holding the rtnl lock already, call the no-lock version */
+	if((err = register_netdevice(netdev)))
+		goto err_register;

	cards_found++;
+	rtnl_unlock();
	return 0;

	err_register:
...
@@ -587,6 +577,8 @@ e1000_probe(struct pci_dev *pdev,

	err_eeprom:
		iounmap(adapter->hw.hw_addr);
	err_ioremap:
+	err_free_unlock:
+		rtnl_unlock();
		free_netdev(netdev);
	err_alloc_etherdev:
		pci_release_regions(pdev);
...
@@ -664,7 +656,7 @@ e1000_sw_init(struct e1000_adapter *adapter)

	/* identify the MAC */
	if(e1000_set_mac_type(hw)) {
-		E1000_ERR("Unknown MAC Type\n");
+		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
		return -EIO;
	}
...
@@ -1391,9 +1383,8 @@ e1000_watchdog(unsigned long data)

			&adapter->link_speed,
			&adapter->link_duplex);
-		printk(KERN_INFO
-		       "e1000: %s NIC Link is Up %d Mbps %s\n",
-		       netdev->name, adapter->link_speed,
+		DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
+		        adapter->link_speed,
		        adapter->link_duplex == FULL_DUPLEX ?
		        "Full Duplex" : "Half Duplex");
...
@@ -1406,9 +1397,7 @@ e1000_watchdog(unsigned long data)

	if(netif_carrier_ok(netdev)) {
		adapter->link_speed = 0;
		adapter->link_duplex = 0;
-		printk(KERN_INFO
-		       "e1000: %s NIC Link is Down\n",
-		       netdev->name);
+		DPRINTK(LINK, INFO, "NIC Link is Down\n");
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);

		mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
...
@@ -1560,33 +1549,17 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)

e1000_tx_map() now receives the per-descriptor limits from its caller instead of computing them itself:

old:
	static inline int
	e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
	             unsigned int first)
	{
		struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
		struct e1000_tx_desc *tx_desc;
		struct e1000_buffer *buffer_info;
		unsigned int len = skb->len, max_per_txd = E1000_MAX_DATA_PER_TXD;
		unsigned int offset = 0, size, count = 0, i;
	#ifdef NETIF_F_TSO
		unsigned int mss;
	#endif
		unsigned int nr_frags;
		unsigned int f;

	#ifdef NETIF_F_TSO
		mss = skb_shinfo(skb)->tso_size;
		/* The controller does a simple calculation to
		 * make sure there is enough room in the FIFO before
		 * initiating the DMA for each buffer.  The calc is:
		 * 4 = ceil(buffer len/mss).  To make sure we don't
		 * overrun the FIFO, adjust the max buffer len if mss
		 * drops. */
		if(mss)
			max_per_txd = min(mss << 2, max_per_txd);
	#endif
		nr_frags = skb_shinfo(skb)->nr_frags;
		len -= skb->data_len;

new:
	static inline int
	e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
	             unsigned int first, unsigned int max_per_txd,
	             unsigned int nr_frags, unsigned int mss)
	{
		struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
		struct e1000_buffer *buffer_info;
		unsigned int len = skb->len;
		unsigned int offset = 0, size, count = 0, i;
		unsigned int f;
		len -= skb->data_len;

		i = tx_ring->next_to_use;
		while(len) {
...
@@ -1658,46 +1631,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,

The "not enough descriptors" rollback block is removed from the end of e1000_tx_map(); the descriptor budgeting moves to e1000_xmit_frame() (next hunk):

		if(++i == tx_ring->count) i = 0;
		}
	}
-	if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2) {
-		/* There aren't enough descriptors available to queue up
-		 * this send (need: count + 1 context desc + 1 desc gap
-		 * to keep tail from touching head), so undo the mapping
-		 * and abort the send.  We could have done the check before
-		 * we mapped the skb, but because of all the workarounds
-		 * (above), it's too difficult to predict how many we're
-		 * going to need.*/
-		i = tx_ring->next_to_use;
-
-		if(i == first) {
-			/* Cleanup after e1000_tx_[csum|tso] scribbling
-			 * on descriptors. */
-			tx_desc = E1000_TX_DESC(*tx_ring, first);
-			tx_desc->buffer_addr = 0;
-			tx_desc->lower.data = 0;
-			tx_desc->upper.data = 0;
-		}
-
-		while(count--) {
-			buffer_info = &tx_ring->buffer_info[i];
-
-			if(buffer_info->dma) {
-				pci_unmap_page(adapter->pdev, buffer_info->dma,
-				               buffer_info->length, PCI_DMA_TODEVICE);
-				buffer_info->dma = 0;
-			}
-
-			if(++i == tx_ring->count) i = 0;
-		}
-
-		tx_ring->next_to_use = first;
-
-		return 0;
-	}

	i = (i == 0) ? tx_ring->count - 1 : i - 1;
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;
...
@@ -1792,27 +1725,72 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)

e1000_xmit_frame() now counts the descriptors a frame will need up front, stops the queue when the ring cannot hold them, and passes the limits down to e1000_tx_map(); the tx_lock is released before the 82547 FIFO workaround runs:

+	#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
+
	static int
	e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
	{
		struct e1000_adapter *adapter = netdev->priv;
-		unsigned int first;
+		unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
+		unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
		unsigned int tx_flags = 0;
		unsigned long flags;
-		int count;
+		unsigned int len = skb->len;
+		int count = 0;
+		unsigned int mss = 0;
+		unsigned int nr_frags = 0;
+		unsigned int f;
+		len -= skb->data_len;

		if(skb->len <= 0) {
			dev_kfree_skb_any(skb);
			return 0;
		}

+	#ifdef NETIF_F_TSO
+		mss = skb_shinfo(skb)->tso_size;
+		/* The controller does a simple calculation to
+		 * make sure there is enough room in the FIFO before
+		 * initiating the DMA for each buffer.  The calc is:
+		 * 4 = ceil(buffer len/mss).  To make sure we don't
+		 * overrun the FIFO, adjust the max buffer len if mss
+		 * drops. */
+		if(mss) {
+			max_per_txd = min(mss << 2, max_per_txd);
+			max_txd_pwr = fls(max_per_txd) - 1;
+		}
+
+		if((mss) || (skb->ip_summed == CHECKSUM_HW))
+			count++;
+		count++;	/*for sentinel desc*/
+	#else
+		if(skb->ip_summed == CHECKSUM_HW)
+			count++;
+	#endif
+
+		count += TXD_USE_COUNT(len, max_txd_pwr);
+		if(adapter->pcix_82544)
+			count++;
+
+		nr_frags = skb_shinfo(skb)->nr_frags;
+		for(f = 0; f < nr_frags; f++)
+			count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+			                       max_txd_pwr);
+		if(adapter->pcix_82544)
+			count += nr_frags;
+
		spin_lock_irqsave(&adapter->tx_lock, flags);
+		/* need: count + 2 desc gap to keep tail from touching
+		 * head, otherwise try next time */
+		if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2) {
+			netif_stop_queue(netdev);
+			spin_unlock_irqrestore(&adapter->tx_lock, flags);
+			return 1;
+		}
+		spin_unlock_irqrestore(&adapter->tx_lock, flags);

		if(adapter->hw.mac_type == e1000_82547) {
			if(e1000_82547_fifo_workaround(adapter, skb)) {
				netif_stop_queue(netdev);
				mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
-				spin_unlock_irqrestore(&adapter->tx_lock, flags);
				return 1;
			}
		}
...
@@ -1829,18 +1807,12 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)

	else if(e1000_tx_csum(adapter, skb))
		tx_flags |= E1000_TX_FLAGS_CSUM;

-	if((count = e1000_tx_map(adapter, skb, first)))
-		e1000_tx_queue(adapter, count, tx_flags);
-	else {
-		netif_stop_queue(netdev);
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
-		return 1;
-	}
+	e1000_tx_queue(adapter,
+		e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
+		tx_flags);

	netdev->trans_start = jiffies;

-	spin_unlock_irqrestore(&adapter->tx_lock, flags);
	return 0;
	}
...
@@ -1903,7 +1875,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)

	if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	   (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-		E1000_ERR("Invalid MTU setting\n");
+		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
		return -EINVAL;
	}
...
@@ -1911,7 +1883,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)

		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	} else if(adapter->hw.mac_type < e1000_82543) {
-		E1000_ERR("Jumbo Frames not supported on 82542\n");
+		DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n");
		return -EINVAL;
	} else if(max_frame <= E1000_RXBUFFER_4096) {
...
@@ -2193,7 +2165,6 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)

	unsigned int i, eop;
	boolean_t cleaned = FALSE;

-	spin_lock(&adapter->tx_lock);
	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
...
@@ -2236,6 +2207,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)

The lock is instead taken only around the queue-wake decision, so the descriptor reclaim can run concurrently with the transmit path while stop/wake stays serialized against e1000_xmit_frame():

	tx_ring->next_to_clean = i;

+	spin_lock(&adapter->tx_lock);
	if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
		netif_wake_queue(netdev);
...
@@ -2296,7 +2269,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)

	/* All receives must fit into a single buffer */
-	E1000_DBG("Receive packet consumed multiple buffers\n");
+	E1000_DBG("%s: Receive packet consumed multiple buffers\n",
+	          netdev->name);
	dev_kfree_skb_irq(skb);
	rx_desc->status = 0;
...
@@ -2513,8 +2487,6 @@ e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)

The SIOCETHTOOL case is removed now that the networking core dispatches ethtool requests through netdev->ethtool_ops:

	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
-	case SIOCETHTOOL:
-		return e1000_ethtool_ioctl(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
...

Two short sketches follow: one of the msg_enable mapping introduced by the new debug parameter, and one of the descriptor-count arithmetic used in e1000_xmit_frame().
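The new debug parameter is turned into a bitmask in e1000_probe() as (1 << debug) - 1, so a debug level of N enables the lowest N bits of the standard netif_msg_* classes. A small sketch of that arithmetic and of the kind of check a DPRINTK-style macro performs; the DEMO_* macro only approximates the real DPRINTK defined in e1000.h, which is not shown on this page.

	#include <linux/netdevice.h>	/* NETIF_MSG_DRV (0x0001), NETIF_MSG_PROBE (0x0002), NETIF_MSG_LINK (0x0004) */
	#include <linux/kernel.h>

	struct demo_adapter {
		u32 msg_enable;
	};

	static void demo_set_msg_enable(struct demo_adapter *a, int debug)
	{
		/* debug = 3  ->  msg_enable = 0x7
		 *             =  NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK */
		a->msg_enable = (1 << debug) - 1;
	}

	/* approximate shape of an msg_enable-gated message macro (sketch only) */
	#define DEMO_DPRINTK(a, klass, fmt, args...)			\
		do {							\
			if ((a)->msg_enable & NETIF_MSG_##klass)	\
				printk(KERN_INFO "e1000: " fmt, ##args);\
		} while (0)

With the default debug = 3, probe- and link-level messages such as the converted "NIC Link is Up" line are printed, while higher classes stay quiet unless the module parameter is raised.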
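The descriptor budget in e1000_xmit_frame() is pure shift arithmetic: TXD_USE_COUNT(S, X) computes (S >> X) + 1, i.e. how many descriptors of 2^X bytes a buffer of S bytes may occupy, rounded up by one so a partially filled descriptor is never under-counted. A worked example follows; the value 12 (4096-byte descriptors) is an assumption standing in for E1000_MAX_TXD_PWR.

	/* Worked example of the descriptor-count arithmetic; illustrative only. */
	#define DEMO_TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)

	static unsigned int demo_count_descriptors(void)
	{
		unsigned int max_txd_pwr = 12;	/* assumed: 4096 bytes per descriptor */
		unsigned int linear_len = 6000;	/* skb head */
		unsigned int frag_len = 1500;	/* one page fragment */
		unsigned int count = 0;

		count += DEMO_TXD_USE_COUNT(linear_len, max_txd_pwr);	/* 6000 >> 12 = 1, +1 -> 2 */
		count += DEMO_TXD_USE_COUNT(frag_len, max_txd_pwr);	/* 1500 >> 12 = 0, +1 -> 1 */

		/* with TSO and mss = 1448: max_per_txd = min(1448 << 2, 4096) = 4096,
		 * so max_txd_pwr would stay fls(4096) - 1 = 12 in this example */
		return count;	/* 3 data descriptors for this frame */
	}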
drivers/net/e1000/e1000_param.c
View file @
ee86da9d
...
@@ -234,7 +234,8 @@ struct e1000_option {

e1000_validate_option() gains an adapter argument so it can report through DPRINTK:

	static int __devinit
-	e1000_validate_option(int *value, struct e1000_option *opt)
+	e1000_validate_option(int *value, struct e1000_option *opt,
+	                      struct e1000_adapter *adapter)
	{
		if(*value == OPTION_UNSET) {
			*value = opt->def;
...
@@ -245,16 +246,17 @@ e1000_validate_option(int *value, struct e1000_option *opt)
@@ -266,7 +268,7 @@ e1000_validate_option(int *value, struct e1000_option *opt)
@@ -276,7 +278,7 @@ e1000_validate_option(int *value, struct e1000_option *opt)

Inside e1000_validate_option() every printk(KERN_INFO ...) becomes DPRINTK(PROBE, INFO, ...) with the same format string; the enable_option, range_option and list_option logic is otherwise unchanged:

	case enable_option:
		switch(*value) {
		case OPTION_ENABLED:
-			printk(KERN_INFO "%s Enabled\n", opt->name);
+			DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
			return 0;
		case OPTION_DISABLED:
-			printk(KERN_INFO "%s Disabled\n", opt->name);
+			DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
			return 0;
		}
		break;
	case range_option:
		if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
-			printk(KERN_INFO "%s set to %i\n", opt->name, *value);
+			DPRINTK(PROBE, INFO, "%s set to %i\n", opt->name, *value);
			return 0;
		}
		break;
	...
		ent = &opt->arg.l.p[i];
		if(*value == ent->i) {
			if(ent->str[0] != '\0')
-				printk(KERN_INFO "%s\n", ent->str);
+				DPRINTK(PROBE, INFO, "%s\n", ent->str);
			return 0;
		}
	...
		BUG();
	}
-	printk(KERN_INFO "Invalid %s specified (%i) %s\n",
-	       opt->name, *value, opt->err);
+	DPRINTK(PROBE, INFO, "Invalid %s specified (%i) %s\n",
+	        opt->name, *value, opt->err);
	*value = opt->def;
	return -1;
...
@@ -300,9 +302,9 @@ e1000_check_options(struct e1000_adapter *adapter)

	int bd = adapter->bd_number;
	if(bd >= E1000_MAX_NIC) {
-		printk(KERN_NOTICE
-		       "Warning: no configuration for board #%i\n", bd);
-		printk(KERN_NOTICE "Using defaults for all values\n");
+		DPRINTK(PROBE, NOTICE,
+		        "Warning: no configuration for board #%i\n", bd);
+		DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
		bd = E1000_MAX_NIC;
	}

@@ -321 (Transmit Descriptor Count), @@ -339 (Receive Descriptor Count), @@ -351 (Checksum Offload Enable/Disable), @@ -373 (Flow Control), @@ -387 (Transmit Interrupt Delay), @@ -400 (Transmit Absolute Interrupt Delay), @@ -413 (Receive Interrupt Delay), @@ -426 (Receive Absolute Interrupt Delay): each block's validation call now passes the adapter, e.g.

		tx_ring->count = TxDescriptors[bd];
-		e1000_validate_option(&tx_ring->count, &opt);
+		e1000_validate_option(&tx_ring->count, &opt, adapter);
		E1000_ROUNDUP(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);

and likewise for rx_ring->count, rx_csum, fc, tx_int_delay, tx_abs_int_delay, rx_int_delay and rx_abs_int_delay.

@@ -444,13 +446,14 @@ e1000_check_options(struct e1000_adapter *adapter)

The Interrupt Throttling Rate special cases switch to DPRINTK and the default case passes the adapter:

		adapter->itr = 1;
		break;
	case 0:
-		printk(KERN_INFO "%s turned off\n", opt.name);
+		DPRINTK(PROBE, INFO, "%s turned off\n", opt.name);
		break;
	case 1:
-		printk(KERN_INFO "%s set to dynamic mode\n", opt.name);
+		DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", opt.name);
		break;
	default:
-		e1000_validate_option(&adapter->itr, &opt);
+		e1000_validate_option(&adapter->itr, &opt, adapter);
		break;
	}
...
@@ -482,15 +485,15 @@ e1000_check_fiber_options(struct e1000_adapter *adapter)

	bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;

	if((Speed[bd] != OPTION_UNSET)) {
-		printk(KERN_INFO "Speed not valid for fiber adapters, "
-		       "parameter ignored\n");
+		DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
+		        "parameter ignored\n");
	}
	if((Duplex[bd] != OPTION_UNSET)) {
-		printk(KERN_INFO "Duplex not valid for fiber adapters, "
-		       "parameter ignored\n");
+		DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
+		        "parameter ignored\n");
	}
	if((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) {
-		printk(KERN_INFO "AutoNeg other than Full/1000 is "
-		       "not valid for fiber adapters, parameter ignored\n");
+		DPRINTK(PROBE, INFO, "AutoNeg other than Full/1000 is "
+		        "not valid for fiber adapters, parameter ignored\n");
	}
...
@@ -525,7 +528,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
@@ -542,11 +545,11 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
@@ -595,7 +598,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)

The Speed, Duplex and AutoNeg validations pass the adapter and the conflict notice becomes a DPRINTK:

		speed = Speed[bd];
-		e1000_validate_option(&speed, &opt);
+		e1000_validate_option(&speed, &opt, adapter);
	}
	{ /* Duplex */
		struct e1000_opt_list dplx_list[] = {{ 0, "" },
	...
		dplx = Duplex[bd];
-		e1000_validate_option(&dplx, &opt);
+		e1000_validate_option(&dplx, &opt, adapter);
	}

	if(AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) {
-		printk(KERN_INFO
-		       "AutoNeg specified along with Speed or Duplex, "
-		       "parameter ignored\n");
+		DPRINTK(PROBE, INFO,
+		        "AutoNeg specified along with Speed or Duplex, "
+		        "parameter ignored\n");
		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	...
		int an = AutoNeg[bd];
-		e1000_validate_option(&an, &opt);
+		e1000_validate_option(&an, &opt, adapter);
		adapter->hw.autoneg_advertised = an;
	}
...
@@ -603,78 +606,85 @@ e1000_check_copper_options(struct e1000_adapter *adapter)

In the switch over the combined speed and duplex setting every printk(KERN_INFO ...) becomes DPRINTK(PROBE, INFO, ...); the advertised/forced settings themselves are unchanged:

	case 0:
		adapter->hw.autoneg = adapter->fc_autoneg = 1;
		if(Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET)
-			printk(KERN_INFO
-			       "Speed and duplex autonegotiation enabled\n");
+			DPRINTK(PROBE, INFO,
+			        "Speed and duplex autonegotiation enabled\n");
		break;
	case HALF_DUPLEX:
-		printk(KERN_INFO "Half Duplex specified without Speed\n");
-		printk(KERN_INFO "Using Autonegotiation at Half Duplex only\n");
+		DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at Half Duplex only\n");
		adapter->hw.autoneg = adapter->fc_autoneg = 1;
		adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
		                                 ADVERTISE_100_HALF;
		break;
	case FULL_DUPLEX:            "Full Duplex specified without Speed" / "Using Autonegotiation at Full Duplex only"; autoneg = 1, advertise ADVERTISE_10_FULL | ADVERTISE_100_FULL | ADVERTISE_1000_FULL
	case SPEED_10:               "10 Mbps Speed specified without Duplex" / "Using Autonegotiation at 10 Mbps only"; autoneg = 1, advertise ADVERTISE_10_HALF | ADVERTISE_10_FULL
	case SPEED_10 + HALF_DUPLEX: "Forcing to 10 Mbps Half Duplex"; autoneg = 0, forced_speed_duplex = e1000_10_half, autoneg_advertised = 0
	case SPEED_10 + FULL_DUPLEX: "Forcing to 10 Mbps Full Duplex"; autoneg = 0, forced_speed_duplex = e1000_10_full, autoneg_advertised = 0
	case SPEED_100:              "100 Mbps Speed specified without Duplex" / "Using Autonegotiation at 100 Mbps only"; autoneg = 1, advertise ADVERTISE_100_HALF | ADVERTISE_100_FULL
	case SPEED_100 + HALF_DUPLEX: "Forcing to 100 Mbps Half Duplex"; autoneg = 0, forced_speed_duplex = e1000_100_half, autoneg_advertised = 0
	case SPEED_100 + FULL_DUPLEX: "Forcing to 100 Mbps Full Duplex"; autoneg = 0, forced_speed_duplex = e1000_100_full, autoneg_advertised = 0
	case SPEED_1000:             "1000 Mbps Speed specified without Duplex" / "Using Autonegotiation at 1000 Mbps Full Duplex only"; autoneg = 1, advertise ADVERTISE_1000_FULL
	case SPEED_1000 + HALF_DUPLEX: "Half Duplex is not supported at 1000 Mbps" / "Using Autonegotiation at 1000 Mbps Full Duplex only"; autoneg = 1, advertise ADVERTISE_1000_FULL
	case SPEED_1000 + FULL_DUPLEX: "Using Autonegotiation at 1000 Mbps Full Duplex only"; autoneg = 1, advertise ADVERTISE_1000_FULL
...
@@ -685,7 +695,8 @@ e1000_check_copper_options(struct e1000_adapter *adapter)

	/* Speed, AutoNeg and MDI/MDI-X must all play nice */
	if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
-		printk(KERN_INFO "Speed, AutoNeg and MDI-X specifications are "
-		       "incompatible. Setting MDI-X to a compatible value.\n");
+		DPRINTK(PROBE, INFO, "Speed, AutoNeg and MDI-X specifications are "
+		        "incompatible. Setting MDI-X to a compatible value.\n");
	}
...

Two small sketches of the option handling follow: the Interrupt Throttling Rate special cases and the speed/duplex case arithmetic.
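The InterruptThrottleRate handling above has two escape hatches before the normal range check: 0 turns throttling off and 1 selects dynamic mode, while any other value is validated against the option's range and falls back to the default if it is out of bounds. A plain-C sketch of that flow; the numeric limits and default are placeholders standing in for the MIN_ITR/MAX_ITR-style constants defined elsewhere in e1000_param.c.

	/* Sketch of the InterruptThrottleRate decision, reduced to plain C. */
	static int demo_check_itr(int requested)
	{
		switch (requested) {
		case 0:			/* interrupt throttling turned off */
		case 1:			/* dynamic (adaptive) throttling   */
			return requested;
		default:
			if (requested >= 100 && requested <= 100000)	/* placeholder range */
				return requested;
			return 8000;	/* placeholder default, analogous to opt.def */
		}
	}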
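The big switch in e1000_check_copper_options() keys on the arithmetic sum of the validated Speed and Duplex parameters, which is why each forced combination gets its own "SPEED_x + y_DUPLEX" case label. A tiny illustration, assuming the conventional values half duplex = 1 and full duplex = 2 used by this driver's option lists.

	/* Illustrative only: SPEED_100 + FULL_DUPLEX evaluates to 102 with the
	 * assumed duplex encodings below, selecting the "Forcing to 100 Mbps
	 * Full Duplex" case above. */
	enum { DEMO_HALF_DUPLEX = 1, DEMO_FULL_DUPLEX = 2 };

	static int demo_speed_duplex_key(int speed, int duplex)
	{
		return speed + duplex;	/* e.g. 100 + DEMO_FULL_DUPLEX = 102 */
	}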
drivers/net/ibm_emac/Makefile
0 → 100644
View file @
ee86da9d
#
# Makefile for the IBM PPC4xx EMAC controllers
#
obj-$(CONFIG_IBM_EMAC) += ibm_emac.o

ibm_emac-objs := ibm_emac_mal.o ibm_emac_core.o ibm_emac_phy.o

# Only need this if you want to see additional debug messages
ifeq ($(CONFIG_IBM_EMAC_ERRMSG), y)
ibm_emac-objs += ibm_emac_debug.o
endif
drivers/net/ibm_emac/ibm_emac.h
0 → 100644
View file @
ee86da9d
/*
* ibm_emac.h
*
*
* Armin Kuster akuster@mvista.com
* June, 2002
*
* Copyright 2002 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef _IBM_EMAC_H_
#define _IBM_EMAC_H_
/* General defines needed for the driver */
/* Emac */
typedef struct emac_regs {
	u32 em0mr0;
	u32 em0mr1;
	u32 em0tmr0;
	u32 em0tmr1;
	u32 em0rmr;
	u32 em0isr;
	u32 em0iser;
	u32 em0iahr;
	u32 em0ialr;
	u32 em0vtpid;
	u32 em0vtci;
	u32 em0ptr;
	u32 em0iaht1;
	u32 em0iaht2;
	u32 em0iaht3;
	u32 em0iaht4;
	u32 em0gaht1;
	u32 em0gaht2;
	u32 em0gaht3;
	u32 em0gaht4;
	u32 em0lsah;
	u32 em0lsal;
	u32 em0ipgvr;
	u32 em0stacr;
	u32 em0trtr;
	u32 em0rwmr;
} emac_t;
/* MODE REG 0 */
#define EMAC_M0_RXI 0x80000000
#define EMAC_M0_TXI 0x40000000
#define EMAC_M0_SRST 0x20000000
#define EMAC_M0_TXE 0x10000000
#define EMAC_M0_RXE 0x08000000
#define EMAC_M0_WKE 0x04000000
/* MODE Reg 1 */
#define EMAC_M1_FDE 0x80000000
#define EMAC_M1_ILE 0x40000000
#define EMAC_M1_VLE 0x20000000
#define EMAC_M1_EIFC 0x10000000
#define EMAC_M1_APP 0x08000000
#define EMAC_M1_AEMI 0x02000000
#define EMAC_M1_IST 0x01000000
#define EMAC_M1_MF_1000GPCS 0x00c00000
/* Internal GPCS */
#define EMAC_M1_MF_1000MBPS 0x00800000
/* External GPCS */
#define EMAC_M1_MF_100MBPS 0x00400000
#define EMAC_M1_RFS_16K 0x00280000
/* 000 for 512 byte */
#define EMAC_M1_TR 0x00008000
#ifdef CONFIG_IBM_EMAC4
#define EMAC_M1_RFS_8K 0x00200000
#define EMAC_M1_RFS_4K 0x00180000
#define EMAC_M1_RFS_2K 0x00100000
#define EMAC_M1_RFS_1K 0x00080000
#define EMAC_M1_TX_FIFO_16K 0x00050000
/* 0's for 512 byte */
#define EMAC_M1_TX_FIFO_8K 0x00040000
#define EMAC_M1_TX_FIFO_4K 0x00030000
#define EMAC_M1_TX_FIFO_2K 0x00020000
#define EMAC_M1_TX_FIFO_1K 0x00010000
#define EMAC_M1_TX_TR 0x00008000
#define EMAC_M1_TX_MWSW 0x00001000
/* 0 wait for status */
#define EMAC_M1_JUMBO_ENABLE 0x00000800
/* Upt to 9Kr status */
#define EMAC_M1_OPB_CLK_66 0x00000008
/* 66Mhz */
#define EMAC_M1_OPB_CLK_83 0x00000010
/* 83Mhz */
#define EMAC_M1_OPB_CLK_100 0x00000018
/* 100Mhz */
#define EMAC_M1_OPB_CLK_100P 0x00000020
/* 100Mhz+ */
#else
/* CONFIG_IBM_EMAC4 */
#define EMAC_M1_RFS_4K 0x00300000
/* ~4k for 512 byte */
#define EMAC_M1_RFS_2K 0x00200000
#define EMAC_M1_RFS_1K 0x00100000
#define EMAC_M1_TX_FIFO_2K 0x00080000
/* 0's for 512 byte */
#define EMAC_M1_TX_FIFO_1K 0x00040000
#define EMAC_M1_TR0_DEPEND 0x00010000
/* 0'x for single packet */
#define EMAC_M1_TR1_DEPEND 0x00004000
#define EMAC_M1_TR1_MULTI 0x00002000
#define EMAC_M1_JUMBO_ENABLE 0x00001000
#endif
/* CONFIG_IBM_EMAC4 */
#define EMAC_M1_BASE (EMAC_M1_TX_FIFO_2K | \
EMAC_M1_APP | \
EMAC_M1_TR)
/* Transmit Mode Register 0 */
#define EMAC_TMR0_GNP0 0x80000000
#define EMAC_TMR0_GNP1 0x40000000
#define EMAC_TMR0_GNPD 0x20000000
#define EMAC_TMR0_FC 0x10000000
#define EMAC_TMR0_TFAE_2_32 0x00000001
#define EMAC_TMR0_TFAE_4_64 0x00000002
#define EMAC_TMR0_TFAE_8_128 0x00000003
#define EMAC_TMR0_TFAE_16_256 0x00000004
#define EMAC_TMR0_TFAE_32_512 0x00000005
#define EMAC_TMR0_TFAE_64_1024 0x00000006
#define EMAC_TMR0_TFAE_128_2048 0x00000007
/* Receive Mode Register */
#define EMAC_RMR_SP 0x80000000
#define EMAC_RMR_SFCS 0x40000000
#define EMAC_RMR_ARRP 0x20000000
#define EMAC_RMR_ARP 0x10000000
#define EMAC_RMR_AROP 0x08000000
#define EMAC_RMR_ARPI 0x04000000
#define EMAC_RMR_PPP 0x02000000
#define EMAC_RMR_PME 0x01000000
#define EMAC_RMR_PMME 0x00800000
#define EMAC_RMR_IAE 0x00400000
#define EMAC_RMR_MIAE 0x00200000
#define EMAC_RMR_BAE 0x00100000
#define EMAC_RMR_MAE 0x00080000
#define EMAC_RMR_RFAF_2_32 0x00000001
#define EMAC_RMR_RFAF_4_64 0x00000002
#define EMAC_RMR_RFAF_8_128 0x00000003
#define EMAC_RMR_RFAF_16_256 0x00000004
#define EMAC_RMR_RFAF_32_512 0x00000005
#define EMAC_RMR_RFAF_64_1024 0x00000006
#define EMAC_RMR_RFAF_128_2048 0x00000007
#define EMAC_RMR_BASE (EMAC_RMR_IAE | EMAC_RMR_BAE)
/* Interrupt Status & enable Regs */
#define EMAC_ISR_OVR 0x02000000
#define EMAC_ISR_PP 0x01000000
#define EMAC_ISR_BP 0x00800000
#define EMAC_ISR_RP 0x00400000
#define EMAC_ISR_SE 0x00200000
#define EMAC_ISR_ALE 0x00100000
#define EMAC_ISR_BFCS 0x00080000
#define EMAC_ISR_PTLE 0x00040000
#define EMAC_ISR_ORE 0x00020000
#define EMAC_ISR_IRE 0x00010000
#define EMAC_ISR_DBDM 0x00000200
#define EMAC_ISR_DB0 0x00000100
#define EMAC_ISR_SE0 0x00000080
#define EMAC_ISR_TE0 0x00000040
#define EMAC_ISR_DB1 0x00000020
#define EMAC_ISR_SE1 0x00000010
#define EMAC_ISR_TE1 0x00000008
#define EMAC_ISR_MOS 0x00000002
#define EMAC_ISR_MOF 0x00000001
/* STA CONTROL REG */
#define EMAC_STACR_OC 0x00008000
#define EMAC_STACR_PHYE 0x00004000
#define EMAC_STACR_WRITE 0x00002000
#define EMAC_STACR_READ 0x00001000
#define EMAC_STACR_CLK_83MHZ 0x00000800
/* 0's for 50Mhz */
#define EMAC_STACR_CLK_66MHZ 0x00000400
#define EMAC_STACR_CLK_100MHZ 0x00000C00
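/*
 * Illustrative sketch (not part of the original file): emac_phy_read()
 * in ibm_emac_core.c composes an MDIO read command from these STACR
 * bits roughly as follows, with 'reg' the PHY register number and
 * 'mii_id' the PHY address:
 *
 *	stacr = (EMAC_STACR_READ | (reg & 0x1f)) & ~EMAC_STACR_CLK_100MHZ;
 *	stacr |= (mii_id & 0x1f) << 5;
 *	out_be32(&emacp->em0stacr, stacr);
 *	// once EMAC_STACR_OC is set again, the result is in (stacr >> 16)
 */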
/* Transmit Request Threshold Register */
#define EMAC_TRTR_1600 0x18000000
/* 0's for 64 Bytes */
#define EMAC_TRTR_1024 0x0f000000
#define EMAC_TRTR_512 0x07000000
#define EMAC_TRTR_256 0x03000000
#define EMAC_TRTR_192 0x10000000
#define EMAC_TRTR_128 0x01000000
#define EMAC_TX_CTRL_GFCS 0x0200
#define EMAC_TX_CTRL_GP 0x0100
#define EMAC_TX_CTRL_ISA 0x0080
#define EMAC_TX_CTRL_RSA 0x0040
#define EMAC_TX_CTRL_IVT 0x0020
#define EMAC_TX_CTRL_RVT 0x0010
#define EMAC_TX_CTRL_TAH_CSUM 0x000e
/* TAH only */
#define EMAC_TX_CTRL_TAH_SEG4 0x000a
/* TAH only */
#define EMAC_TX_CTRL_TAH_SEG3 0x0008
/* TAH only */
#define EMAC_TX_CTRL_TAH_SEG2 0x0006
/* TAH only */
#define EMAC_TX_CTRL_TAH_SEG1 0x0004
/* TAH only */
#define EMAC_TX_CTRL_TAH_SEG0 0x0002
/* TAH only */
#define EMAC_TX_CTRL_TAH_DIS 0x0000
/* TAH only */
#define EMAC_TX_CTRL_DFLT ( \
MAL_TX_CTRL_INTR | EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP )
/* madmal transmit status / Control bits */
#define EMAC_TX_ST_BFCS 0x0200
#define EMAC_TX_ST_BPP 0x0100
#define EMAC_TX_ST_LCS 0x0080
#define EMAC_TX_ST_ED 0x0040
#define EMAC_TX_ST_EC 0x0020
#define EMAC_TX_ST_LC 0x0010
#define EMAC_TX_ST_MC 0x0008
#define EMAC_TX_ST_SC 0x0004
#define EMAC_TX_ST_UR 0x0002
#define EMAC_TX_ST_SQE 0x0001
/* madmal receive status / Control bits */
#define EMAC_RX_ST_OE 0x0200
#define EMAC_RX_ST_PP 0x0100
#define EMAC_RX_ST_BP 0x0080
#define EMAC_RX_ST_RP 0x0040
#define EMAC_RX_ST_SE 0x0020
#define EMAC_RX_ST_AE 0x0010
#define EMAC_RX_ST_BFCS 0x0008
#define EMAC_RX_ST_PTL 0x0004
#define EMAC_RX_ST_ORE 0x0002
#define EMAC_RX_ST_IRE 0x0001
#define EMAC_BAD_RX_PACKET 0x02ff
#define EMAC_CSUM_VER_ERROR 0x0003
/* identify a bad rx packet dependent on emac features */
#ifdef CONFIG_IBM_EMAC4
#define EMAC_IS_BAD_RX_PACKET(desc) \
(((desc & (EMAC_BAD_RX_PACKET & ~EMAC_CSUM_VER_ERROR)) || \
((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_ORE) || \
((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_IRE)))
#else
#define EMAC_IS_BAD_RX_PACKET(desc) \
(desc & EMAC_BAD_RX_PACKET)
#endif
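/*
 * Illustrative usage (not part of the original file): the receive clean
 * path in ibm_emac_core.c applies this macro to a local copy of the
 * descriptor control word, e.g.:
 *
 *	unsigned short ctrl = fep->rx_desc[i].ctrl;
 *	if (EMAC_IS_BAD_RX_PACKET(ctrl))
 *		fep->stats.rx_errors++;	// then classify via EMAC_RX_ST_* bits
 */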
/* Revision specific EMAC register defaults */
#ifdef CONFIG_IBM_EMAC4
#define EMAC_M1_DEFAULT (EMAC_M1_BASE | \
EMAC_M1_OPB_CLK_83 | \
EMAC_M1_TX_MWSW)
#define EMAC_RMR_DEFAULT (EMAC_RMR_BASE | \
EMAC_RMR_RFAF_128_2048)
#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP0 | \
EMAC_TMR0_TFAE_128_2048)
#define EMAC_TRTR_DEFAULT EMAC_TRTR_1024
#else
/* !CONFIG_IBM_EMAC4 */
#define EMAC_M1_DEFAULT EMAC_M1_BASE
#define EMAC_RMR_DEFAULT EMAC_RMR_BASE
#define EMAC_TMR0_XMIT EMAC_TMR0_GNP0
#define EMAC_TRTR_DEFAULT EMAC_TRTR_1600
#endif
/* CONFIG_IBM_EMAC4 */
/* SoC implementation specific EMAC register defaults */
#if defined(CONFIG_440GP)
#define EMAC_RWMR_DEFAULT 0x80009000
#define EMAC_TMR0_DEFAULT 0x00000000
#define EMAC_TMR1_DEFAULT 0xf8640000
#elif defined(CONFIG_440GX)
#define EMAC_RWMR_DEFAULT 0x1000a200
#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_128_2048
#define EMAC_TMR1_DEFAULT 0x88810000
#else
#define EMAC_RWMR_DEFAULT 0x0f002000
#define EMAC_TMR0_DEFAULT 0x00000000
#define EMAC_TMR1_DEFAULT 0x380f0000
#endif
/* CONFIG_440GP */
#endif
drivers/net/ibm_emac/ibm_emac_core.c
0 → 100644
/*
* ibm_emac_core.c
*
* Ethernet driver for the built in ethernet on the IBM 4xx PowerPC
* processors.
*
* (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
*
* Based on original work by
*
* Armin Kuster <akuster@mvista.com>
* Johnnie Peters <jpeters@mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
* TODO
* - Check for races in the "remove" code path
* - Add some Power Management to the MAC and the PHY
 * - Audit the remaining non-rewritten code (--BenH)
 * - Clean up message display using the msglevel mechanism
* - Address all errata
* - Audit all register update paths to ensure they
* are being written post soft reset if required.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <asm/processor.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include "ibm_emac_core.h"
//#define MDIO_DEBUG(fmt) printk fmt
#define MDIO_DEBUG(fmt)
//#define LINK_DEBUG(fmt) printk fmt
#define LINK_DEBUG(fmt)
//#define PKT_DEBUG(fmt) printk fmt
#define PKT_DEBUG(fmt)
#define DRV_NAME "emac"
#define DRV_VERSION "2.0"
#define DRV_AUTHOR "Benjamin Herrenschmidt <benh@kernel.crashing.org>"
#define DRV_DESC "IBM EMAC Ethernet driver"
/*
* When mdio_idx >= 0, contains a list of emac ocp_devs
* that have had their initialization deferred until the
* common MDIO controller has been initialized.
*/
LIST_HEAD(emac_init_list);

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

static int skb_res = SKB_RES;
module_param(skb_res, int, 0444);
MODULE_PARM_DESC(skb_res, "Amount of data to reserve on skb buffs\n"
		 "The 405 handles a misaligned IP header fine but\n"
		 "this can help if you are routing to a tunnel or a\n"
		 "device that needs aligned data. 0..2");
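/*
 * Illustrative note (not part of the original file): skb_res is a
 * read-only (0444) module parameter, so on a modular build it would be
 * set at load time, for example:
 *
 *	modprobe <module-name> skb_res=2
 *
 * The module name depends on how the Makefile links these objects and is
 * not spelled out here; treat it as a placeholder.
 */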
#define RGMII_PRIV(ocpdev) ((struct ibm_ocp_rgmii*)ocp_get_drvdata(ocpdev))
static unsigned int rgmii_enable[] = {
	RGMII_RTBI,
	RGMII_RGMII,
	RGMII_TBI,
	RGMII_GMII
};

static unsigned int rgmii_speed_mask[] = {
	0, 0,
	RGMII_MII2_SPDMASK,
	RGMII_MII3_SPDMASK
};

static unsigned int rgmii_speed100[] = {
	0, 0,
	RGMII_MII2_100MB,
	RGMII_MII3_100MB
};

static unsigned int rgmii_speed1000[] = {
	0, 0,
	RGMII_MII2_1000MB,
	RGMII_MII3_1000MB
};
#define ZMII_PRIV(ocpdev) ((struct ibm_ocp_zmii*)ocp_get_drvdata(ocpdev))
static unsigned int zmii_enable[][4] = {
	{ZMII_SMII0, ZMII_RMII0, ZMII_MII0,
	 ~(ZMII_MDI1 | ZMII_MDI2 | ZMII_MDI3)},
	{ZMII_SMII1, ZMII_RMII1, ZMII_MII1,
	 ~(ZMII_MDI0 | ZMII_MDI2 | ZMII_MDI3)},
	{ZMII_SMII2, ZMII_RMII2, ZMII_MII2,
	 ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI3)},
	{ZMII_SMII3, ZMII_RMII3, ZMII_MII3,
	 ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI2)}
};

static unsigned int mdi_enable[] = {
	ZMII_MDI0,
	ZMII_MDI1,
	ZMII_MDI2,
	ZMII_MDI3
};

static unsigned int zmii_speed = 0x0;

static unsigned int zmii_speed100[] = {
	ZMII_MII0_100MB,
	ZMII_MII1_100MB
};
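/*
 * Illustrative sketch (not part of the original file): these tables are
 * indexed by ZMII input and desired mode; emac_enable_zmii_port() below
 * builds the FER value roughly as:
 *
 *	mask = in_be32(&zmii->base->fer);
 *	mask &= zmii_enable[input][MDI];	// drop the other MDI enables
 *	mask |= zmii_enable[input][zmii->mode[input]] | mdi_enable[input];
 *	out_be32(&zmii->base->fer, mask);
 */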
/* Since multiple EMACs share MDIO lines in various ways, we need
* to avoid re-using the same PHY ID in cases where the arch didn't
* setup precise phy_map entries
*/
static u32 busy_phy_map = 0;

/* If EMACs share a common MDIO device, this points to it */
static struct net_device *mdio_ndev = NULL;

struct emac_def_dev {
	struct list_head link;
	struct ocp_device *ocpdev;
	struct ibm_ocp_mal *mal;
};
static struct net_device_stats *emac_stats(struct net_device *dev)
{
	struct ocp_enet_private *fep = dev->priv;
	return &fep->stats;
};
static
int
emac_init_rgmii
(
struct
ocp_device
*
rgmii_dev
,
int
input
,
int
phy_mode
)
{
struct
ibm_ocp_rgmii
*
rgmii
=
RGMII_PRIV
(
rgmii_dev
);
const
char
*
mode_name
[]
=
{
"RTBI"
,
"RGMII"
,
"TBI"
,
"GMII"
};
int
mode
=
-
1
;
if
(
!
rgmii
)
{
rgmii
=
kmalloc
(
sizeof
(
struct
ibm_ocp_rgmii
),
GFP_KERNEL
);
if
(
rgmii
==
NULL
)
{
printk
(
KERN_ERR
"rgmii%d: Out of memory allocating RGMII structure!
\n
"
,
rgmii_dev
->
def
->
index
);
return
-
ENOMEM
;
}
memset
(
rgmii
,
0
,
sizeof
(
*
rgmii
));
rgmii
->
base
=
(
struct
rgmii_regs
*
)
ioremap
(
rgmii_dev
->
def
->
paddr
,
sizeof
(
*
rgmii
->
base
));
if
(
rgmii
->
base
==
NULL
)
{
printk
(
KERN_ERR
"rgmii%d: Cannot ioremap bridge registers!
\n
"
,
rgmii_dev
->
def
->
index
);
kfree
(
rgmii
);
return
-
ENOMEM
;
}
ocp_set_drvdata
(
rgmii_dev
,
rgmii
);
}
if
(
phy_mode
)
{
switch
(
phy_mode
)
{
case
PHY_MODE_GMII
:
mode
=
GMII
;
break
;
case
PHY_MODE_TBI
:
mode
=
TBI
;
break
;
case
PHY_MODE_RTBI
:
mode
=
RTBI
;
break
;
case
PHY_MODE_RGMII
:
default:
mode
=
RGMII
;
}
rgmii
->
base
->
fer
&=
~
RGMII_FER_MASK
(
input
);
rgmii
->
base
->
fer
|=
rgmii_enable
[
mode
]
<<
(
4
*
input
);
}
else
{
switch
((
rgmii
->
base
->
fer
&
RGMII_FER_MASK
(
input
))
>>
(
4
*
input
))
{
case
RGMII_RTBI
:
mode
=
RTBI
;
break
;
case
RGMII_RGMII
:
mode
=
RGMII
;
break
;
case
RGMII_TBI
:
mode
=
TBI
;
break
;
case
RGMII_GMII
:
mode
=
GMII
;
}
}
/* Set mode to RGMII if nothing valid is detected */
if
(
mode
<
0
)
mode
=
RGMII
;
printk
(
KERN_NOTICE
"rgmii%d: input %d in %s mode
\n
"
,
rgmii_dev
->
def
->
index
,
input
,
mode_name
[
mode
]);
rgmii
->
mode
[
input
]
=
mode
;
rgmii
->
users
++
;
return
0
;
}
static void emac_rgmii_port_speed(struct ocp_device *ocpdev, int input,
				  int speed)
{
	struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(ocpdev);
	unsigned int rgmii_speed;

	rgmii_speed = in_be32(&rgmii->base->ssr);

	rgmii_speed &= ~rgmii_speed_mask[input];

	if (speed == 1000)
		rgmii_speed |= rgmii_speed1000[input];
	else if (speed == 100)
		rgmii_speed |= rgmii_speed100[input];

	out_be32(&rgmii->base->ssr, rgmii_speed);
}
static void emac_close_rgmii(struct ocp_device *ocpdev)
{
	struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(ocpdev);
	BUG_ON(!rgmii || rgmii->users == 0);

	if (!--rgmii->users) {
		ocp_set_drvdata(ocpdev, NULL);
		iounmap((void *)rgmii->base);
		kfree(rgmii);
	}
}
static
int
emac_init_zmii
(
struct
ocp_device
*
zmii_dev
,
int
input
,
int
phy_mode
)
{
struct
ibm_ocp_zmii
*
zmii
=
ZMII_PRIV
(
zmii_dev
);
const
char
*
mode_name
[]
=
{
"SMII"
,
"RMII"
,
"MII"
};
int
mode
=
-
1
;
if
(
!
zmii
)
{
zmii
=
kmalloc
(
sizeof
(
struct
ibm_ocp_zmii
),
GFP_KERNEL
);
if
(
zmii
==
NULL
)
{
printk
(
KERN_ERR
"zmii%d: Out of memory allocating ZMII structure!
\n
"
,
zmii_dev
->
def
->
index
);
return
-
ENOMEM
;
}
memset
(
zmii
,
0
,
sizeof
(
*
zmii
));
zmii
->
base
=
(
struct
zmii_regs
*
)
ioremap
(
zmii_dev
->
def
->
paddr
,
sizeof
(
*
zmii
->
base
));
if
(
zmii
->
base
==
NULL
)
{
printk
(
KERN_ERR
"zmii%d: Cannot ioremap bridge registers!
\n
"
,
zmii_dev
->
def
->
index
);
kfree
(
zmii
);
return
-
ENOMEM
;
}
ocp_set_drvdata
(
zmii_dev
,
zmii
);
}
if
(
phy_mode
)
{
switch
(
phy_mode
)
{
case
PHY_MODE_MII
:
mode
=
MII
;
break
;
case
PHY_MODE_RMII
:
mode
=
RMII
;
break
;
case
PHY_MODE_SMII
:
default:
mode
=
SMII
;
}
zmii
->
base
->
fer
&=
~
ZMII_FER_MASK
(
input
);
zmii
->
base
->
fer
|=
zmii_enable
[
input
][
mode
];
}
else
{
switch
((
zmii
->
base
->
fer
&
ZMII_FER_MASK
(
input
))
<<
(
4
*
input
))
{
case
ZMII_MII0
:
mode
=
MII
;
break
;
case
ZMII_RMII0
:
mode
=
RMII
;
break
;
case
ZMII_SMII0
:
mode
=
SMII
;
}
}
/* Set mode to SMII if nothing valid is detected */
if
(
mode
<
0
)
mode
=
SMII
;
printk
(
KERN_NOTICE
"zmii%d: input %d in %s mode
\n
"
,
zmii_dev
->
def
->
index
,
input
,
mode_name
[
mode
]);
zmii
->
mode
[
input
]
=
mode
;
zmii
->
users
++
;
return
0
;
}
static void emac_enable_zmii_port(struct ocp_device *ocpdev, int input)
{
	u32 mask;
	struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);

	mask = in_be32(&zmii->base->fer);
	mask &= zmii_enable[input][MDI];	/* turn all non enabled MDI's off */
	mask |= zmii_enable[input][zmii->mode[input]] | mdi_enable[input];
	out_be32(&zmii->base->fer, mask);
}
static void emac_zmii_port_speed(struct ocp_device *ocpdev, int input,
				 int speed)
{
	struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);

	if (speed == 100)
		zmii_speed |= zmii_speed100[input];
	else
		zmii_speed &= ~zmii_speed100[input];

	out_be32(&zmii->base->ssr, zmii_speed);
}
static void emac_close_zmii(struct ocp_device *ocpdev)
{
	struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);
	BUG_ON(!zmii || zmii->users == 0);

	if (!--zmii->users) {
		ocp_set_drvdata(ocpdev, NULL);
		iounmap((void *)zmii->base);
		kfree(zmii);
	}
}
int
emac_phy_read
(
struct
net_device
*
dev
,
int
mii_id
,
int
reg
)
{
uint32_t
stacr
;
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
emac_t
*
emacp
=
fep
->
emacp
;
MDIO_DEBUG
((
"%s: phy_read, id: 0x%x, reg: 0x%x
\n
"
,
dev
->
name
,
mii_id
,
reg
));
/* Enable proper ZMII port */
if
(
fep
->
zmii_dev
)
emac_enable_zmii_port
(
fep
->
zmii_dev
,
fep
->
zmii_input
);
/* Use the EMAC that has the MDIO port */
if
(
fep
->
mdio_dev
)
{
dev
=
fep
->
mdio_dev
;
fep
=
dev
->
priv
;
emacp
=
fep
->
emacp
;
}
udelay
(
MDIO_DELAY
);
if
((
in_be32
(
&
emacp
->
em0stacr
)
&
EMAC_STACR_OC
)
==
0
)
{
printk
(
KERN_WARNING
"%s: PHY read timeout #1!
\n
"
,
dev
->
name
);
return
-
1
;
}
/* Clear the speed bits and make a read request to the PHY */
stacr
=
((
EMAC_STACR_READ
|
(
reg
&
0x1f
))
&
~
EMAC_STACR_CLK_100MHZ
);
stacr
|=
((
mii_id
&
0x1F
)
<<
5
);
out_be32
(
&
emacp
->
em0stacr
,
stacr
);
udelay
(
MDIO_DELAY
);
stacr
=
in_be32
(
&
emacp
->
em0stacr
);
if
((
stacr
&
EMAC_STACR_OC
)
==
0
)
{
printk
(
KERN_WARNING
"%s: PHY read timeout #2!
\n
"
,
dev
->
name
);
return
-
1
;
}
/* Check for a read error */
if
(
stacr
&
EMAC_STACR_PHYE
)
{
MDIO_DEBUG
((
"EMAC MDIO PHY error !
\n
"
));
return
-
1
;
}
MDIO_DEBUG
((
" -> 0x%x
\n
"
,
stacr
>>
16
));
return
(
stacr
>>
16
);
}
void
emac_phy_write
(
struct
net_device
*
dev
,
int
mii_id
,
int
reg
,
int
data
)
{
uint32_t
stacr
;
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
emac_t
*
emacp
=
fep
->
emacp
;
MDIO_DEBUG
((
"%s phy_write, id: 0x%x, reg: 0x%x, data: 0x%x
\n
"
,
dev
->
name
,
mii_id
,
reg
,
data
));
/* Enable proper ZMII port */
if
(
fep
->
zmii_dev
)
emac_enable_zmii_port
(
fep
->
zmii_dev
,
fep
->
zmii_input
);
/* Use the EMAC that has the MDIO port */
if
(
fep
->
mdio_dev
)
{
dev
=
fep
->
mdio_dev
;
fep
=
dev
->
priv
;
emacp
=
fep
->
emacp
;
}
udelay
(
MDIO_DELAY
);
if
((
in_be32
(
&
emacp
->
em0stacr
)
&
EMAC_STACR_OC
)
==
0
)
{
printk
(
KERN_WARNING
"%s: PHY write timeout #2!
\n
"
,
dev
->
name
);
return
;
}
/* Clear the speed bits and make a read request to the PHY */
stacr
=
((
EMAC_STACR_WRITE
|
(
reg
&
0x1f
))
&
~
EMAC_STACR_CLK_100MHZ
);
stacr
|=
((
mii_id
&
0x1f
)
<<
5
)
|
((
data
&
0xffff
)
<<
16
);
out_be32
(
&
emacp
->
em0stacr
,
stacr
);
udelay
(
MDIO_DELAY
);
if
((
in_be32
(
&
emacp
->
em0stacr
)
&
EMAC_STACR_OC
)
==
0
)
printk
(
KERN_WARNING
"%s: PHY write timeout #2!
\n
"
,
dev
->
name
);
/* Check for a write error */
if
((
stacr
&
EMAC_STACR_PHYE
)
!=
0
)
{
MDIO_DEBUG
((
"EMAC MDIO PHY error !
\n
"
));
}
}
static
void
emac_txeob_dev
(
void
*
param
,
u32
chanmask
)
{
struct
net_device
*
dev
=
param
;
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
unsigned
long
flags
;
spin_lock_irqsave
(
&
fep
->
lock
,
flags
);
PKT_DEBUG
((
"emac_txeob_dev() entry, tx_cnt: %d
\n
"
,
fep
->
tx_cnt
));
while
(
fep
->
tx_cnt
&&
!
(
fep
->
tx_desc
[
fep
->
ack_slot
].
ctrl
&
MAL_TX_CTRL_READY
))
{
if
(
fep
->
tx_desc
[
fep
->
ack_slot
].
ctrl
&
MAL_TX_CTRL_LAST
)
{
/* Tell the system the transmit completed. */
dma_unmap_single
(
&
fep
->
ocpdev
->
dev
,
fep
->
tx_desc
[
fep
->
ack_slot
].
data_ptr
,
fep
->
tx_desc
[
fep
->
ack_slot
].
data_len
,
DMA_TO_DEVICE
);
dev_kfree_skb_irq
(
fep
->
tx_skb
[
fep
->
ack_slot
]);
if
(
fep
->
tx_desc
[
fep
->
ack_slot
].
ctrl
&
(
EMAC_TX_ST_EC
|
EMAC_TX_ST_MC
|
EMAC_TX_ST_SC
))
fep
->
stats
.
collisions
++
;
}
fep
->
tx_skb
[
fep
->
ack_slot
]
=
(
struct
sk_buff
*
)
NULL
;
if
(
++
fep
->
ack_slot
==
NUM_TX_BUFF
)
fep
->
ack_slot
=
0
;
fep
->
tx_cnt
--
;
}
if
(
fep
->
tx_cnt
<
NUM_TX_BUFF
)
netif_wake_queue
(
dev
);
PKT_DEBUG
((
"emac_txeob_dev() exit, tx_cnt: %d
\n
"
,
fep
->
tx_cnt
));
spin_unlock_irqrestore
(
&
fep
->
lock
,
flags
);
}
/*
Fill/Re-fill the rx chain with valid ctrl/ptrs.
This function will fill from rx_slot up to the parm end.
So to completely fill the chain pre-set rx_slot to 0 and
pass in an end of 0.
*/
static
void
emac_rx_fill
(
struct
net_device
*
dev
,
int
end
)
{
int
i
;
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
i
=
fep
->
rx_slot
;
do
{
/* We don't want the 16 bytes skb_reserve done by dev_alloc_skb:
 * it breaks our cache line alignment. However, we still allocate
 * +16 so that we end up allocating the exact same size as
 * dev_alloc_skb() would do.
 * Also, because of the skb_res, the max DMA size we give to EMAC
 * is slightly wrong, causing it to potentially DMA 2 more bytes
 * from a broken/oversized packet. These 16 bytes will take care
 * that we don't walk on somebody else's toes with that.
 * (See the illustrative sizing note after this function.)
 */
fep
->
rx_skb
[
i
]
=
alloc_skb
(
fep
->
rx_buffer_size
+
16
,
GFP_ATOMIC
);
if
(
fep
->
rx_skb
[
i
]
==
NULL
)
{
/* Keep rx_slot here, the next time clean/fill is called
* we will try again before the MAL wraps back here
* If the MAL tries to use this descriptor with
* the EMPTY bit off it will cause the
* rxde interrupt. That is where we will
* try again to allocate an sk_buff.
*/
break
;
}
if
(
skb_res
)
skb_reserve
(
fep
->
rx_skb
[
i
],
skb_res
);
/* We must NOT dma_map_single the cache line right after the
* buffer, so we must crop our sync size to account for the
* reserved space
*/
fep
->
rx_desc
[
i
].
data_ptr
=
(
unsigned
char
*
)
dma_map_single
(
&
fep
->
ocpdev
->
dev
,
(
void
*
)
fep
->
rx_skb
[
i
]
->
data
,
fep
->
rx_buffer_size
-
skb_res
,
DMA_FROM_DEVICE
);
/*
* Some 4xx implementations use the previously
* reserved bits in data_len to encode the MS
* 4-bits of a 36-bit physical address (ERPN)
* This must be initialized.
*/
fep
->
rx_desc
[
i
].
data_len
=
0
;
fep
->
rx_desc
[
i
].
ctrl
=
MAL_RX_CTRL_EMPTY
|
MAL_RX_CTRL_INTR
|
(
i
==
(
NUM_RX_BUFF
-
1
)
?
MAL_RX_CTRL_WRAP
:
0
);
}
while
((
i
=
(
i
+
1
)
%
NUM_RX_BUFF
)
!=
end
);
fep
->
rx_slot
=
i
;
}
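/*
 * Illustrative sizing note (not part of the original file), using the
 * SKB_RES and buffer-size values from ibm_emac_core.h: for the default
 * MTU, rx_buffer_size = 1500 + 14 + 4 = 1518, so each ring entry is
 * allocated as alloc_skb(1518 + 16, GFP_ATOMIC), optionally reserves
 * skb_res (0..2) bytes, and the DMA mapping covers
 * rx_buffer_size - skb_res bytes, as coded above.
 */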
static
void
emac_rx_csum
(
struct
net_device
*
dev
,
unsigned
short
ctrl
,
struct
sk_buff
*
skb
)
{
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
/* Exit if interface has no TAH engine */
if
(
!
fep
->
tah_dev
)
{
skb
->
ip_summed
=
CHECKSUM_NONE
;
return
;
}
/* Check for TCP/UDP/IP csum error */
if
(
ctrl
&
EMAC_CSUM_VER_ERROR
)
{
/* Let the stack verify checksum errors */
skb
->
ip_summed
=
CHECKSUM_NONE
;
/* adapter->hw_csum_err++; */
}
else
{
/* Csum is good */
skb
->
ip_summed
=
CHECKSUM_UNNECESSARY
;
/* adapter->hw_csum_good++; */
}
}
static
int
emac_rx_clean
(
struct
net_device
*
dev
)
{
int
i
,
b
,
bnum
,
buf
[
6
];
int
error
,
frame_length
;
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
unsigned
short
ctrl
;
i
=
fep
->
rx_slot
;
PKT_DEBUG
((
"emac_rx_clean() entry, rx_slot: %d
\n
"
,
fep
->
rx_slot
));
do
{
if
(
fep
->
rx_skb
[
i
]
==
NULL
)
continue
;
/* we have already handled the packet but failed to alloc a replacement skb */
/*
since rx_desc is in uncached mem we don't keep reading it directly
we pull out a local copy of ctrl and do the checks on the copy.
*/
ctrl
=
fep
->
rx_desc
[
i
].
ctrl
;
if
(
ctrl
&
MAL_RX_CTRL_EMPTY
)
break
;
/*we don't have any more ready packets */
if
(
EMAC_IS_BAD_RX_PACKET
(
ctrl
))
{
fep
->
stats
.
rx_errors
++
;
fep
->
stats
.
rx_dropped
++
;
if
(
ctrl
&
EMAC_RX_ST_OE
)
fep
->
stats
.
rx_fifo_errors
++
;
if
(
ctrl
&
EMAC_RX_ST_AE
)
fep
->
stats
.
rx_frame_errors
++
;
if
(
ctrl
&
EMAC_RX_ST_BFCS
)
fep
->
stats
.
rx_crc_errors
++
;
if
(
ctrl
&
(
EMAC_RX_ST_RP
|
EMAC_RX_ST_PTL
|
EMAC_RX_ST_ORE
|
EMAC_RX_ST_IRE
))
fep
->
stats
.
rx_length_errors
++
;
}
else
{
if
((
ctrl
&
(
MAL_RX_CTRL_FIRST
|
MAL_RX_CTRL_LAST
))
==
(
MAL_RX_CTRL_FIRST
|
MAL_RX_CTRL_LAST
))
{
/* Single descriptor packet */
emac_rx_csum
(
dev
,
ctrl
,
fep
->
rx_skb
[
i
]);
/* Send the skb up the chain. */
frame_length
=
fep
->
rx_desc
[
i
].
data_len
-
4
;
skb_put
(
fep
->
rx_skb
[
i
],
frame_length
);
fep
->
rx_skb
[
i
]
->
dev
=
dev
;
fep
->
rx_skb
[
i
]
->
protocol
=
eth_type_trans
(
fep
->
rx_skb
[
i
],
dev
);
error
=
netif_rx
(
fep
->
rx_skb
[
i
]);
if
((
error
==
NET_RX_DROP
)
||
(
error
==
NET_RX_BAD
))
{
fep
->
stats
.
rx_dropped
++
;
}
else
{
fep
->
stats
.
rx_packets
++
;
fep
->
stats
.
rx_bytes
+=
frame_length
;
}
fep
->
rx_skb
[
i
]
=
NULL
;
}
else
{
/* Multiple descriptor packet */
if
(
ctrl
&
MAL_RX_CTRL_FIRST
)
{
if
(
fep
->
rx_desc
[(
i
+
1
)
%
NUM_RX_BUFF
].
ctrl
&
MAL_RX_CTRL_EMPTY
)
break
;
bnum
=
0
;
buf
[
bnum
]
=
i
;
++
bnum
;
continue
;
}
if
(((
ctrl
&
MAL_RX_CTRL_FIRST
)
!=
MAL_RX_CTRL_FIRST
)
&&
((
ctrl
&
MAL_RX_CTRL_LAST
)
!=
MAL_RX_CTRL_LAST
))
{
if
(
fep
->
rx_desc
[(
i
+
1
)
%
NUM_RX_BUFF
].
ctrl
&
MAL_RX_CTRL_EMPTY
)
{
i
=
buf
[
0
];
break
;
}
buf
[
bnum
]
=
i
;
++
bnum
;
continue
;
}
if
(
ctrl
&
MAL_RX_CTRL_LAST
)
{
buf
[
bnum
]
=
i
;
++
bnum
;
skb_put
(
fep
->
rx_skb
[
buf
[
0
]],
fep
->
rx_desc
[
buf
[
0
]].
data_len
);
for
(
b
=
1
;
b
<
bnum
;
b
++
)
{
/*
* MAL is braindead, we need
* to copy the remainder
* of the packet from the
* latter descriptor buffers
* to the first skb. Then
* dispose of the source
* skbs.
*
* Once the stack is fixed
* to handle frags on most
* protocols we can generate
* a fragmented skb with
* no copies.
*/
memcpy
(
fep
->
rx_skb
[
buf
[
0
]]
->
data
+
fep
->
rx_skb
[
buf
[
0
]]
->
len
,
fep
->
rx_skb
[
buf
[
b
]]
->
data
,
fep
->
rx_desc
[
buf
[
b
]].
data_len
);
skb_put
(
fep
->
rx_skb
[
buf
[
0
]],
fep
->
rx_desc
[
buf
[
b
]].
data_len
);
dma_unmap_single
(
&
fep
->
ocpdev
->
dev
,
fep
->
rx_desc
[
buf
[
b
]].
data_ptr
,
fep
->
rx_desc
[
buf
[
b
]].
data_len
,
DMA_FROM_DEVICE
);
dev_kfree_skb
(
fep
->
rx_skb
[
buf
[
b
]]);
}
emac_rx_csum
(
dev
,
ctrl
,
fep
->
rx_skb
[
buf
[
0
]]);
fep
->
rx_skb
[
buf
[
0
]]
->
dev
=
dev
;
fep
->
rx_skb
[
buf
[
0
]]
->
protocol
=
eth_type_trans
(
fep
->
rx_skb
[
buf
[
0
]],
dev
);
error
=
netif_rx
(
fep
->
rx_skb
[
buf
[
0
]]);
if
((
error
==
NET_RX_DROP
)
||
(
error
==
NET_RX_BAD
))
{
fep
->
stats
.
rx_dropped
++
;
}
else
{
fep
->
stats
.
rx_packets
++
;
fep
->
stats
.
rx_bytes
+=
fep
->
rx_skb
[
buf
[
0
]]
->
len
;
}
for
(
b
=
0
;
b
<
bnum
;
b
++
)
fep
->
rx_skb
[
buf
[
b
]]
=
NULL
;
}
}
}
}
while
((
i
=
(
i
+
1
)
%
NUM_RX_BUFF
)
!=
fep
->
rx_slot
);
PKT_DEBUG
((
"emac_rx_clean() exit, rx_slot: %d
\n
"
,
fep
->
rx_slot
));
return
i
;
}
static
void
emac_rxeob_dev
(
void
*
param
,
u32
chanmask
)
{
struct
net_device
*
dev
=
param
;
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
unsigned
long
flags
;
int
n
;
spin_lock_irqsave
(
&
fep
->
lock
,
flags
);
if
((
n
=
emac_rx_clean
(
dev
))
!=
fep
->
rx_slot
)
emac_rx_fill
(
dev
,
n
);
spin_unlock_irqrestore
(
&
fep
->
lock
,
flags
);
}
/*
* This interrupt should never occur; we don't program
* the MAL for continuous mode.
*/
static
void
emac_txde_dev
(
void
*
param
,
u32
chanmask
)
{
struct
net_device
*
dev
=
param
;
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
printk
(
KERN_WARNING
"%s: transmit descriptor error
\n
"
,
dev
->
name
);
emac_mac_dump
(
dev
);
emac_mal_dump
(
dev
);
/* Reenable the transmit channel */
mal_enable_tx_channels
(
fep
->
mal
,
fep
->
commac
.
tx_chan_mask
);
}
/*
* This interrupt should be very rare at best. This occurs when
* the hardware has a problem with the receive descriptors. The manual
* states that it occurs when the hardware encounters a receive
* descriptor whose empty bit is not set. The recovery mechanism will be to
* traverse through the descriptors, handle any that are marked to be
* handled and reinitialize each along the way. At that point the driver
* will be restarted.
*/
static
void
emac_rxde_dev
(
void
*
param
,
u32
chanmask
)
{
struct
net_device
*
dev
=
param
;
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
unsigned
long
flags
;
if
(
net_ratelimit
())
{
printk
(
KERN_WARNING
"%s: receive descriptor error
\n
"
,
fep
->
ndev
->
name
);
emac_mac_dump
(
dev
);
emac_mal_dump
(
dev
);
emac_desc_dump
(
dev
);
}
/* Disable RX channel */
spin_lock_irqsave
(
&
fep
->
lock
,
flags
);
mal_disable_rx_channels
(
fep
->
mal
,
fep
->
commac
.
rx_chan_mask
);
/* For now, charge the error against all emacs */
fep
->
stats
.
rx_errors
++
;
/* so do we have any good packets still? */
emac_rx_clean
(
dev
);
/* When the interface is restarted it resets processing to the
* first descriptor in the table.
*/
fep
->
rx_slot
=
0
;
emac_rx_fill
(
dev
,
0
);
set_mal_dcrn
(
fep
->
mal
,
DCRN_MALRXEOBISR
,
fep
->
commac
.
rx_chan_mask
);
set_mal_dcrn
(
fep
->
mal
,
DCRN_MALRXDEIR
,
fep
->
commac
.
rx_chan_mask
);
/* Reenable the receive channels */
mal_enable_rx_channels
(
fep
->
mal
,
fep
->
commac
.
rx_chan_mask
);
spin_unlock_irqrestore
(
&
fep
->
lock
,
flags
);
}
static
irqreturn_t
emac_mac_irq
(
int
irq
,
void
*
dev_instance
,
struct
pt_regs
*
regs
)
{
struct
net_device
*
dev
=
dev_instance
;
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
emac_t
*
emacp
=
fep
->
emacp
;
unsigned
long
tmp_em0isr
;
/* EMAC interrupt */
tmp_em0isr
=
in_be32
(
&
emacp
->
em0isr
);
if
(
tmp_em0isr
&
(
EMAC_ISR_TE0
|
EMAC_ISR_TE1
))
{
/* This error is a hard transmit error - could retransmit */
fep
->
stats
.
tx_errors
++
;
/* Reenable the transmit channel */
mal_enable_tx_channels
(
fep
->
mal
,
fep
->
commac
.
tx_chan_mask
);
}
else
{
fep
->
stats
.
rx_errors
++
;
}
if
(
tmp_em0isr
&
EMAC_ISR_RP
)
fep
->
stats
.
rx_length_errors
++
;
if
(
tmp_em0isr
&
EMAC_ISR_ALE
)
fep
->
stats
.
rx_frame_errors
++
;
if
(
tmp_em0isr
&
EMAC_ISR_BFCS
)
fep
->
stats
.
rx_crc_errors
++
;
if
(
tmp_em0isr
&
EMAC_ISR_PTLE
)
fep
->
stats
.
rx_length_errors
++
;
if
(
tmp_em0isr
&
EMAC_ISR_ORE
)
fep
->
stats
.
rx_length_errors
++
;
if
(
tmp_em0isr
&
EMAC_ISR_TE0
)
fep
->
stats
.
tx_aborted_errors
++
;
emac_err_dump
(
dev
,
tmp_em0isr
);
out_be32
(
&
emacp
->
em0isr
,
tmp_em0isr
);
return
IRQ_HANDLED
;
}
static
int
emac_start_xmit
(
struct
sk_buff
*
skb
,
struct
net_device
*
dev
)
{
unsigned
short
ctrl
;
unsigned
long
flags
;
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
emac_t
*
emacp
=
fep
->
emacp
;
int
len
=
skb
->
len
;
unsigned
int
offset
=
0
,
size
,
f
,
tx_slot_first
;
unsigned
int
nr_frags
=
skb_shinfo
(
skb
)
->
nr_frags
;
spin_lock_irqsave
(
&
fep
->
lock
,
flags
);
len
-=
skb
->
data_len
;
if
((
fep
->
tx_cnt
+
nr_frags
+
len
/
DESC_BUF_SIZE
+
1
)
>
NUM_TX_BUFF
)
{
PKT_DEBUG
((
"emac_start_xmit() stopping queue
\n
"
));
netif_stop_queue
(
dev
);
spin_unlock_irqrestore
(
&
fep
->
lock
,
flags
);
restore_flags
(
flags
);
return
-
EBUSY
;
}
tx_slot_first
=
fep
->
tx_slot
;
while
(
len
)
{
size
=
min
(
len
,
DESC_BUF_SIZE
);
fep
->
tx_desc
[
fep
->
tx_slot
].
data_len
=
(
short
)
size
;
fep
->
tx_desc
[
fep
->
tx_slot
].
data_ptr
=
(
unsigned
char
*
)
dma_map_single
(
&
fep
->
ocpdev
->
dev
,
(
void
*
)((
unsigned
int
)
skb
->
data
+
offset
),
size
,
DMA_TO_DEVICE
);
ctrl
=
EMAC_TX_CTRL_DFLT
;
if
(
fep
->
tx_slot
!=
tx_slot_first
)
ctrl
|=
MAL_TX_CTRL_READY
;
if
((
NUM_TX_BUFF
-
1
)
==
fep
->
tx_slot
)
ctrl
|=
MAL_TX_CTRL_WRAP
;
if
(
!
nr_frags
&&
(
len
==
size
))
{
ctrl
|=
MAL_TX_CTRL_LAST
;
fep
->
tx_skb
[
fep
->
tx_slot
]
=
skb
;
}
if
(
skb
->
ip_summed
==
CHECKSUM_HW
)
ctrl
|=
EMAC_TX_CTRL_TAH_CSUM
;
fep
->
tx_desc
[
fep
->
tx_slot
].
ctrl
=
ctrl
;
len
-=
size
;
offset
+=
size
;
/* Bump tx count */
if
(
++
fep
->
tx_cnt
==
NUM_TX_BUFF
)
netif_stop_queue
(
dev
);
/* Next descriptor */
if
(
++
fep
->
tx_slot
==
NUM_TX_BUFF
)
fep
->
tx_slot
=
0
;
}
for
(
f
=
0
;
f
<
nr_frags
;
f
++
)
{
struct
skb_frag_struct
*
frag
;
frag
=
&
skb_shinfo
(
skb
)
->
frags
[
f
];
len
=
frag
->
size
;
offset
=
0
;
while
(
len
)
{
size
=
min
(
len
,
DESC_BUF_SIZE
);
dma_map_page
(
&
fep
->
ocpdev
->
dev
,
frag
->
page
,
frag
->
page_offset
+
offset
,
size
,
DMA_TO_DEVICE
);
ctrl
=
EMAC_TX_CTRL_DFLT
|
MAL_TX_CTRL_READY
;
if
((
NUM_TX_BUFF
-
1
)
==
fep
->
tx_slot
)
ctrl
|=
MAL_TX_CTRL_WRAP
;
if
((
f
==
(
nr_frags
-
1
))
&&
(
len
==
size
))
{
ctrl
|=
MAL_TX_CTRL_LAST
;
fep
->
tx_skb
[
fep
->
tx_slot
]
=
skb
;
}
if
(
skb
->
ip_summed
==
CHECKSUM_HW
)
ctrl
|=
EMAC_TX_CTRL_TAH_CSUM
;
fep
->
tx_desc
[
fep
->
tx_slot
].
data_len
=
(
short
)
size
;
fep
->
tx_desc
[
fep
->
tx_slot
].
data_ptr
=
(
char
*
)((
page_to_pfn
(
frag
->
page
)
<<
PAGE_SHIFT
)
+
frag
->
page_offset
+
offset
);
fep
->
tx_desc
[
fep
->
tx_slot
].
ctrl
=
ctrl
;
len
-=
size
;
offset
+=
size
;
/* Bump tx count */
if
(
++
fep
->
tx_cnt
==
NUM_TX_BUFF
)
netif_stop_queue
(
dev
);
/* Next descriptor */
if
(
++
fep
->
tx_slot
==
NUM_TX_BUFF
)
fep
->
tx_slot
=
0
;
}
}
/*
* Deferred set READY on first descriptor of packet to
* avoid TX MAL race.
*/
fep
->
tx_desc
[
tx_slot_first
].
ctrl
|=
MAL_TX_CTRL_READY
;
/* Send the packet out. */
out_be32
(
&
emacp
->
em0tmr0
,
EMAC_TMR0_XMIT
);
fep
->
stats
.
tx_packets
++
;
fep
->
stats
.
tx_bytes
+=
skb
->
len
;
PKT_DEBUG
((
emac_start_xmit() exit\n
));
spin_unlock_irqrestore
(
&
fep
->
lock
,
flags
);
return
0
;
}
static
int
emac_adjust_to_link
(
struct
ocp_enet_private
*
fep
)
{
emac_t
*
emacp
=
fep
->
emacp
;
struct
ibm_ocp_rgmii
*
rgmii
;
unsigned
long
mode_reg
;
int
full_duplex
,
speed
;
full_duplex
=
0
;
speed
=
SPEED_10
;
/* set mode register 1 defaults */
mode_reg
=
EMAC_M1_DEFAULT
;
/* Read link mode on PHY */
if
(
fep
->
phy_mii
.
def
->
ops
->
read_link
(
&
fep
->
phy_mii
)
==
0
)
{
/* If an error occurred, we don't deal with it yet */
full_duplex
=
(
fep
->
phy_mii
.
duplex
==
DUPLEX_FULL
);
speed
=
fep
->
phy_mii
.
speed
;
}
if
(
fep
->
rgmii_dev
)
rgmii
=
RGMII_PRIV
(
fep
->
rgmii_dev
);
/* set speed (default is 10Mb) */
switch
(
speed
)
{
case
SPEED_1000
:
mode_reg
|=
EMAC_M1_JUMBO_ENABLE
|
EMAC_M1_RFS_16K
;
if
((
rgmii
->
mode
[
fep
->
rgmii_input
]
==
RTBI
)
||
(
rgmii
->
mode
[
fep
->
rgmii_input
]
==
TBI
))
mode_reg
|=
EMAC_M1_MF_1000GPCS
;
else
mode_reg
|=
EMAC_M1_MF_1000MBPS
;
if
(
fep
->
rgmii_dev
)
emac_rgmii_port_speed
(
fep
->
rgmii_dev
,
fep
->
rgmii_input
,
1000
);
break
;
case
SPEED_100
:
mode_reg
|=
EMAC_M1_MF_100MBPS
|
EMAC_M1_RFS_4K
;
if
(
fep
->
rgmii_dev
)
emac_rgmii_port_speed
(
fep
->
rgmii_dev
,
fep
->
rgmii_input
,
100
);
if
(
fep
->
zmii_dev
)
emac_zmii_port_speed
(
fep
->
zmii_dev
,
fep
->
zmii_input
,
100
);
break
;
case
SPEED_10
:
default:
mode_reg
=
(
mode_reg
&
~
EMAC_M1_MF_100MBPS
)
|
EMAC_M1_RFS_4K
;
if
(
fep
->
rgmii_dev
)
emac_rgmii_port_speed
(
fep
->
rgmii_dev
,
fep
->
rgmii_input
,
10
);
if
(
fep
->
zmii_dev
)
emac_zmii_port_speed
(
fep
->
zmii_dev
,
fep
->
zmii_input
,
10
);
}
if
(
full_duplex
)
mode_reg
|=
EMAC_M1_FDE
|
EMAC_M1_EIFC
|
EMAC_M1_IST
;
else
mode_reg
&=
~
(
EMAC_M1_FDE
|
EMAC_M1_EIFC
|
EMAC_M1_ILE
);
LINK_DEBUG
((
"%s: adjust to link, speed: %d, duplex: %d, opened: %d
\n
"
,
fep
->
ndev
->
name
,
speed
,
full_duplex
,
fep
->
opened
));
printk
(
KERN_INFO
"%s: Speed: %d, %s duplex.
\n
"
,
fep
->
ndev
->
name
,
speed
,
full_duplex
?
"Full"
:
"Half"
);
if
(
fep
->
opened
)
out_be32
(
&
emacp
->
em0mr1
,
mode_reg
);
return
0
;
}
static
int
emac_set_mac_address
(
struct
net_device
*
ndev
,
void
*
p
)
{
struct
ocp_enet_private
*
fep
=
ndev
->
priv
;
emac_t
*
emacp
=
fep
->
emacp
;
struct
sockaddr
*
addr
=
p
;
if
(
!
is_valid_ether_addr
(
addr
->
sa_data
))
return
-
EADDRNOTAVAIL
;
memcpy
(
ndev
->
dev_addr
,
addr
->
sa_data
,
ndev
->
addr_len
);
/* set the high address */
out_be32
(
&
emacp
->
em0iahr
,
(
fep
->
ndev
->
dev_addr
[
0
]
<<
8
)
|
fep
->
ndev
->
dev_addr
[
1
]);
/* set the low address */
out_be32
(
&
emacp
->
em0ialr
,
(
fep
->
ndev
->
dev_addr
[
2
]
<<
24
)
|
(
fep
->
ndev
->
dev_addr
[
3
]
<<
16
)
|
(
fep
->
ndev
->
dev_addr
[
4
]
<<
8
)
|
fep
->
ndev
->
dev_addr
[
5
]);
return
0
;
}
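/*
 * Illustrative note (not part of the original file): the six MAC address
 * bytes are packed into the two EMAC address registers as shown above;
 * for example, for 00:11:22:33:44:55:
 *
 *	em0iahr = (0x00 << 8) | 0x11;					// 0x00000011
 *	em0ialr = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55;	// 0x22334455
 */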
static
int
emac_change_mtu
(
struct
net_device
*
dev
,
int
new_mtu
)
{
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
int
old_mtu
=
dev
->
mtu
;
emac_t
*
emacp
=
fep
->
emacp
;
u32
em0mr0
;
int
i
,
full
;
unsigned
long
flags
;
if
((
new_mtu
<
EMAC_MIN_MTU
)
||
(
new_mtu
>
EMAC_MAX_MTU
))
{
printk
(
KERN_ERR
"emac: Invalid MTU setting, MTU must be between %d and %d
\n
"
,
EMAC_MIN_MTU
,
EMAC_MAX_MTU
);
return
-
EINVAL
;
}
if
(
old_mtu
!=
new_mtu
&&
netif_running
(
dev
))
{
/* Stop rx engine */
em0mr0
=
in_be32
(
&
emacp
->
em0mr0
);
out_be32
(
&
emacp
->
em0mr0
,
em0mr0
&
~
EMAC_M0_RXE
);
/* Wait for descriptors to be empty */
do
{
full
=
0
;
for
(
i
=
0
;
i
<
NUM_RX_BUFF
;
i
++
)
if
(
!
(
fep
->
rx_desc
[
i
].
ctrl
&
MAL_RX_CTRL_EMPTY
))
{
printk
(
KERN_NOTICE
"emac: RX ring is still full
\n
"
);
full
=
1
;
}
}
while
(
full
);
spin_lock_irqsave
(
&
fep
->
lock
,
flags
);
mal_disable_rx_channels
(
fep
->
mal
,
fep
->
commac
.
rx_chan_mask
);
/* Destroy all old rx skbs */
for
(
i
=
0
;
i
<
NUM_RX_BUFF
;
i
++
)
{
dma_unmap_single
(
&
fep
->
ocpdev
->
dev
,
fep
->
rx_desc
[
i
].
data_ptr
,
fep
->
rx_desc
[
i
].
data_len
,
DMA_FROM_DEVICE
);
dev_kfree_skb
(
fep
->
rx_skb
[
i
]);
fep
->
rx_skb
[
i
]
=
NULL
;
}
/* Set new rx_buffer_size and advertise new mtu */
fep
->
rx_buffer_size
=
new_mtu
+
ENET_HEADER_SIZE
+
ENET_FCS_SIZE
;
dev
->
mtu
=
new_mtu
;
/* Re-init rx skbs */
fep
->
rx_slot
=
0
;
emac_rx_fill
(
dev
,
0
);
/* Restart the rx engine */
mal_enable_rx_channels
(
fep
->
mal
,
fep
->
commac
.
rx_chan_mask
);
out_be32
(
&
emacp
->
em0mr0
,
em0mr0
|
EMAC_M0_RXE
);
spin_unlock_irqrestore
(
&
fep
->
lock
,
flags
);
}
return
0
;
}
static
void
__emac_set_multicast_list
(
struct
net_device
*
dev
)
{
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
emac_t
*
emacp
=
fep
->
emacp
;
u32
rmr
=
in_be32
(
&
emacp
->
em0rmr
);
/* First clear all special bits, they can be set later */
rmr
&=
~
(
EMAC_RMR_PME
|
EMAC_RMR_PMME
|
EMAC_RMR_MAE
);
if
(
dev
->
flags
&
IFF_PROMISC
)
{
rmr
|=
EMAC_RMR_PME
;
}
else
if
(
dev
->
flags
&
IFF_ALLMULTI
||
32
<
dev
->
mc_count
)
{
/*
* Must be setting up to use multicast
* Now check for promiscuous multicast
*/
rmr
|=
EMAC_RMR_PMME
;
}
else
if
(
dev
->
flags
&
IFF_MULTICAST
&&
0
<
dev
->
mc_count
)
{
unsigned
short
em0gaht
[
4
]
=
{
0
,
0
,
0
,
0
};
struct
dev_mc_list
*
dmi
;
/* Need to hash on the multicast address. */
for
(
dmi
=
dev
->
mc_list
;
dmi
;
dmi
=
dmi
->
next
)
{
unsigned
long
mc_crc
;
unsigned
int
bit_number
;
mc_crc
=
ether_crc
(
6
,
(
char
*
)
dmi
->
dmi_addr
);
bit_number
=
63
-
(
mc_crc
>>
26
);
/* MSB: 0 LSB: 63 */
em0gaht
[
bit_number
>>
4
]
|=
0x8000
>>
(
bit_number
&
0x0f
);
}
emacp
->
em0gaht1
=
em0gaht
[
0
];
emacp
->
em0gaht2
=
em0gaht
[
1
];
emacp
->
em0gaht3
=
em0gaht
[
2
];
emacp
->
em0gaht4
=
em0gaht
[
3
];
/* Turn on multicast addressing */
rmr
|=
EMAC_RMR_MAE
;
}
out_be32
(
&
emacp
->
em0rmr
,
rmr
);
}
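/*
 * Illustrative note (not part of the original file): the group hash used
 * above maps each multicast address onto one of 64 bits, e.g.:
 *
 *	mc_crc = ether_crc(6, addr);		// 32-bit CRC of the address
 *	bit_number = 63 - (mc_crc >> 26);	// top 6 CRC bits, MSB = bit 0
 *	em0gaht[bit_number >> 4] |= 0x8000 >> (bit_number & 0x0f);
 *
 * so bit_number selects one bit spread across the four 16-bit GAHT registers.
 */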
static
int
emac_init_tah
(
struct
ocp_enet_private
*
fep
)
{
tah_t
*
tahp
;
/* Initialize TAH and enable checksum verification */
tahp
=
(
tah_t
*
)
ioremap
(
fep
->
tah_dev
->
def
->
paddr
,
sizeof
(
*
tahp
));
if
(
tahp
==
NULL
)
{
printk
(
KERN_ERR
"tah%d: Cannot ioremap TAH registers!
\n
"
,
fep
->
tah_dev
->
def
->
index
);
return
-
ENOMEM
;
}
out_be32
(
&
tahp
->
tah_mr
,
TAH_MR_SR
);
/* wait for reset to complete */
while
(
in_be32
(
&
tahp
->
tah_mr
)
&
TAH_MR_SR
)
;
/* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
out_be32
(
&
tahp
->
tah_mr
,
TAH_MR_CVR
|
TAH_MR_ST_768
|
TAH_MR_TFS_10KB
|
TAH_MR_DTFP
|
TAH_MR_DIG
);
iounmap
(
&
tahp
);
return
0
;
}
static
void
emac_init_rings
(
struct
net_device
*
dev
)
{
struct
ocp_enet_private
*
ep
=
dev
->
priv
;
int
loop
;
ep
->
tx_desc
=
(
struct
mal_descriptor
*
)((
char
*
)
ep
->
mal
->
tx_virt_addr
+
(
ep
->
mal_tx_chan
*
MAL_DT_ALIGN
));
ep
->
rx_desc
=
(
struct
mal_descriptor
*
)((
char
*
)
ep
->
mal
->
rx_virt_addr
+
(
ep
->
mal_rx_chan
*
MAL_DT_ALIGN
));
/* Fill in the transmit descriptor ring. */
for
(
loop
=
0
;
loop
<
NUM_TX_BUFF
;
loop
++
)
{
if
(
ep
->
tx_skb
[
loop
])
{
dma_unmap_single
(
&
ep
->
ocpdev
->
dev
,
ep
->
tx_desc
[
loop
].
data_ptr
,
ep
->
tx_desc
[
loop
].
data_len
,
DMA_TO_DEVICE
);
dev_kfree_skb_irq
(
ep
->
tx_skb
[
loop
]);
}
ep
->
tx_skb
[
loop
]
=
NULL
;
ep
->
tx_desc
[
loop
].
ctrl
=
0
;
ep
->
tx_desc
[
loop
].
data_len
=
0
;
ep
->
tx_desc
[
loop
].
data_ptr
=
NULL
;
}
ep
->
tx_desc
[
loop
-
1
].
ctrl
|=
MAL_TX_CTRL_WRAP
;
/* Format the receive descriptor ring. */
ep
->
rx_slot
=
0
;
/* Default is MTU=1500 + Ethernet overhead */
ep
->
rx_buffer_size
=
ENET_DEF_BUF_SIZE
;
emac_rx_fill
(
dev
,
0
);
if
(
ep
->
rx_slot
!=
0
)
{
printk
(
KERN_ERR
%s: Not enough mem for RxChain during Open?
\n
"
,
dev
->
name
);
/*We couldn't fill the ring at startup?
*We could clean up and fail to open but right now we will try to
*carry on. It may be a sign of a bad NUM_RX_BUFF value
*/
}
ep
->
tx_cnt
=
0
;
ep
->
tx_slot
=
0
;
ep
->
ack_slot
=
0
;
}
static
void
emac_reset_configure
(
struct
ocp_enet_private
*
fep
)
{
emac_t
*
emacp
=
fep
->
emacp
;
int
i
;
mal_disable_tx_channels
(
fep
->
mal
,
fep
->
commac
.
tx_chan_mask
);
mal_disable_rx_channels
(
fep
->
mal
,
fep
->
commac
.
rx_chan_mask
);
/*
* Check for a link, some PHYs don't provide a clock if
* no link is present. Some EMACs will not come out of
* soft reset without a PHY clock present.
*/
if
(
fep
->
phy_mii
.
def
->
ops
->
poll_link
(
&
fep
->
phy_mii
))
{
/* Reset the EMAC */
out_be32
(
&
emacp
->
em0mr0
,
EMAC_M0_SRST
);
udelay
(
20
);
for
(
i
=
0
;
i
<
100
;
i
++
)
{
if
((
in_be32
(
&
emacp
->
em0mr0
)
&
EMAC_M0_SRST
)
==
0
)
break
;
udelay
(
10
);
}
if
(
i
>=
100
)
{
printk
(
KERN_ERR
"%s: Cannot reset EMAC
\n
"
,
fep
->
ndev
->
name
);
return
;
}
}
/* Switch IRQs off for now */
out_be32
(
&
emacp
->
em0iser
,
0
);
/* Configure MAL rx channel */
mal_set_rcbs
(
fep
->
mal
,
fep
->
mal_rx_chan
,
DESC_BUF_SIZE_REG
);
/* set the high address */
out_be32
(
&
emacp
->
em0iahr
,
(
fep
->
ndev
->
dev_addr
[
0
]
<<
8
)
|
fep
->
ndev
->
dev_addr
[
1
]);
/* set the low address */
out_be32
(
&
emacp
->
em0ialr
,
(
fep
->
ndev
->
dev_addr
[
2
]
<<
24
)
|
(
fep
->
ndev
->
dev_addr
[
3
]
<<
16
)
|
(
fep
->
ndev
->
dev_addr
[
4
]
<<
8
)
|
fep
->
ndev
->
dev_addr
[
5
]);
/* Adjust to link */
if
(
netif_carrier_ok
(
fep
->
ndev
))
emac_adjust_to_link
(
fep
);
/* enable broadcast/individual address and RX FIFO defaults */
out_be32
(
&
emacp
->
em0rmr
,
EMAC_RMR_DEFAULT
);
/* set transmit request threshold register */
out_be32
(
&
emacp
->
em0trtr
,
EMAC_TRTR_DEFAULT
);
/* Reconfigure multicast */
__emac_set_multicast_list
(
fep
->
ndev
);
/* Set receiver/transmitter defaults */
out_be32
(
&
emacp
->
em0rwmr
,
EMAC_RWMR_DEFAULT
);
out_be32
(
&
emacp
->
em0tmr0
,
EMAC_TMR0_DEFAULT
);
out_be32
(
&
emacp
->
em0tmr1
,
EMAC_TMR1_DEFAULT
);
/* set frame gap */
out_be32
(
&
emacp
->
em0ipgvr
,
CONFIG_IBM_EMAC_FGAP
);
/* Init ring buffers */
emac_init_rings
(
fep
->
ndev
);
}
static
void
emac_kick
(
struct
ocp_enet_private
*
fep
)
{
emac_t
*
emacp
=
fep
->
emacp
;
unsigned
long
emac_ier
;
emac_ier
=
EMAC_ISR_PP
|
EMAC_ISR_BP
|
EMAC_ISR_RP
|
EMAC_ISR_SE
|
EMAC_ISR_PTLE
|
EMAC_ISR_ALE
|
EMAC_ISR_BFCS
|
EMAC_ISR_ORE
|
EMAC_ISR_IRE
;
out_be32
(
&
emacp
->
em0iser
,
emac_ier
);
/* enable all MAL transmit and receive channels */
mal_enable_tx_channels
(
fep
->
mal
,
fep
->
commac
.
tx_chan_mask
);
mal_enable_rx_channels
(
fep
->
mal
,
fep
->
commac
.
rx_chan_mask
);
/* set transmit and receive enable */
out_be32
(
&
emacp
->
em0mr0
,
EMAC_M0_TXE
|
EMAC_M0_RXE
);
}
static
void
emac_start_link
(
struct
ocp_enet_private
*
fep
,
struct
ethtool_cmd
*
ep
)
{
u32
advertise
;
int
autoneg
;
int
forced_speed
;
int
forced_duplex
;
/* Default advertise */
advertise
=
ADVERTISED_10baseT_Half
|
ADVERTISED_10baseT_Full
|
ADVERTISED_100baseT_Half
|
ADVERTISED_100baseT_Full
|
ADVERTISED_1000baseT_Half
|
ADVERTISED_1000baseT_Full
;
autoneg
=
fep
->
want_autoneg
;
forced_speed
=
fep
->
phy_mii
.
speed
;
forced_duplex
=
fep
->
phy_mii
.
duplex
;
/* Setup link parameters */
if
(
ep
)
{
if
(
ep
->
autoneg
==
AUTONEG_ENABLE
)
{
advertise
=
ep
->
advertising
;
autoneg
=
1
;
}
else
{
autoneg
=
0
;
forced_speed
=
ep
->
speed
;
forced_duplex
=
ep
->
duplex
;
}
}
/* Configure PHY & start aneg */
fep
->
want_autoneg
=
autoneg
;
if
(
autoneg
)
{
LINK_DEBUG
((
"%s: start link aneg, advertise: 0x%x
\n
"
,
fep
->
ndev
->
name
,
advertise
));
fep
->
phy_mii
.
def
->
ops
->
setup_aneg
(
&
fep
->
phy_mii
,
advertise
);
}
else
{
LINK_DEBUG
((
"%s: start link forced, speed: %d, duplex: %d
\n
"
,
fep
->
ndev
->
name
,
forced_speed
,
forced_duplex
));
fep
->
phy_mii
.
def
->
ops
->
setup_forced
(
&
fep
->
phy_mii
,
forced_speed
,
forced_duplex
);
}
fep
->
timer_ticks
=
0
;
mod_timer
(
&
fep
->
link_timer
,
jiffies
+
HZ
);
}
static
void
emac_link_timer
(
unsigned
long
data
)
{
struct
ocp_enet_private
*
fep
=
(
struct
ocp_enet_private
*
)
data
;
int
link
;
if
(
fep
->
going_away
)
return
;
spin_lock_irq
(
&
fep
->
lock
);
link
=
fep
->
phy_mii
.
def
->
ops
->
poll_link
(
&
fep
->
phy_mii
);
LINK_DEBUG
((
"%s: poll_link: %d
\n
"
,
fep
->
ndev
->
name
,
link
));
if
(
link
==
netif_carrier_ok
(
fep
->
ndev
))
{
if
(
!
link
&&
fep
->
want_autoneg
&&
(
++
fep
->
timer_ticks
)
>
10
)
emac_start_link
(
fep
,
NULL
);
goto
out
;
}
printk
(
KERN_INFO
"%s: Link is %s
\n
"
,
fep
->
ndev
->
name
,
link
?
"Up"
:
"Down"
);
if
(
link
)
{
netif_carrier_on
(
fep
->
ndev
);
/* Chip needs a full reset on config change. That sucks, so I
* should ultimately move that to some tasklet to limit
* latency peaks caused by this code
*/
emac_reset_configure
(
fep
);
if
(
fep
->
opened
)
emac_kick
(
fep
);
}
else
{
fep
->
timer_ticks
=
0
;
netif_carrier_off
(
fep
->
ndev
);
}
out:
mod_timer
(
&
fep
->
link_timer
,
jiffies
+
HZ
);
spin_unlock_irq
(
&
fep
->
lock
);
}
static
void
emac_set_multicast_list
(
struct
net_device
*
dev
)
{
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
spin_lock_irq
(
&
fep
->
lock
);
__emac_set_multicast_list
(
dev
);
spin_unlock_irq
(
&
fep
->
lock
);
}
static
int
emac_get_settings
(
struct
net_device
*
ndev
,
struct
ethtool_cmd
*
cmd
)
{
struct
ocp_enet_private
*
fep
=
ndev
->
priv
;
cmd
->
supported
=
fep
->
phy_mii
.
def
->
features
;
cmd
->
port
=
PORT_MII
;
cmd
->
transceiver
=
XCVR_EXTERNAL
;
cmd
->
phy_address
=
fep
->
mii_phy_addr
;
spin_lock_irq
(
&
fep
->
lock
);
cmd
->
autoneg
=
fep
->
want_autoneg
;
cmd
->
speed
=
fep
->
phy_mii
.
speed
;
cmd
->
duplex
=
fep
->
phy_mii
.
duplex
;
spin_unlock_irq
(
&
fep
->
lock
);
return
0
;
}
static
int
emac_set_settings
(
struct
net_device
*
ndev
,
struct
ethtool_cmd
*
cmd
)
{
struct
ocp_enet_private
*
fep
=
ndev
->
priv
;
unsigned
long
features
=
fep
->
phy_mii
.
def
->
features
;
if
(
!
capable
(
CAP_NET_ADMIN
))
return
-
EPERM
;
if
(
cmd
->
autoneg
!=
AUTONEG_ENABLE
&&
cmd
->
autoneg
!=
AUTONEG_DISABLE
)
return
-
EINVAL
;
if
(
cmd
->
autoneg
==
AUTONEG_ENABLE
&&
cmd
->
advertising
==
0
)
return
-
EINVAL
;
if
(
cmd
->
duplex
!=
DUPLEX_HALF
&&
cmd
->
duplex
!=
DUPLEX_FULL
)
return
-
EINVAL
;
if
(
cmd
->
autoneg
==
AUTONEG_DISABLE
)
switch
(
cmd
->
speed
)
{
case
SPEED_10
:
if
(
cmd
->
duplex
==
DUPLEX_HALF
&&
(
features
&
SUPPORTED_10baseT_Half
)
==
0
)
return
-
EINVAL
;
if
(
cmd
->
duplex
==
DUPLEX_FULL
&&
(
features
&
SUPPORTED_10baseT_Full
)
==
0
)
return
-
EINVAL
;
break
;
case
SPEED_100
:
if
(
cmd
->
duplex
==
DUPLEX_HALF
&&
(
features
&
SUPPORTED_100baseT_Half
)
==
0
)
return
-
EINVAL
;
if
(
cmd
->
duplex
==
DUPLEX_FULL
&&
(
features
&
SUPPORTED_100baseT_Full
)
==
0
)
return
-
EINVAL
;
break
;
case
SPEED_1000
:
if
(
cmd
->
duplex
==
DUPLEX_HALF
&&
(
features
&
SUPPORTED_1000baseT_Half
)
==
0
)
return
-
EINVAL
;
if
(
cmd
->
duplex
==
DUPLEX_FULL
&&
(
features
&
SUPPORTED_1000baseT_Full
)
==
0
)
return
-
EINVAL
;
break
;
default:
return
-
EINVAL
;
}
else
if
((
features
&
SUPPORTED_Autoneg
)
==
0
)
return
-
EINVAL
;
spin_lock_irq
(
&
fep
->
lock
);
emac_start_link
(
fep
,
cmd
);
spin_unlock_irq
(
&
fep
->
lock
);
return
0
;
}
static
void
emac_get_drvinfo
(
struct
net_device
*
ndev
,
struct
ethtool_drvinfo
*
info
)
{
struct
ocp_enet_private
*
fep
=
ndev
->
priv
;
strcpy
(
info
->
driver
,
DRV_NAME
);
strcpy
(
info
->
version
,
DRV_VERSION
);
info
->
fw_version
[
0
]
=
'\0'
;
sprintf
(
info
->
bus_info
,
"IBM EMAC %d"
,
fep
->
ocpdev
->
def
->
index
);
info
->
regdump_len
=
0
;
}
static int emac_nway_reset(struct net_device *ndev)
{
	struct ocp_enet_private *fep = ndev->priv;

	if (!fep->want_autoneg)
		return -EINVAL;
	spin_lock_irq(&fep->lock);
	emac_start_link(fep, NULL);
	spin_unlock_irq(&fep->lock);
	return 0;
}

static u32 emac_get_link(struct net_device *ndev)
{
	return netif_carrier_ok(ndev);
}

static struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_get_settings,
	.set_settings = emac_set_settings,
	.get_drvinfo = emac_get_drvinfo,
	.nway_reset = emac_nway_reset,
	.get_link = emac_get_link
};
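/*
 * Illustrative note (not part of the original file): these ethtool_ops
 * are what the userspace ethtool utility ends up calling; the interface
 * name below is a placeholder:
 *
 *	ethtool eth0				-> emac_get_settings / emac_get_link
 *	ethtool -s eth0 speed 100 duplex full autoneg off
 *						-> emac_set_settings
 *	ethtool -r eth0				-> emac_nway_reset
 */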
static
int
emac_ioctl
(
struct
net_device
*
dev
,
struct
ifreq
*
rq
,
int
cmd
)
{
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
uint
*
data
=
(
uint
*
)
&
rq
->
ifr_data
;
switch
(
cmd
)
{
case
SIOCGMIIPHY
:
data
[
0
]
=
fep
->
mii_phy_addr
;
/* Fall through */
case
SIOCGMIIREG
:
data
[
3
]
=
emac_phy_read
(
dev
,
fep
->
mii_phy_addr
,
data
[
1
]);
return
0
;
case
SIOCSMIIREG
:
if
(
!
capable
(
CAP_NET_ADMIN
))
return
-
EPERM
;
emac_phy_write
(
dev
,
fep
->
mii_phy_addr
,
data
[
1
],
data
[
2
]);
return
0
;
default:
return
-
EOPNOTSUPP
;
}
}
static
int
emac_open
(
struct
net_device
*
dev
)
{
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
int
rc
;
spin_lock_irq
(
&
fep
->
lock
);
fep
->
opened
=
1
;
netif_carrier_off
(
dev
);
/* Reset & configure the chip */
emac_reset_configure
(
fep
);
spin_unlock_irq
(
&
fep
->
lock
);
/* Request our interrupt lines */
rc
=
request_irq
(
dev
->
irq
,
emac_mac_irq
,
0
,
"IBM EMAC MAC"
,
dev
);
if
(
rc
!=
0
)
{
printk
(
"dev->irq %d failed
\n
"
,
dev
->
irq
);
goto
bail
;
}
/* Kick the chip rx & tx channels into life */
spin_lock_irq
(
&
fep
->
lock
);
emac_kick
(
fep
);
spin_unlock_irq
(
&
fep
->
lock
);
netif_start_queue
(
dev
);
bail:
return
rc
;
}
static
int
emac_close
(
struct
net_device
*
dev
)
{
struct
ocp_enet_private
*
fep
=
dev
->
priv
;
emac_t
*
emacp
=
fep
->
emacp
;
/* XXX Stop IRQ emitting here */
spin_lock_irq
(
&
fep
->
lock
);
fep
->
opened
=
0
;
mal_disable_tx_channels
(
fep
->
mal
,
fep
->
commac
.
tx_chan_mask
);
mal_disable_rx_channels
(
fep
->
mal
,
fep
->
commac
.
rx_chan_mask
);
netif_carrier_off
(
dev
);
netif_stop_queue
(
dev
);
/*
* Check for a link, some PHYs don't provide a clock if
* no link is present. Some EMACs will not come out of
* soft reset without a PHY clock present.
*/
if
(
fep
->
phy_mii
.
def
->
ops
->
poll_link
(
&
fep
->
phy_mii
))
{
out_be32
(
&
emacp
->
em0mr0
,
EMAC_M0_SRST
);
udelay
(
10
);
if
(
emacp
->
em0mr0
&
EMAC_M0_SRST
)
{
/*not sure what to do here hopefully it clears before another open */
printk
(
KERN_ERR
"%s: Phy SoftReset didn't clear, no link?
\n
"
,
dev
->
name
);
}
}
/* Free the irq's */
free_irq
(
dev
->
irq
,
dev
);
spin_unlock_irq
(
&
fep
->
lock
);
return
0
;
}
static
void
emac_remove
(
struct
ocp_device
*
ocpdev
)
{
struct
net_device
*
dev
=
ocp_get_drvdata
(
ocpdev
);
struct
ocp_enet_private
*
ep
=
dev
->
priv
;
/* FIXME: locking, races, ... */
ep
->
going_away
=
1
;
ocp_set_drvdata
(
ocpdev
,
NULL
);
if
(
ep
->
rgmii_dev
)
emac_close_rgmii
(
ep
->
rgmii_dev
);
if
(
ep
->
zmii_dev
)
emac_close_zmii
(
ep
->
zmii_dev
);
unregister_netdev
(
dev
);
del_timer_sync
(
&
ep
->
link_timer
);
mal_unregister_commac
(
ep
->
mal
,
&
ep
->
commac
);
iounmap
((
void
*
)
ep
->
emacp
);
kfree
(
dev
);
}
struct
mal_commac_ops
emac_commac_ops
=
{
.
txeob
=
&
emac_txeob_dev
,
.
txde
=
&
emac_txde_dev
,
.
rxeob
=
&
emac_rxeob_dev
,
.
rxde
=
&
emac_rxde_dev
,
};
static
int
emac_init_device
(
struct
ocp_device
*
ocpdev
,
struct
ibm_ocp_mal
*
mal
)
{
int
deferred_init
=
0
;
int
rc
=
0
,
i
;
struct
net_device
*
ndev
;
struct
ocp_enet_private
*
ep
;
struct
ocp_func_emac_data
*
emacdata
;
int
commac_reg
=
0
;
u32
phy_map
;
emacdata
=
(
struct
ocp_func_emac_data
*
)
ocpdev
->
def
->
additions
;
if
(
!
emacdata
)
{
printk
(
KERN_ERR
"emac%d: Missing additional data!
\n
"
,
ocpdev
->
def
->
index
);
return
-
ENODEV
;
}
/* Allocate our net_device structure */
ndev
=
alloc_etherdev
(
sizeof
(
struct
ocp_enet_private
));
if
(
ndev
==
NULL
)
{
printk
(
KERN_ERR
"emac%d: Could not allocate ethernet device.
\n
"
,
ocpdev
->
def
->
index
);
return
-
ENOMEM
;
}
ep
=
ndev
->
priv
;
ep
->
ndev
=
ndev
;
ep
->
ocpdev
=
ocpdev
;
ndev
->
irq
=
ocpdev
->
def
->
irq
;
ep
->
wol_irq
=
emacdata
->
wol_irq
;
if
(
emacdata
->
mdio_idx
>=
0
)
{
if
(
emacdata
->
mdio_idx
==
ocpdev
->
def
->
index
)
{
/* Set the common MDIO net_device */
mdio_ndev
=
ndev
;
deferred_init
=
1
;
}
ep
->
mdio_dev
=
mdio_ndev
;
}
else
{
ep
->
mdio_dev
=
ndev
;
}
ocp_set_drvdata
(
ocpdev
,
ndev
);
spin_lock_init
(
&
ep
->
lock
);
/* Fill out MAL information and register commac */
ep
->
mal
=
mal
;
ep
->
mal_tx_chan
=
emacdata
->
mal_tx_chan
;
ep
->
mal_rx_chan
=
emacdata
->
mal_rx_chan
;
ep
->
commac
.
ops
=
&
emac_commac_ops
;
ep
->
commac
.
dev
=
ndev
;
ep
->
commac
.
tx_chan_mask
=
MAL_CHAN_MASK
(
ep
->
mal_tx_chan
);
ep
->
commac
.
rx_chan_mask
=
MAL_CHAN_MASK
(
ep
->
mal_rx_chan
);
rc
=
mal_register_commac
(
ep
->
mal
,
&
ep
->
commac
);
if
(
rc
!=
0
)
goto
bail
;
commac_reg
=
1
;
/* Map our MMIOs */
ep
->
emacp
=
(
emac_t
*
)
ioremap
(
ocpdev
->
def
->
paddr
,
sizeof
(
emac_t
));
/* Check if we need to attach to a ZMII */
if
(
emacdata
->
zmii_idx
>=
0
)
{
ep
->
zmii_input
=
emacdata
->
zmii_mux
;
ep
->
zmii_dev
=
ocp_find_device
(
OCP_ANY_ID
,
OCP_FUNC_ZMII
,
emacdata
->
zmii_idx
);
if
(
ep
->
zmii_dev
==
NULL
)
printk
(
KERN_WARNING
"emac%d: ZMII %d requested but not found !
\n
"
,
ocpdev
->
def
->
index
,
emacdata
->
zmii_idx
);
else
if
((
rc
=
emac_init_zmii
(
ep
->
zmii_dev
,
ep
->
zmii_input
,
emacdata
->
phy_mode
))
!=
0
)
goto
bail
;
}
/* Check if we need to attach to a RGMII */
if
(
emacdata
->
rgmii_idx
>=
0
)
{
ep
->
rgmii_input
=
emacdata
->
rgmii_mux
;
ep
->
rgmii_dev
=
ocp_find_device
(
OCP_ANY_ID
,
OCP_FUNC_RGMII
,
emacdata
->
rgmii_idx
);
if
(
ep
->
rgmii_dev
==
NULL
)
printk
(
KERN_WARNING
"emac%d: RGMII %d requested but not found !
\n
"
,
ocpdev
->
def
->
index
,
emacdata
->
rgmii_idx
);
else
if
((
rc
=
emac_init_rgmii
(
ep
->
rgmii_dev
,
ep
->
rgmii_input
,
emacdata
->
phy_mode
))
!=
0
)
goto
bail
;
}
/* Check if we need to attach to a TAH */
if
(
emacdata
->
tah_idx
>=
0
)
{
ep
->
tah_dev
=
ocp_find_device
(
OCP_ANY_ID
,
OCP_FUNC_TAH
,
emacdata
->
tah_idx
);
if
(
ep
->
tah_dev
==
NULL
)
printk
(
KERN_WARNING
"emac%d: TAH %d requested but not found !
\n
"
,
ocpdev
->
def
->
index
,
emacdata
->
tah_idx
);
else
if
((
rc
=
emac_init_tah
(
ep
))
!=
0
)
goto
bail
;
}
if
(
deferred_init
)
{
if
(
!
list_empty
(
&
emac_init_list
))
{
struct
list_head
*
entry
;
struct
emac_def_dev
*
ddev
;
list_for_each
(
entry
,
&
emac_init_list
)
{
ddev
=
list_entry
(
entry
,
struct
emac_def_dev
,
link
);
emac_init_device
(
ddev
->
ocpdev
,
ddev
->
mal
);
}
}
}
/* Init link monitoring timer */
init_timer
(
&
ep
->
link_timer
);
ep
->
link_timer
.
function
=
emac_link_timer
;
ep
->
link_timer
.
data
=
(
unsigned
long
)
ep
;
ep
->
timer_ticks
=
0
;
/* Fill up the mii_phy structure */
ep
->
phy_mii
.
dev
=
ndev
;
ep
->
phy_mii
.
mdio_read
=
emac_phy_read
;
ep
->
phy_mii
.
mdio_write
=
emac_phy_write
;
ep
->
phy_mii
.
mode
=
emacdata
->
phy_mode
;
/* Find PHY */
phy_map
=
emacdata
->
phy_map
|
busy_phy_map
;
for
(
i
=
0
;
i
<=
0x1f
;
i
++
,
phy_map
>>=
1
)
{
if
((
phy_map
&
0x1
)
==
0
)
{
int
val
=
emac_phy_read
(
ndev
,
i
,
MII_BMCR
);
if
(
val
!=
0xffff
&&
val
!=
-
1
)
break
;
}
}
if
(
i
==
0x20
)
{
printk
(
KERN_WARNING
"emac%d: Can't find PHY.
\n
"
,
ocpdev
->
def
->
index
);
rc
=
-
ENODEV
;
goto
bail
;
}
busy_phy_map
|=
1
<<
i
;
ep
->
mii_phy_addr
=
i
;
rc
=
mii_phy_probe
(
&
ep
->
phy_mii
,
i
);
if
(
rc
)
{
printk
(
KERN_WARNING
"emac%d: Failed to probe PHY type.
\n
"
,
ocpdev
->
def
->
index
);
rc
=
-
ENODEV
;
goto
bail
;
}
/* Setup initial PHY config & startup aneg */
if
(
ep
->
phy_mii
.
def
->
ops
->
init
)
ep
->
phy_mii
.
def
->
ops
->
init
(
&
ep
->
phy_mii
);
netif_carrier_off
(
ndev
);
if
(
ep
->
phy_mii
.
def
->
features
&
SUPPORTED_Autoneg
)
ep
->
want_autoneg
=
1
;
emac_start_link
(
ep
,
NULL
);
/* read the MAC Address */
for
(
i
=
0
;
i
<
6
;
i
++
)
ndev
->
dev_addr
[
i
]
=
emacdata
->
mac_addr
[
i
];
/* Fill in the driver function table */
ndev
->
open
=
&
emac_open
;
ndev
->
hard_start_xmit
=
&
emac_start_xmit
;
ndev
->
stop
=
&
emac_close
;
ndev
->
get_stats
=
&
emac_stats
;
if
(
emacdata
->
jumbo
)
ndev
->
change_mtu
=
&
emac_change_mtu
;
ndev
->
set_mac_address
=
&
emac_set_mac_address
;
ndev
->
set_multicast_list
=
&
emac_set_multicast_list
;
ndev
->
do_ioctl
=
&
emac_ioctl
;
SET_ETHTOOL_OPS
(
ndev
,
&
emac_ethtool_ops
);
if
(
emacdata
->
tah_idx
>=
0
)
ndev
->
features
=
NETIF_F_IP_CSUM
|
NETIF_F_SG
;
SET_MODULE_OWNER
(
ndev
);
rc
=
register_netdev
(
ndev
);
if
(
rc
!=
0
)
goto
bail
;
printk
(
"%s: IBM emac, MAC %02x:%02x:%02x:%02x:%02x:%02x
\n
"
,
ndev
->
name
,
ndev
->
dev_addr
[
0
],
ndev
->
dev_addr
[
1
],
ndev
->
dev_addr
[
2
],
ndev
->
dev_addr
[
3
],
ndev
->
dev_addr
[
4
],
ndev
->
dev_addr
[
5
]);
printk
(
KERN_INFO
"%s: Found %s PHY (0x%02x)
\n
"
,
ndev
->
name
,
ep
->
phy_mii
.
def
->
name
,
ep
->
mii_phy_addr
);
bail:
if
(
rc
&&
commac_reg
)
mal_unregister_commac
(
ep
->
mal
,
&
ep
->
commac
);
if
(
rc
&&
ndev
)
kfree
(
ndev
);
return
rc
;
}
static
int
emac_probe
(
struct
ocp_device
*
ocpdev
)
{
struct
ocp_device
*
maldev
;
struct
ibm_ocp_mal
*
mal
;
struct
ocp_func_emac_data
*
emacdata
;
emacdata
=
(
struct
ocp_func_emac_data
*
)
ocpdev
->
def
->
additions
;
if
(
emacdata
==
NULL
)
{
printk
(
KERN_ERR
emac%d: Missing additional data!
\n
"
,
ocpdev
->
def
->
index
);
return
-
ENODEV
;
}
/* Get the MAL device */
maldev
=
ocp_find_device
(
OCP_ANY_ID
,
OCP_FUNC_MAL
,
emacdata
->
mal_idx
);
if
(
maldev
==
NULL
)
{
printk
(
"No maldev
\n
"
);
return
-
ENODEV
;
}
/*
* Get MAL driver data, it must be here due to link order.
* When the driver is modularized, symbol dependencies will
* ensure the MAL driver is already present if built as a
* module.
*/
mal
=
(
struct
ibm_ocp_mal
*
)
ocp_get_drvdata
(
maldev
);
if
(
mal
==
NULL
)
{
printk
(
"No maldrv
\n
"
);
return
-
ENODEV
;
}
/* If we depend on another EMAC for MDIO, wait for it to show up */
if
(
emacdata
->
mdio_idx
>=
0
&&
(
emacdata
->
mdio_idx
!=
ocpdev
->
def
->
index
)
&&
!
mdio_ndev
)
{
struct
emac_def_dev
*
ddev
;
/* Add this index to the deferred init table */
ddev
=
kmalloc
(
sizeof
(
struct
emac_def_dev
),
GFP_KERNEL
);
ddev
->
ocpdev
=
ocpdev
;
ddev
->
mal
=
mal
;
list_add_tail
(
&
ddev
->
link
,
&
emac_init_list
);
}
else
{
emac_init_device
(
ocpdev
,
mal
);
}
return
0
;
}
/* Structure for a device driver */
static struct ocp_device_id emac_ids[] = {
	{.vendor = OCP_ANY_ID,.function = OCP_FUNC_EMAC},
	{.vendor = OCP_VENDOR_INVALID}
};

static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,

	.probe = emac_probe,
	.remove = emac_remove,
};
static int __init emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_NAME ": " DRV_DESC ", version " DRV_VERSION "\n");
	printk(KERN_INFO "Maintained by " DRV_AUTHOR "\n");

	if (skb_res > 2) {
		printk(KERN_WARNING "Invalid skb_res: %d, cropping to 2\n",
		       skb_res);
		skb_res = 2;
	}

	rc = ocp_register_driver(&emac_driver);
	if (rc < 0) {
		ocp_unregister_driver(&emac_driver);
		return -ENODEV;
	}

	return 0;
}

static void __exit emac_exit(void)
{
	ocp_unregister_driver(&emac_driver);
}

module_init(emac_init);
module_exit(emac_exit);
drivers/net/ibm_emac/ibm_emac_core.h
0 → 100644
/*
* ibm_emac_core.h
*
* Ethernet driver for the built in ethernet on the IBM 405 PowerPC
* processor.
*
* Armin Kuster akuster@mvista.com
* Sept, 2001
*
 * Original driver
* Johnnie Peters
* jpeters@mvista.com
*
 * Copyright 2000 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef _IBM_EMAC_CORE_H_
#define _IBM_EMAC_CORE_H_
#include <linux/netdevice.h>
#include <asm/ocp.h>
#include <asm/mmu.h>
/* For phys_addr_t */
#include "ibm_emac.h"
#include "ibm_emac_phy.h"
#include "ibm_emac_rgmii.h"
#include "ibm_emac_zmii.h"
#include "ibm_emac_mal.h"
#include "ibm_emac_tah.h"
#ifndef CONFIG_IBM_EMAC_TXB
#define NUM_TX_BUFF 64
#define NUM_RX_BUFF 64
#else
#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB
#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB
#endif
/* This does 16 byte alignment, exactly what we need.
* The packet length includes FCS, but we don't want to
* include that when passing upstream as it messes up
* bridging applications.
*/
#ifndef CONFIG_IBM_EMAC_SKBRES
#define SKB_RES 2
#else
#define SKB_RES CONFIG_IBM_EMAC_SKBRES
#endif
/* Note about alignment: alloc_skb() returns a cache line
 * aligned buffer. However, dev_alloc_skb() adds 16 more
 * bytes and "reserves" them, so the buffer would actually end
 * on a half cache line. We therefore call alloc_skb() directly,
 * allocating 16 extra bytes to match the total amount
 * allocated by dev_alloc_skb(), but without reserving them.
 */
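/* For illustration only (not part of this header): the allocation the
 * comment above describes boils down to something like the sketch below,
 * assuming <linux/skbuff.h> is available via <linux/netdevice.h>; the
 * helper name is hypothetical.
 */
static inline struct sk_buff *emac_alloc_aligned_skb_example(int buf_size)
{
    /* Ask alloc_skb() for the same 16 extra bytes dev_alloc_skb() would
     * add, but skip the skb_reserve() so the data area stays cache-line
     * aligned.
     */
    return alloc_skb(buf_size + 16, GFP_ATOMIC);
}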
#define MAX_NUM_BUF_DESC 255
#define DESC_BUF_SIZE 4080
/* max 4096-16 */
#define DESC_BUF_SIZE_REG (DESC_BUF_SIZE / 16)
/* Transmitter timeout. */
#define TX_TIMEOUT (2*HZ)
/* MDIO latency delay */
#define MDIO_DELAY 50
/* Power management shift registers */
#define IBM_CPM_EMMII 0
/* Shift value for MII */
#define IBM_CPM_EMRX 1
/* Shift value for recv */
#define IBM_CPM_EMTX 2
/* Shift value for MAC */
#define IBM_CPM_EMAC(x) (((x)>>IBM_CPM_EMMII) | ((x)>>IBM_CPM_EMRX) | ((x)>>IBM_CPM_EMTX))
#define ENET_HEADER_SIZE 14
#define ENET_FCS_SIZE 4
#define ENET_DEF_MTU_SIZE 1500
#define ENET_DEF_BUF_SIZE (ENET_DEF_MTU_SIZE + ENET_HEADER_SIZE + ENET_FCS_SIZE)
#define EMAC_MIN_FRAME 64
#define EMAC_MAX_FRAME 9018
#define EMAC_MIN_MTU (EMAC_MIN_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE)
#define EMAC_MAX_MTU (EMAC_MAX_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE)
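/* i.e. EMAC_MIN_MTU = 64 - 14 - 4 = 46 bytes and
 * EMAC_MAX_MTU = 9018 - 14 - 4 = 9000 bytes.
 */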
#ifdef CONFIG_IBM_EMAC_ERRMSG
void emac_serr_dump_0(struct net_device *dev);
void emac_serr_dump_1(struct net_device *dev);
void emac_err_dump(struct net_device *dev, int em0isr);
void emac_phy_dump(struct net_device *);
void emac_desc_dump(struct net_device *);
void emac_mac_dump(struct net_device *);
void emac_mal_dump(struct net_device *);
#else
#define emac_serr_dump_0(dev) do { } while (0)
#define emac_serr_dump_1(dev) do { } while (0)
#define emac_err_dump(dev,x) do { } while (0)
#define emac_phy_dump(dev) do { } while (0)
#define emac_desc_dump(dev) do { } while (0)
#define emac_mac_dump(dev) do { } while (0)
#define emac_mal_dump(dev) do { } while (0)
#endif
struct ocp_enet_private {
    struct sk_buff *tx_skb[NUM_TX_BUFF];
    struct sk_buff *rx_skb[NUM_RX_BUFF];
    struct mal_descriptor *tx_desc;
    struct mal_descriptor *rx_desc;
    struct mal_descriptor *rx_dirty;
    struct net_device_stats stats;
    int tx_cnt;
    int rx_slot;
    int dirty_rx;
    int tx_slot;
    int ack_slot;
    int rx_buffer_size;

    struct mii_phy phy_mii;
    int mii_phy_addr;
    int want_autoneg;
    int timer_ticks;
    struct timer_list link_timer;
    struct net_device *mdio_dev;

    struct ocp_device *rgmii_dev;
    int rgmii_input;

    struct ocp_device *zmii_dev;
    int zmii_input;

    struct ibm_ocp_mal *mal;
    int mal_tx_chan, mal_rx_chan;
    struct mal_commac commac;

    struct ocp_device *tah_dev;

    int opened;
    int going_away;
    int wol_irq;
    emac_t *emacp;
    struct ocp_device *ocpdev;
    struct net_device *ndev;
    spinlock_t lock;
};
#endif
/* _IBM_EMAC_CORE_H_ */
drivers/net/ibm_emac/ibm_emac_debug.c
0 → 100644
/*
* ibm_ocp_debug.c
*
* This has all the debug routines that were in *_enet.c
*
* Armin Kuster akuster@mvista.com
* April , 2002
*
* Copyright 2002 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <asm/io.h>
#include "ibm_ocp_mal.h"
#include "ibm_ocp_zmii.h"
#include "ibm_ocp_enet.h"
extern int emac_phy_read(struct net_device *dev, int mii_id, int reg);

void emac_phy_dump(struct net_device *dev)
{
    struct ocp_enet_private *fep = dev->priv;
    unsigned long i;
    uint data;

    printk(KERN_DEBUG " Prepare for Phy dump....\n");
    for (i = 0; i < 0x1A; i++) {
        data = emac_phy_read(dev, fep->mii_phy_addr, i);
        printk(KERN_DEBUG "Phy reg 0x%lx ==> %4x\n", i, data);
        if (i == 0x07)
            i = 0x0f;
    }
}

void emac_desc_dump(struct net_device *dev)
{
    struct ocp_enet_private *fep = dev->priv;
    int curr_slot;

    printk(KERN_DEBUG
           "dumping the receive descriptors: current slot is %d\n",
           fep->rx_slot);
    for (curr_slot = 0; curr_slot < NUM_RX_BUFF; curr_slot++) {
        printk(KERN_DEBUG
               "Desc %02d: status 0x%04x, length %3d, addr 0x%x\n",
               curr_slot, fep->rx_desc[curr_slot].ctrl,
               fep->rx_desc[curr_slot].data_len,
               (unsigned int)fep->rx_desc[curr_slot].data_ptr);
    }
}

void emac_mac_dump(struct net_device *dev)
{
    struct ocp_enet_private *fep = dev->priv;
    volatile emac_t *emacp = fep->emacp;

    printk(KERN_DEBUG "EMAC DEBUG **********\n");
    printk(KERN_DEBUG "EMAC_M0 ==> 0x%x\n", in_be32(&emacp->em0mr0));
    printk(KERN_DEBUG "EMAC_M1 ==> 0x%x\n", in_be32(&emacp->em0mr1));
    printk(KERN_DEBUG "EMAC_TXM0==> 0x%x\n", in_be32(&emacp->em0tmr0));
    printk(KERN_DEBUG "EMAC_TXM1==> 0x%x\n", in_be32(&emacp->em0tmr1));
    printk(KERN_DEBUG "EMAC_RXM ==> 0x%x\n", in_be32(&emacp->em0rmr));
    printk(KERN_DEBUG "EMAC_ISR ==> 0x%x\n", in_be32(&emacp->em0isr));
    printk(KERN_DEBUG "EMAC_IER ==> 0x%x\n", in_be32(&emacp->em0iser));
    printk(KERN_DEBUG "EMAC_IAH ==> 0x%x\n", in_be32(&emacp->em0iahr));
    printk(KERN_DEBUG "EMAC_IAL ==> 0x%x\n", in_be32(&emacp->em0ialr));
    printk(KERN_DEBUG "EMAC_VLAN_TPID_REG ==> 0x%x\n",
           in_be32(&emacp->em0vtpid));
}

void emac_mal_dump(struct net_device *dev)
{
    struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal;

    printk(KERN_DEBUG " MAL DEBUG **********\n");
    printk(KERN_DEBUG " MCR ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALCR));
    printk(KERN_DEBUG " ESR ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALESR));
    printk(KERN_DEBUG " IER ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALIER));
#ifdef CONFIG_40x
    printk(KERN_DEBUG " DBR ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALDBR));
#endif				/* CONFIG_40x */
    printk(KERN_DEBUG " TXCASR ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCASR));
    printk(KERN_DEBUG " TXCARR ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCARR));
    printk(KERN_DEBUG " TXEOBISR ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALTXEOBISR));
    printk(KERN_DEBUG " TXDEIR ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALTXDEIR));
    printk(KERN_DEBUG " RXCASR ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCASR));
    printk(KERN_DEBUG " RXCARR ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCARR));
    printk(KERN_DEBUG " RXEOBISR ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALRXEOBISR));
    printk(KERN_DEBUG " RXDEIR ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALRXDEIR));
    printk(KERN_DEBUG " TXCTP0R ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP0R));
    printk(KERN_DEBUG " TXCTP1R ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP1R));
    printk(KERN_DEBUG " TXCTP2R ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP2R));
    printk(KERN_DEBUG " TXCTP3R ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP3R));
    printk(KERN_DEBUG " RXCTP0R ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP0R));
    printk(KERN_DEBUG " RXCTP1R ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP1R));
    printk(KERN_DEBUG " RCBS0 ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS0));
    printk(KERN_DEBUG " RCBS1 ==> 0x%x\n",
           (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS1));
}

void emac_serr_dump_0(struct net_device *dev)
{
    struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal;
    unsigned long int mal_error, plb_error, plb_addr;

    mal_error = get_mal_dcrn(mal, DCRN_MALESR);
    printk(KERN_DEBUG "ppc405_eth_serr: %s channel %ld\n",
           (mal_error & 0x40000000) ? "Receive" : "Transmit",
           (mal_error & 0x3e000000) >> 25);
    printk(KERN_DEBUG " ----- latched error -----\n");
    if (mal_error & MALESR_DE)
        printk(KERN_DEBUG " DE: descriptor error\n");
    if (mal_error & MALESR_OEN)
        printk(KERN_DEBUG " ONE: OPB non-fullword error\n");
    if (mal_error & MALESR_OTE)
        printk(KERN_DEBUG " OTE: OPB timeout error\n");
    if (mal_error & MALESR_OSE)
        printk(KERN_DEBUG " OSE: OPB slave error\n");
    if (mal_error & MALESR_PEIN) {
        plb_error = mfdcr(DCRN_PLB0_BESR);
        printk(KERN_DEBUG " PEIN: PLB error, PLB0_BESR is 0x%x\n",
               (unsigned int)plb_error);
        plb_addr = mfdcr(DCRN_PLB0_BEAR);
        printk(KERN_DEBUG " PEIN: PLB error, PLB0_BEAR is 0x%x\n",
               (unsigned int)plb_addr);
    }
}

void emac_serr_dump_1(struct net_device *dev)
{
    struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal;
    int mal_error = get_mal_dcrn(mal, DCRN_MALESR);

    printk(KERN_DEBUG " ----- cumulative errors -----\n");
    if (mal_error & MALESR_DEI)
        printk(KERN_DEBUG " DEI: descriptor error interrupt\n");
    if (mal_error & MALESR_ONEI)
        printk(KERN_DEBUG " OPB non-fullword error interrupt\n");
    if (mal_error & MALESR_OTEI)
        printk(KERN_DEBUG " OTEI: timeout error interrupt\n");
    if (mal_error & MALESR_OSEI)
        printk(KERN_DEBUG " OSEI: slave error interrupt\n");
    if (mal_error & MALESR_PBEI)
        printk(KERN_DEBUG " PBEI: PLB bus error interrupt\n");
}

void emac_err_dump(struct net_device *dev, int em0isr)
{
    printk(KERN_DEBUG "%s: on-chip ethernet error:\n", dev->name);

    if (em0isr & EMAC_ISR_OVR)
        printk(KERN_DEBUG " OVR: overrun\n");
    if (em0isr & EMAC_ISR_PP)
        printk(KERN_DEBUG " PP: control pause packet\n");
    if (em0isr & EMAC_ISR_BP)
        printk(KERN_DEBUG " BP: packet error\n");
    if (em0isr & EMAC_ISR_RP)
        printk(KERN_DEBUG " RP: runt packet\n");
    if (em0isr & EMAC_ISR_SE)
        printk(KERN_DEBUG " SE: short event\n");
    if (em0isr & EMAC_ISR_ALE)
        printk(KERN_DEBUG " ALE: odd number of nibbles in packet\n");
    if (em0isr & EMAC_ISR_BFCS)
        printk(KERN_DEBUG " BFCS: bad FCS\n");
    if (em0isr & EMAC_ISR_PTLE)
        printk(KERN_DEBUG " PTLE: oversized packet\n");
    if (em0isr & EMAC_ISR_ORE)
        printk(KERN_DEBUG
               " ORE: packet length field > max allowed LLC\n");
    if (em0isr & EMAC_ISR_IRE)
        printk(KERN_DEBUG " IRE: In Range error\n");
    if (em0isr & EMAC_ISR_DBDM)
        printk(KERN_DEBUG " DBDM: xmit error or SQE\n");
    if (em0isr & EMAC_ISR_DB0)
        printk(KERN_DEBUG " DB0: xmit error or SQE on TX channel 0\n");
    if (em0isr & EMAC_ISR_SE0)
        printk(KERN_DEBUG
               " SE0: Signal Quality Error test failure from TX channel 0\n");
    if (em0isr & EMAC_ISR_TE0)
        printk(KERN_DEBUG " TE0: xmit channel 0 aborted\n");
    if (em0isr & EMAC_ISR_DB1)
        printk(KERN_DEBUG " DB1: xmit error or SQE on TX channel \n");
    if (em0isr & EMAC_ISR_SE1)
        printk(KERN_DEBUG
               " SE1: Signal Quality Error test failure from TX channel 1\n");
    if (em0isr & EMAC_ISR_TE1)
        printk(KERN_DEBUG " TE1: xmit channel 1 aborted\n");
    if (em0isr & EMAC_ISR_MOS)
        printk(KERN_DEBUG " MOS\n");
    if (em0isr & EMAC_ISR_MOF)
        printk(KERN_DEBUG " MOF\n");

    emac_mac_dump(dev);
    emac_mal_dump(dev);
}
drivers/net/ibm_emac/ibm_emac_mal.c
0 → 100644
/*
* ibm_ocp_mal.c
*
* Armin Kuster akuster@mvista.com
* June, 2002
*
* Copyright 2002 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/ocp.h>
#include "ibm_emac_mal.h"
// Locking: Should we share a lock with the client ? The client could provide
// a lock pointer (optionally) in the commac structure... I don't think this is
// really necessary though
/* This lock protects the commac list. On today's UP implementations, it's
* really only used as IRQ protection in mal_{register,unregister}_commac()
*/
static rwlock_t mal_list_lock = RW_LOCK_UNLOCKED;

int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
    unsigned long flags;

    write_lock_irqsave(&mal_list_lock, flags);

    /* Don't let multiple commacs claim the same channel */
    if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
        (mal->rx_chan_mask & commac->rx_chan_mask)) {
        write_unlock_irqrestore(&mal_list_lock, flags);
        return -EBUSY;
    }

    mal->tx_chan_mask |= commac->tx_chan_mask;
    mal->rx_chan_mask |= commac->rx_chan_mask;

    list_add(&commac->list, &mal->commac);

    write_unlock_irqrestore(&mal_list_lock, flags);

    MOD_INC_USE_COUNT;

    return 0;
}

int mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
    unsigned long flags;

    write_lock_irqsave(&mal_list_lock, flags);

    mal->tx_chan_mask &= ~commac->tx_chan_mask;
    mal->rx_chan_mask &= ~commac->rx_chan_mask;

    list_del_init(&commac->list);

    write_unlock_irqrestore(&mal_list_lock, flags);

    MOD_DEC_USE_COUNT;

    return 0;
}

int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
{
    switch (channel) {
    case 0:
        set_mal_dcrn(mal, DCRN_MALRCBS0, size);
        break;
#ifdef DCRN_MALRCBS1
    case 1:
        set_mal_dcrn(mal, DCRN_MALRCBS1, size);
        break;
#endif
#ifdef DCRN_MALRCBS2
    case 2:
        set_mal_dcrn(mal, DCRN_MALRCBS2, size);
        break;
#endif
#ifdef DCRN_MALRCBS3
    case 3:
        set_mal_dcrn(mal, DCRN_MALRCBS3, size);
        break;
#endif
    default:
        return -EINVAL;
    }

    return 0;
}
static
irqreturn_t
mal_serr
(
int
irq
,
void
*
dev_instance
,
struct
pt_regs
*
regs
)
{
struct
ibm_ocp_mal
*
mal
=
dev_instance
;
unsigned
long
mal_error
;
/*
* This SERR applies to one of the devices on the MAL, here we charge
* it against the first EMAC registered for the MAL.
*/
mal_error
=
get_mal_dcrn
(
mal
,
DCRN_MALESR
);
printk
(
KERN_ERR
"%s: System Error (MALESR=%lx)
\n
"
,
"MAL"
/* FIXME: get the name right */
,
mal_error
);
/* FIXME: decipher error */
/* FIXME: distribute to commacs, if possible */
/* Clear the error status register */
set_mal_dcrn
(
mal
,
DCRN_MALESR
,
mal_error
);
return
IRQ_HANDLED
;
}
static
irqreturn_t
mal_txeob
(
int
irq
,
void
*
dev_instance
,
struct
pt_regs
*
regs
)
{
struct
ibm_ocp_mal
*
mal
=
dev_instance
;
struct
list_head
*
l
;
unsigned
long
isr
;
isr
=
get_mal_dcrn
(
mal
,
DCRN_MALTXEOBISR
);
set_mal_dcrn
(
mal
,
DCRN_MALTXEOBISR
,
isr
);
read_lock
(
&
mal_list_lock
);
list_for_each
(
l
,
&
mal
->
commac
)
{
struct
mal_commac
*
mc
=
list_entry
(
l
,
struct
mal_commac
,
list
);
if
(
isr
&
mc
->
tx_chan_mask
)
{
mc
->
ops
->
txeob
(
mc
->
dev
,
isr
&
mc
->
tx_chan_mask
);
}
}
read_unlock
(
&
mal_list_lock
);
return
IRQ_HANDLED
;
}
static
irqreturn_t
mal_rxeob
(
int
irq
,
void
*
dev_instance
,
struct
pt_regs
*
regs
)
{
struct
ibm_ocp_mal
*
mal
=
dev_instance
;
struct
list_head
*
l
;
unsigned
long
isr
;
isr
=
get_mal_dcrn
(
mal
,
DCRN_MALRXEOBISR
);
set_mal_dcrn
(
mal
,
DCRN_MALRXEOBISR
,
isr
);
read_lock
(
&
mal_list_lock
);
list_for_each
(
l
,
&
mal
->
commac
)
{
struct
mal_commac
*
mc
=
list_entry
(
l
,
struct
mal_commac
,
list
);
if
(
isr
&
mc
->
rx_chan_mask
)
{
mc
->
ops
->
rxeob
(
mc
->
dev
,
isr
&
mc
->
rx_chan_mask
);
}
}
read_unlock
(
&
mal_list_lock
);
return
IRQ_HANDLED
;
}
static
irqreturn_t
mal_txde
(
int
irq
,
void
*
dev_instance
,
struct
pt_regs
*
regs
)
{
struct
ibm_ocp_mal
*
mal
=
dev_instance
;
struct
list_head
*
l
;
unsigned
long
deir
;
deir
=
get_mal_dcrn
(
mal
,
DCRN_MALTXDEIR
);
/* FIXME: print which MAL correctly */
printk
(
KERN_WARNING
"%s: Tx descriptor error (MALTXDEIR=%lx)
\n
"
,
"MAL"
,
deir
);
read_lock
(
&
mal_list_lock
);
list_for_each
(
l
,
&
mal
->
commac
)
{
struct
mal_commac
*
mc
=
list_entry
(
l
,
struct
mal_commac
,
list
);
if
(
deir
&
mc
->
tx_chan_mask
)
{
mc
->
ops
->
txde
(
mc
->
dev
,
deir
&
mc
->
tx_chan_mask
);
}
}
read_unlock
(
&
mal_list_lock
);
return
IRQ_HANDLED
;
}
/*
 * This interrupt should be very rare at best. It occurs when
 * the hardware has a problem with the receive descriptors. The manual
 * states that it occurs when the hardware finds a receive descriptor
 * whose empty bit is not set. The recovery mechanism is to
 * traverse the descriptors, handle any that are marked to be
 * handled, and reinitialize each along the way. At that point the
 * driver will be restarted.
 */
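/* Rough sketch of that recovery, in terms of the mal_descriptor layout
 * from ibm_emac_mal.h (illustration only: the real work happens in the
 * commac's rxde callback in ibm_emac_core.c, and the slot handling
 * details below are assumptions; fep is the EMAC's ocp_enet_private):
 *
 *	for (i = 0; i < NUM_RX_BUFF; i++) {
 *		struct mal_descriptor *rd = &fep->rx_desc[i];
 *
 *		if (!(rd->ctrl & MAL_RX_CTRL_EMPTY)) {
 *			// consume or drop the buffer, then re-arm the slot
 *			rd->data_len = 0;
 *			rd->ctrl = MAL_RX_CTRL_EMPTY |
 *				   (i == NUM_RX_BUFF - 1 ? MAL_RX_CTRL_WRAP : 0);
 *		}
 *	}
 *	mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
 */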
static
irqreturn_t
mal_rxde
(
int
irq
,
void
*
dev_instance
,
struct
pt_regs
*
regs
)
{
struct
ibm_ocp_mal
*
mal
=
dev_instance
;
struct
list_head
*
l
;
unsigned
long
deir
;
deir
=
get_mal_dcrn
(
mal
,
DCRN_MALRXDEIR
);
/*
* This really is needed. This case was encountered in stress testing.
*/
if
(
deir
==
0
)
return
IRQ_HANDLED
;
/* FIXME: print which MAL correctly */
printk
(
KERN_WARNING
"%s: Rx descriptor error (MALRXDEIR=%lx)
\n
"
,
"MAL"
,
deir
);
read_lock
(
&
mal_list_lock
);
list_for_each
(
l
,
&
mal
->
commac
)
{
struct
mal_commac
*
mc
=
list_entry
(
l
,
struct
mal_commac
,
list
);
if
(
deir
&
mc
->
rx_chan_mask
)
{
mc
->
ops
->
rxde
(
mc
->
dev
,
deir
&
mc
->
rx_chan_mask
);
}
}
read_unlock
(
&
mal_list_lock
);
return
IRQ_HANDLED
;
}
static
int
__init
mal_probe
(
struct
ocp_device
*
ocpdev
)
{
struct
ibm_ocp_mal
*
mal
=
NULL
;
struct
ocp_func_mal_data
*
maldata
;
int
err
=
0
;
maldata
=
(
struct
ocp_func_mal_data
*
)
ocpdev
->
def
->
additions
;
if
(
maldata
==
NULL
)
{
printk
(
KERN_ERR
"mal%d: Missing additional datas !
\n
"
,
ocpdev
->
def
->
index
);
return
-
ENODEV
;
}
mal
=
kmalloc
(
sizeof
(
struct
ibm_ocp_mal
),
GFP_KERNEL
);
if
(
mal
==
NULL
)
{
printk
(
KERN_ERR
"mal%d: Out of memory allocating MAL structure !
\n
"
,
ocpdev
->
def
->
index
);
return
-
ENOMEM
;
}
memset
(
mal
,
0
,
sizeof
(
*
mal
));
switch
(
ocpdev
->
def
->
index
)
{
case
0
:
mal
->
dcrbase
=
DCRN_MAL_BASE
;
break
;
#ifdef DCRN_MAL1_BASE
case
1
:
mal
->
dcrbase
=
DCRN_MAL1_BASE
;
break
;
#endif
default:
BUG
();
}
/**************************/
INIT_LIST_HEAD
(
&
mal
->
commac
);
set_mal_dcrn
(
mal
,
DCRN_MALRXCARR
,
0xFFFFFFFF
);
set_mal_dcrn
(
mal
,
DCRN_MALTXCARR
,
0xFFFFFFFF
);
set_mal_dcrn
(
mal
,
DCRN_MALCR
,
MALCR_MMSR
);
/* 384 */
/* FIXME: Add delay */
/* Set the MAL configuration register */
set_mal_dcrn
(
mal
,
DCRN_MALCR
,
MALCR_PLBB
|
MALCR_OPBBL
|
MALCR_LEA
|
MALCR_PLBLT_DEFAULT
);
/* It would be nice to allocate buffers separately for each
* channel, but we can't because the channels share the upper
* 13 bits of address lines. Each channel's buffer must also
* be 4k aligned, so we allocate 4k for each channel. This is
* inefficient FIXME: do better, if possible */
mal
->
tx_virt_addr
=
dma_alloc_coherent
(
&
ocpdev
->
dev
,
MAL_DT_ALIGN
*
maldata
->
num_tx_chans
,
&
mal
->
tx_phys_addr
,
GFP_KERNEL
);
if
(
mal
->
tx_virt_addr
==
NULL
)
{
printk
(
KERN_ERR
"mal%d: Out of memory allocating MAL descriptors !
\n
"
,
ocpdev
->
def
->
index
);
err
=
-
ENOMEM
;
goto
fail
;
}
/* God, oh, god, I hate DCRs */
set_mal_dcrn
(
mal
,
DCRN_MALTXCTP0R
,
mal
->
tx_phys_addr
);
#ifdef DCRN_MALTXCTP1R
if
(
maldata
->
num_tx_chans
>
1
)
set_mal_dcrn
(
mal
,
DCRN_MALTXCTP1R
,
mal
->
tx_phys_addr
+
MAL_DT_ALIGN
);
#endif
/* DCRN_MALTXCTP1R */
#ifdef DCRN_MALTXCTP2R
if
(
maldata
->
num_tx_chans
>
2
)
set_mal_dcrn
(
mal
,
DCRN_MALTXCTP2R
,
mal
->
tx_phys_addr
+
2
*
MAL_DT_ALIGN
);
#endif
/* DCRN_MALTXCTP2R */
#ifdef DCRN_MALTXCTP3R
if
(
maldata
->
num_tx_chans
>
3
)
set_mal_dcrn
(
mal
,
DCRN_MALTXCTP3R
,
mal
->
tx_phys_addr
+
3
*
MAL_DT_ALIGN
);
#endif
/* DCRN_MALTXCTP3R */
#ifdef DCRN_MALTXCTP4R
if
(
maldata
->
num_tx_chans
>
4
)
set_mal_dcrn
(
mal
,
DCRN_MALTXCTP4R
,
mal
->
tx_phys_addr
+
4
*
MAL_DT_ALIGN
);
#endif
/* DCRN_MALTXCTP4R */
#ifdef DCRN_MALTXCTP5R
if
(
maldata
->
num_tx_chans
>
5
)
set_mal_dcrn
(
mal
,
DCRN_MALTXCTP5R
,
mal
->
tx_phys_addr
+
5
*
MAL_DT_ALIGN
);
#endif
/* DCRN_MALTXCTP5R */
#ifdef DCRN_MALTXCTP6R
if
(
maldata
->
num_tx_chans
>
6
)
set_mal_dcrn
(
mal
,
DCRN_MALTXCTP6R
,
mal
->
tx_phys_addr
+
6
*
MAL_DT_ALIGN
);
#endif
/* DCRN_MALTXCTP6R */
#ifdef DCRN_MALTXCTP7R
if
(
maldata
->
num_tx_chans
>
7
)
set_mal_dcrn
(
mal
,
DCRN_MALTXCTP7R
,
mal
->
tx_phys_addr
+
7
*
MAL_DT_ALIGN
);
#endif
/* DCRN_MALTXCTP7R */
mal
->
rx_virt_addr
=
dma_alloc_coherent
(
&
ocpdev
->
dev
,
MAL_DT_ALIGN
*
maldata
->
num_rx_chans
,
&
mal
->
rx_phys_addr
,
GFP_KERNEL
);
set_mal_dcrn
(
mal
,
DCRN_MALRXCTP0R
,
mal
->
rx_phys_addr
);
#ifdef DCRN_MALRXCTP1R
if
(
maldata
->
num_rx_chans
>
1
)
set_mal_dcrn
(
mal
,
DCRN_MALRXCTP1R
,
mal
->
rx_phys_addr
+
MAL_DT_ALIGN
);
#endif
/* DCRN_MALRXCTP1R */
#ifdef DCRN_MALRXCTP2R
if
(
maldata
->
num_rx_chans
>
2
)
set_mal_dcrn
(
mal
,
DCRN_MALRXCTP2R
,
mal
->
rx_phys_addr
+
2
*
MAL_DT_ALIGN
);
#endif
/* DCRN_MALRXCTP2R */
#ifdef DCRN_MALRXCTP3R
if
(
maldata
->
num_rx_chans
>
3
)
set_mal_dcrn
(
mal
,
DCRN_MALRXCTP3R
,
mal
->
rx_phys_addr
+
3
*
MAL_DT_ALIGN
);
#endif
/* DCRN_MALRXCTP3R */
err
=
request_irq
(
maldata
->
serr_irq
,
mal_serr
,
0
,
"MAL SERR"
,
mal
);
if
(
err
)
goto
fail
;
err
=
request_irq
(
maldata
->
txde_irq
,
mal_txde
,
0
,
"MAL TX DE "
,
mal
);
if
(
err
)
goto
fail
;
err
=
request_irq
(
maldata
->
txeob_irq
,
mal_txeob
,
0
,
"MAL TX EOB"
,
mal
);
if
(
err
)
goto
fail
;
err
=
request_irq
(
maldata
->
rxde_irq
,
mal_rxde
,
0
,
"MAL RX DE"
,
mal
);
if
(
err
)
goto
fail
;
err
=
request_irq
(
maldata
->
rxeob_irq
,
mal_rxeob
,
0
,
"MAL RX EOB"
,
mal
);
if
(
err
)
goto
fail
;
set_mal_dcrn
(
mal
,
DCRN_MALIER
,
MALIER_DE
|
MALIER_NE
|
MALIER_TE
|
MALIER_OPBE
|
MALIER_PLBE
);
/* Advertise me to the rest of the world */
ocp_set_drvdata
(
ocpdev
,
mal
);
printk
(
KERN_INFO
"mal%d: Initialized, %d tx channels, %d rx channels
\n
"
,
ocpdev
->
def
->
index
,
maldata
->
num_tx_chans
,
maldata
->
num_rx_chans
);
return
0
;
fail:
/* FIXME: dispose requested IRQs ! */
if
(
err
&&
mal
)
kfree
(
mal
);
return
err
;
}
static
void
__exit
mal_remove
(
struct
ocp_device
*
ocpdev
)
{
struct
ibm_ocp_mal
*
mal
=
ocp_get_drvdata
(
ocpdev
);
struct
ocp_func_mal_data
*
maldata
=
ocpdev
->
def
->
additions
;
BUG_ON
(
!
maldata
);
ocp_set_drvdata
(
ocpdev
,
NULL
);
/* FIXME: shut down the MAL, deal with dependency with emac */
free_irq
(
maldata
->
serr_irq
,
mal
);
free_irq
(
maldata
->
txde_irq
,
mal
);
free_irq
(
maldata
->
txeob_irq
,
mal
);
free_irq
(
maldata
->
rxde_irq
,
mal
);
free_irq
(
maldata
->
rxeob_irq
,
mal
);
if
(
mal
->
tx_virt_addr
)
dma_free_coherent
(
&
ocpdev
->
dev
,
MAL_DT_ALIGN
*
maldata
->
num_tx_chans
,
mal
->
tx_virt_addr
,
mal
->
tx_phys_addr
);
if
(
mal
->
rx_virt_addr
)
dma_free_coherent
(
&
ocpdev
->
dev
,
MAL_DT_ALIGN
*
maldata
->
num_rx_chans
,
mal
->
rx_virt_addr
,
mal
->
rx_phys_addr
);
kfree
(
mal
);
}
/* Structure for a device driver */
static struct ocp_device_id mal_ids[] = {
    {.vendor = OCP_ANY_ID,.function = OCP_FUNC_MAL},
    {.vendor = OCP_VENDOR_INVALID}
};

static struct ocp_driver mal_driver = {
    .name = "mal",
    .id_table = mal_ids,
    .probe = mal_probe,
    .remove = mal_remove,
};

static int __init init_mals(void)
{
    int rc;

    rc = ocp_register_driver(&mal_driver);
    if (rc < 0) {
        ocp_unregister_driver(&mal_driver);
        return -ENODEV;
    }

    return 0;
}

static void __exit exit_mals(void)
{
    ocp_unregister_driver(&mal_driver);
}

module_init(init_mals);
module_exit(exit_mals);
drivers/net/ibm_emac/ibm_emac_mal.h
0 → 100644
#ifndef _IBM_EMAC_MAL_H
#define _IBM_EMAC_MAL_H
#include <linux/list.h>
#define MAL_DT_ALIGN (4096)
/* Alignment for each channel's descriptor table */
#define MAL_CHAN_MASK(chan) (0x80000000 >> (chan))
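/* e.g. MAL_CHAN_MASK(0) == 0x80000000 and MAL_CHAN_MASK(1) == 0x40000000:
 * channel n owns bit (31 - n) of the per-channel mask registers.
 */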
/* MAL Buffer Descriptor structure */
struct mal_descriptor {
    unsigned short ctrl;	/* MAL / Commac status control bits */
    short data_len;		/* Max length is 4K-1 (12 bits) */
    unsigned char *data_ptr;	/* pointer to actual data buffer */
} __attribute__ ((packed));
/* the following defines are for the MadMAL status and control registers. */
/* MADMAL transmit and receive status/control bits */
#define MAL_RX_CTRL_EMPTY 0x8000
#define MAL_RX_CTRL_WRAP 0x4000
#define MAL_RX_CTRL_CM 0x2000
#define MAL_RX_CTRL_LAST 0x1000
#define MAL_RX_CTRL_FIRST 0x0800
#define MAL_RX_CTRL_INTR 0x0400
#define MAL_TX_CTRL_READY 0x8000
#define MAL_TX_CTRL_WRAP 0x4000
#define MAL_TX_CTRL_CM 0x2000
#define MAL_TX_CTRL_LAST 0x1000
#define MAL_TX_CTRL_INTR 0x0400
struct mal_commac_ops {
    void (*txeob) (void *dev, u32 chanmask);
    void (*txde) (void *dev, u32 chanmask);
    void (*rxeob) (void *dev, u32 chanmask);
    void (*rxde) (void *dev, u32 chanmask);
};

struct mal_commac {
    struct mal_commac_ops *ops;
    void *dev;
    u32 tx_chan_mask, rx_chan_mask;
    struct list_head list;
};

struct ibm_ocp_mal {
    int dcrbase;

    struct list_head commac;
    u32 tx_chan_mask, rx_chan_mask;

    dma_addr_t tx_phys_addr;
    struct mal_descriptor *tx_virt_addr;

    dma_addr_t rx_phys_addr;
    struct mal_descriptor *rx_virt_addr;
};
#define GET_MAL_STANZA(base,dcrn) \
case base: \
x = mfdcr(dcrn(base)); \
break;
#define SET_MAL_STANZA(base,dcrn, val) \
case base: \
mtdcr(dcrn(base), (val)); \
break;
#define GET_MAL0_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL_BASE,dcrn)
#define SET_MAL0_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL_BASE,dcrn,val)
#ifdef DCRN_MAL1_BASE
#define GET_MAL1_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL1_BASE,dcrn)
#define SET_MAL1_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL1_BASE,dcrn,val)
#else
/* ! DCRN_MAL1_BASE */
#define GET_MAL1_STANZA(dcrn)
#define SET_MAL1_STANZA(dcrn,val)
#endif
#define get_mal_dcrn(mal, dcrn) ({ \
u32 x; \
switch ((mal)->dcrbase) { \
GET_MAL0_STANZA(dcrn) \
GET_MAL1_STANZA(dcrn) \
default: \
BUG(); \
} \
x; })
#define set_mal_dcrn(mal, dcrn, val) do { \
switch ((mal)->dcrbase) { \
SET_MAL0_STANZA(dcrn,val) \
SET_MAL1_STANZA(dcrn,val) \
default: \
BUG(); \
} } while (0)
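/* For reference: with only DCRN_MAL_BASE configured, a call such as
 * get_mal_dcrn(mal, DCRN_MALESR) expands to roughly
 *
 *	u32 x;
 *	switch ((mal)->dcrbase) {
 *	case DCRN_MAL_BASE:
 *		x = mfdcr(DCRN_MALESR(DCRN_MAL_BASE));
 *		break;
 *	default:
 *		BUG();
 *	}
 *	x;
 *
 * i.e. the dcrn argument is itself a macro that is handed the matching
 * MAL base address. (Expansion shown for illustration only.)
 */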
static inline void mal_enable_tx_channels(struct ibm_ocp_mal *mal, u32 chanmask)
{
    set_mal_dcrn(mal, DCRN_MALTXCASR,
                 get_mal_dcrn(mal, DCRN_MALTXCASR) | chanmask);
}

static inline void mal_disable_tx_channels(struct ibm_ocp_mal *mal,
                                           u32 chanmask)
{
    set_mal_dcrn(mal, DCRN_MALTXCARR, chanmask);
}

static inline void mal_enable_rx_channels(struct ibm_ocp_mal *mal, u32 chanmask)
{
    set_mal_dcrn(mal, DCRN_MALRXCASR,
                 get_mal_dcrn(mal, DCRN_MALRXCASR) | chanmask);
}

static inline void mal_disable_rx_channels(struct ibm_ocp_mal *mal,
                                           u32 chanmask)
{
    set_mal_dcrn(mal, DCRN_MALRXCARR, chanmask);
}

extern int mal_register_commac(struct ibm_ocp_mal *mal,
                               struct mal_commac *commac);
extern int mal_unregister_commac(struct ibm_ocp_mal *mal,
                                 struct mal_commac *commac);
extern int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel,
                        unsigned long size);
#endif
/* _IBM_EMAC_MAL_H */
drivers/net/ibm_emac/ibm_emac_phy.c
0 → 100644
/*
* ibm_ocp_phy.c
*
* PHY drivers for the ibm ocp ethernet driver. Borrowed
* from sungem_phy.c, though I only kept the generic MII
* driver for now.
*
* This file should be shared with other drivers or eventually
* merged as the "low level" part of miilib
*
* (c) 2003, Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include "ibm_emac_phy.h"
static int reset_one_mii_phy(struct mii_phy *phy, int phy_id)
{
    u16 val;
    int limit = 10000;

    val = __phy_read(phy, phy_id, MII_BMCR);
    val &= ~BMCR_ISOLATE;
    val |= BMCR_RESET;
    __phy_write(phy, phy_id, MII_BMCR, val);

    udelay(100);

    while (limit--) {
        val = __phy_read(phy, phy_id, MII_BMCR);
        if ((val & BMCR_RESET) == 0)
            break;
        udelay(10);
    }
    if ((val & BMCR_ISOLATE) && limit > 0)
        __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE);

    return (limit <= 0);
}

static int cis8201_init(struct mii_phy *phy)
{
    u16 epcr;

    epcr = phy_read(phy, MII_CIS8201_EPCR);
    epcr &= ~EPCR_MODE_MASK;

    switch (phy->mode) {
    case PHY_MODE_TBI:
        epcr |= EPCR_TBI_MODE;
        break;
    case PHY_MODE_RTBI:
        epcr |= EPCR_RTBI_MODE;
        break;
    case PHY_MODE_GMII:
        epcr |= EPCR_GMII_MODE;
        break;
    case PHY_MODE_RGMII:
    default:
        epcr |= EPCR_RGMII_MODE;
    }

    phy_write(phy, MII_CIS8201_EPCR, epcr);

    return 0;
}

static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
{
    u16 ctl, adv;

    phy->autoneg = 1;
    phy->speed = SPEED_10;
    phy->duplex = DUPLEX_HALF;
    phy->pause = 0;
    phy->advertising = advertise;

    /* Setup standard advertise */
    adv = phy_read(phy, MII_ADVERTISE);
    adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
    if (advertise & ADVERTISED_10baseT_Half)
        adv |= ADVERTISE_10HALF;
    if (advertise & ADVERTISED_10baseT_Full)
        adv |= ADVERTISE_10FULL;
    if (advertise & ADVERTISED_100baseT_Half)
        adv |= ADVERTISE_100HALF;
    if (advertise & ADVERTISED_100baseT_Full)
        adv |= ADVERTISE_100FULL;
    phy_write(phy, MII_ADVERTISE, adv);

    /* Start/Restart aneg */
    ctl = phy_read(phy, MII_BMCR);
    ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
    phy_write(phy, MII_BMCR, ctl);

    return 0;
}

static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
{
    u16 ctl;

    phy->autoneg = 0;
    phy->speed = speed;
    phy->duplex = fd;
    phy->pause = 0;

    ctl = phy_read(phy, MII_BMCR);
    ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);

    /* First reset the PHY */
    phy_write(phy, MII_BMCR, ctl | BMCR_RESET);

    /* Select speed & duplex */
    switch (speed) {
    case SPEED_10:
        break;
    case SPEED_100:
        ctl |= BMCR_SPEED100;
        break;
    case SPEED_1000:
    default:
        return -EINVAL;
    }
    if (fd == DUPLEX_FULL)
        ctl |= BMCR_FULLDPLX;
    phy_write(phy, MII_BMCR, ctl);

    return 0;
}

static int genmii_poll_link(struct mii_phy *phy)
{
    u16 status;

    (void)phy_read(phy, MII_BMSR);
    status = phy_read(phy, MII_BMSR);
    if ((status & BMSR_LSTATUS) == 0)
        return 0;
    if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
        return 0;
    return 1;
}

#define MII_CIS8201_ACSR	0x1c
#define ACSR_DUPLEX_STATUS	0x0020
#define ACSR_SPEED_1000BASET	0x0010
#define ACSR_SPEED_100BASET	0x0008

static int cis8201_read_link(struct mii_phy *phy)
{
    u16 acsr;

    if (phy->autoneg) {
        acsr = phy_read(phy, MII_CIS8201_ACSR);

        if (acsr & ACSR_DUPLEX_STATUS)
            phy->duplex = DUPLEX_FULL;
        else
            phy->duplex = DUPLEX_HALF;
        if (acsr & ACSR_SPEED_1000BASET) {
            phy->speed = SPEED_1000;
        } else if (acsr & ACSR_SPEED_100BASET)
            phy->speed = SPEED_100;
        else
            phy->speed = SPEED_10;
        phy->pause = 0;
    }
    /* On non-aneg, we assume what we put in BMCR is the speed,
     * though magic-aneg shouldn't prevent this case from occurring
     */

    return 0;
}

static int genmii_read_link(struct mii_phy *phy)
{
    u16 lpa;

    if (phy->autoneg) {
        lpa = phy_read(phy, MII_LPA);

        if (lpa & (LPA_10FULL | LPA_100FULL))
            phy->duplex = DUPLEX_FULL;
        else
            phy->duplex = DUPLEX_HALF;
        if (lpa & (LPA_100FULL | LPA_100HALF))
            phy->speed = SPEED_100;
        else
            phy->speed = SPEED_10;
        phy->pause = 0;
    }
    /* On non-aneg, we assume what we put in BMCR is the speed,
     * though magic-aneg shouldn't prevent this case from occurring
     */

    return 0;
}

#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
			    SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
			    SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII)
#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
			   SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)

/* CIS8201 phy ops */
static struct mii_phy_ops cis8201_phy_ops = {
    init:cis8201_init,
    setup_aneg:genmii_setup_aneg,
    setup_forced:genmii_setup_forced,
    poll_link:genmii_poll_link,
    read_link:cis8201_read_link
};

/* Generic implementation for most 10/100 PHYs */
static struct mii_phy_ops generic_phy_ops = {
    setup_aneg:genmii_setup_aneg,
    setup_forced:genmii_setup_forced,
    poll_link:genmii_poll_link,
    read_link:genmii_read_link
};

static struct mii_phy_def cis8201_phy_def = {
    phy_id:0x000fc410,
    phy_id_mask:0x000ffff0,
    name:"CIS8201 Gigabit Ethernet",
    features:MII_GBIT_FEATURES,
    magic_aneg:0,
    ops:&cis8201_phy_ops
};

static struct mii_phy_def genmii_phy_def = {
    phy_id:0x00000000,
    phy_id_mask:0x00000000,
    name:"Generic MII",
    features:MII_BASIC_FEATURES,
    magic_aneg:0,
    ops:&generic_phy_ops
};

static struct mii_phy_def *mii_phy_table[] = {
    &cis8201_phy_def,
    &genmii_phy_def,
    NULL
};

int mii_phy_probe(struct mii_phy *phy, int mii_id)
{
    int rc;
    u32 id;
    struct mii_phy_def *def;
    int i;

    phy->autoneg = 0;
    phy->advertising = 0;
    phy->mii_id = mii_id;
    phy->speed = 0;
    phy->duplex = 0;
    phy->pause = 0;

    /* Take PHY out of isolate mode and reset it. */
    rc = reset_one_mii_phy(phy, mii_id);
    if (rc)
        return -ENODEV;

    /* Read ID and find matching entry */
    id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2))
        & 0xfffffff0;
    for (i = 0; (def = mii_phy_table[i]) != NULL; i++)
        if ((id & def->phy_id_mask) == def->phy_id)
            break;
    /* Should never be NULL (we have a generic entry), but... */
    if (def == NULL)
        return -ENODEV;

    phy->def = def;

    /* Setup default advertising */
    phy->advertising = def->features;

    return 0;
}

MODULE_LICENSE("GPL");
drivers/net/ibm_emac/ibm_emac_phy.h
0 → 100644
/*
* ibm_emac_phy.h
*
*
* Benjamin Herrenschmidt <benh@kernel.crashing.org>
* February 2003
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* This file basically duplicates sungem_phy.{c,h} with different PHYs
* supported. I'm looking into merging that in a single mii layer more
* flexible than mii.c
*/
#ifndef _IBM_EMAC_PHY_H_
#define _IBM_EMAC_PHY_H_
/*
* PHY mode settings
* Used for multi-mode capable PHYs
*/
#define PHY_MODE_NA 0
#define PHY_MODE_MII 1
#define PHY_MODE_RMII 2
#define PHY_MODE_SMII 3
#define PHY_MODE_RGMII 4
#define PHY_MODE_TBI 5
#define PHY_MODE_GMII 6
#define PHY_MODE_RTBI 7
#define PHY_MODE_SGMII 8
/*
* PHY specific registers/values
*/
/* CIS8201 */
#define MII_CIS8201_EPCR 0x17
#define EPCR_MODE_MASK 0x3000
#define EPCR_GMII_MODE 0x0000
#define EPCR_RGMII_MODE 0x1000
#define EPCR_TBI_MODE 0x2000
#define EPCR_RTBI_MODE 0x3000
struct mii_phy;

/* Operations supported by any kind of PHY */
struct mii_phy_ops {
    int (*init) (struct mii_phy * phy);
    int (*suspend) (struct mii_phy * phy, int wol_options);
    int (*setup_aneg) (struct mii_phy * phy, u32 advertise);
    int (*setup_forced) (struct mii_phy * phy, int speed, int fd);
    int (*poll_link) (struct mii_phy * phy);
    int (*read_link) (struct mii_phy * phy);
};

/* Structure used to statically define an mii/gii based PHY */
struct mii_phy_def {
    u32 phy_id;			/* Concatenated ID1 << 16 | ID2 */
    u32 phy_id_mask;		/* Significant bits */
    u32 features;		/* Ethtool SUPPORTED_* defines */
    int magic_aneg;		/* Autoneg does all speed test for us */
    const char *name;
    const struct mii_phy_ops *ops;
};

/* An instance of a PHY, partially borrowed from mii_if_info */
struct mii_phy {
    struct mii_phy_def *def;
    int advertising;
    int mii_id;

    /* 1: autoneg enabled, 0: disabled */
    int autoneg;

    /* forced speed & duplex (no autoneg)
     * partner speed & duplex & pause (autoneg)
     */
    int speed;
    int duplex;
    int pause;

    /* PHY mode - if needed */
    int mode;

    /* Provided by host chip */
    struct net_device *dev;
    int (*mdio_read) (struct net_device * dev, int mii_id, int reg);
    void (*mdio_write) (struct net_device * dev, int mii_id, int reg,
                        int val);
};

/* Pass in a struct mii_phy with dev, mdio_read and mdio_write
 * filled, the remaining fields will be filled on return
 */
extern int mii_phy_probe(struct mii_phy *phy, int mii_id);

static inline int __phy_read(struct mii_phy *phy, int id, int reg)
{
    return phy->mdio_read(phy->dev, id, reg);
}

static inline void __phy_write(struct mii_phy *phy, int id, int reg, int val)
{
    phy->mdio_write(phy->dev, id, reg, val);
}

static inline int phy_read(struct mii_phy *phy, int reg)
{
    return phy->mdio_read(phy->dev, phy->mii_id, reg);
}

static inline void phy_write(struct mii_phy *phy, int reg, int val)
{
    phy->mdio_write(phy->dev, phy->mii_id, reg, val);
}
#endif
/* _IBM_EMAC_PHY_H_ */
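/* Usage sketch for the probe contract above (illustration only, not part
 * of this header): the caller supplies dev plus the two MDIO accessors,
 * and mii_phy_probe() fills in def and the default advertising mask.
 * The accessor names below are placeholders, not functions defined in
 * this patch.
 *
 *	struct mii_phy phy;
 *
 *	phy.dev = ndev;
 *	phy.mdio_read = my_mdio_read;
 *	phy.mdio_write = my_mdio_write;
 *	phy.mode = PHY_MODE_RGMII;	// only used by multi-mode PHYs
 *
 *	if (mii_phy_probe(&phy, phy_addr) == 0)
 *		phy.def->ops->setup_aneg(&phy, phy.advertising);
 */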
drivers/net/ibm_emac/ibm_emac_rgmii.h
0 → 100644
/*
* Defines for the IBM RGMII bridge
*
* Based on ocp_zmii.h/ibm_emac_zmii.h
* Armin Kuster akuster@mvista.com
*
* Copyright 2004 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef _IBM_EMAC_RGMII_H_
#define _IBM_EMAC_RGMII_H_
#include <linux/config.h>
/* RGMII bridge */
typedef struct rgmii_regs {
    u32 fer;		/* Function enable register */
    u32 ssr;		/* Speed select register */
} rgmii_t;

#define RGMII_INPUTS 4

/* RGMII device */
struct ibm_ocp_rgmii {
    struct rgmii_regs *base;
    int mode[RGMII_INPUTS];
    int users;		/* number of EMACs using this RGMII bridge */
};
/* Functional Enable Reg */
#define RGMII_FER_MASK(x) (0x00000007 << (4*x))
#define RGMII_RTBI 0x00000004
#define RGMII_RGMII 0x00000005
#define RGMII_TBI 0x00000006
#define RGMII_GMII 0x00000007
/* Speed Selection reg */
#define RGMII_SP2_100 0x00000002
#define RGMII_SP2_1000 0x00000004
#define RGMII_SP3_100 0x00000200
#define RGMII_SP3_1000 0x00000400
#define RGMII_MII2_SPDMASK 0x00000007
#define RGMII_MII3_SPDMASK 0x00000700
#define RGMII_MII2_100MB RGMII_SP2_100 & ~RGMII_SP2_1000
#define RGMII_MII2_1000MB RGMII_SP2_1000 & ~RGMII_SP2_100
#define RGMII_MII2_10MB ~(RGMII_SP2_100 | RGMII_SP2_1000)
#define RGMII_MII3_100MB RGMII_SP3_100 & ~RGMII_SP3_1000
#define RGMII_MII3_1000MB RGMII_SP3_1000 & ~RGMII_SP3_100
#define RGMII_MII3_10MB ~(RGMII_SP3_100 | RGMII_SP3_1000)
#define RTBI 0
#define RGMII 1
#define TBI 2
#define GMII 3
#endif
/* _IBM_EMAC_RGMII_H_ */
drivers/net/ibm_emac/ibm_emac_tah.h
0 → 100644
/*
* Defines for the IBM TAH
*
* Copyright 2004 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef _IBM_EMAC_TAH_H
#define _IBM_EMAC_TAH_H
/* TAH */
typedef struct tah_regs {
    u32 tah_revid;
    u32 pad[3];
    u32 tah_mr;
    u32 tah_ssr0;
    u32 tah_ssr1;
    u32 tah_ssr2;
    u32 tah_ssr3;
    u32 tah_ssr4;
    u32 tah_ssr5;
    u32 tah_tsr;
} tah_t;
/* TAH engine */
#define TAH_MR_CVR 0x80000000
#define TAH_MR_SR 0x40000000
#define TAH_MR_ST_256 0x01000000
#define TAH_MR_ST_512 0x02000000
#define TAH_MR_ST_768 0x03000000
#define TAH_MR_ST_1024 0x04000000
#define TAH_MR_ST_1280 0x05000000
#define TAH_MR_ST_1536 0x06000000
#define TAH_MR_TFS_16KB 0x00000000
#define TAH_MR_TFS_2KB 0x00200000
#define TAH_MR_TFS_4KB 0x00400000
#define TAH_MR_TFS_6KB 0x00600000
#define TAH_MR_TFS_8KB 0x00800000
#define TAH_MR_TFS_10KB 0x00a00000
#define TAH_MR_DTFP 0x00100000
#define TAH_MR_DIG 0x00080000
#endif
/* _IBM_EMAC_TAH_H */
drivers/net/ibm_emac/ibm_emac_zmii.h
0 → 100644
/*
* ocp_zmii.h
*
* Defines for the IBM ZMII bridge
*
* Armin Kuster akuster@mvista.com
* Dec, 2001
*
* Copyright 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef _IBM_EMAC_ZMII_H_
#define _IBM_EMAC_ZMII_H_
#include <linux/config.h>
/* ZMII bridge registers */
struct zmii_regs {
    u32 fer;		/* Function enable reg */
    u32 ssr;		/* Speed select reg */
    u32 smiirs;		/* SMII status reg */
};

#define ZMII_INPUTS 4

/* ZMII device */
struct ibm_ocp_zmii {
    struct zmii_regs *base;
    int mode[ZMII_INPUTS];
    int users;		/* number of EMACs using this ZMII bridge */
};
/* Functional Enable Reg */
#define ZMII_FER_MASK(x) (0xf0000000 >> (4*x))
#define ZMII_MDI0 0x80000000
#define ZMII_SMII0 0x40000000
#define ZMII_RMII0 0x20000000
#define ZMII_MII0 0x10000000
#define ZMII_MDI1 0x08000000
#define ZMII_SMII1 0x04000000
#define ZMII_RMII1 0x02000000
#define ZMII_MII1 0x01000000
#define ZMII_MDI2 0x00800000
#define ZMII_SMII2 0x00400000
#define ZMII_RMII2 0x00200000
#define ZMII_MII2 0x00100000
#define ZMII_MDI3 0x00080000
#define ZMII_SMII3 0x00040000
#define ZMII_RMII3 0x00020000
#define ZMII_MII3 0x00010000
/* Speed Selection reg */
#define ZMII_SCI0 0x40000000
#define ZMII_FSS0 0x20000000
#define ZMII_SP0 0x10000000
#define ZMII_SCI1 0x04000000
#define ZMII_FSS1 0x02000000
#define ZMII_SP1 0x01000000
#define ZMII_SCI2 0x00400000
#define ZMII_FSS2 0x00200000
#define ZMII_SP2 0x00100000
#define ZMII_SCI3 0x00040000
#define ZMII_FSS3 0x00020000
#define ZMII_SP3 0x00010000
#define ZMII_MII0_100MB ZMII_SP0
#define ZMII_MII0_10MB ~ZMII_SP0
#define ZMII_MII1_100MB ZMII_SP1
#define ZMII_MII1_10MB ~ZMII_SP1
#define ZMII_MII2_100MB ZMII_SP2
#define ZMII_MII2_10MB ~ZMII_SP2
#define ZMII_MII3_100MB ZMII_SP3
#define ZMII_MII3_10MB ~ZMII_SP3
/* SMII Status reg */
#define ZMII_STS0 0xFF000000
/* EMAC0 smii status mask */
#define ZMII_STS1 0x00FF0000
/* EMAC1 smii status mask */
#define SMII 0
#define RMII 1
#define MII 2
#define MDI 3
#endif
/* _IBM_EMAC_ZMII_H_ */
drivers/net/wan/farsync.c
/*
/*
* FarSync
X21 driver for Linux (generic HDLC
version)
* FarSync
WAN driver for Linux (2.6.x kernel
version)
*
*
* Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards
* Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards
*
*
* Copyright (C) 2001 FarSite Communications Ltd.
* Copyright (C) 2001
-2004
FarSite Communications Ltd.
* www.farsite.co.uk
* www.farsite.co.uk
*
*
* This program is free software; you can redistribute it and/or
* This program is free software; you can redistribute it and/or
...
@@ -12,10 +12,12 @@
...
@@ -12,10 +12,12 @@
* 2 of the License, or (at your option) any later version.
* 2 of the License, or (at your option) any later version.
*
*
* Author: R.J.Dunlop <bob.dunlop@farsite.co.uk>
* Author: R.J.Dunlop <bob.dunlop@farsite.co.uk>
* Maintainer: Kevin Curtis <kevin.curtis@farsite.co.uk>
*/
*/
#include <linux/module.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/pci.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/init.h>
...
@@ -25,29 +27,26 @@
...
@@ -25,29 +27,26 @@
#include "farsync.h"
#include "farsync.h"
/*
/*
* Module info
* Module info
*/
*/
MODULE_AUTHOR
(
"R.J.Dunlop <bob.dunlop@farsite.co.uk>"
);
MODULE_AUTHOR
(
"R.J.Dunlop <bob.dunlop@farsite.co.uk>"
);
MODULE_DESCRIPTION
(
"FarSync T-Series X21 driver. FarSite Communications Ltd."
);
MODULE_DESCRIPTION
(
"FarSync T-Series WAN driver. FarSite Communications Ltd."
);
MODULE_PARM
(
fst_txq_low
,
"i"
);
MODULE_PARM
(
fst_txq_high
,
"i"
);
MODULE_PARM
(
fst_max_reads
,
"i"
);
MODULE_PARM
(
fst_excluded_cards
,
"i"
);
MODULE_PARM
(
fst_excluded_list
,
"0-32i"
);
MODULE_LICENSE
(
"GPL"
);
MODULE_LICENSE
(
"GPL"
);
/* Driver configuration and global parameters
/* Driver configuration and global parameters
* ==========================================
* ==========================================
*/
*/
/* Number of ports (per card) supported
/* Number of ports (per card)
and cards
supported
*/
*/
#define FST_MAX_PORTS 4
#define FST_MAX_PORTS 4
#define FST_MAX_CARDS 32
/* PCI vendor and device IDs
*/
#define FSC_PCI_VENDOR_ID 0x1619
/* FarSite Communications Ltd */
#define T2P_PCI_DEVICE_ID 0x0400
/* T2P X21 2 port card */
#define T4P_PCI_DEVICE_ID 0x0440
/* T4P X21 4 port card */
/* Default parameters for the link
/* Default parameters for the link
*/
*/
...
@@ -56,18 +55,34 @@ MODULE_LICENSE("GPL");
...
@@ -56,18 +55,34 @@ MODULE_LICENSE("GPL");
* this down assuming a slower line I
* this down assuming a slower line I
* guess.
* guess.
*/
*/
#define FST_TXQ_DEPTH 16
/* This one is for the buffering
* of frames on the way down to the card
* so that we can keep the card busy
* and maximise throughput
*/
#define FST_HIGH_WATER_MARK 12
/* Point at which we flow control
* network layer */
#define FST_LOW_WATER_MARK 8
/* Point at which we remove flow
* control from network layer */
#define FST_MAX_MTU 8000
/* Huge but possible */
#define FST_MAX_MTU 8000
/* Huge but possible */
#define FST_DEF_MTU 1500
/* Common sane value */
#define FST_DEF_MTU 1500
/* Common sane value */
#define FST_TX_TIMEOUT (2*HZ)
#define FST_TX_TIMEOUT (2*HZ)
#ifdef ARPHRD_RAWHDLC
#ifdef ARPHRD_RAWHDLC
#define ARPHRD_MYTYPE ARPHRD_RAWHDLC
/* Raw frames */
#define ARPHRD_MYTYPE ARPHRD_RAWHDLC
/* Raw frames */
#else
#else
#define ARPHRD_MYTYPE ARPHRD_HDLC
/* Cisco-HDLC (keepalives etc) */
#define ARPHRD_MYTYPE ARPHRD_HDLC
/* Cisco-HDLC (keepalives etc) */
#endif
#endif
/*
* Module parameters and associated variables
*/
int
fst_txq_low
=
FST_LOW_WATER_MARK
;
int
fst_txq_high
=
FST_HIGH_WATER_MARK
;
int
fst_max_reads
=
7
;
int
fst_excluded_cards
=
0
;
int
fst_excluded_list
[
FST_MAX_CARDS
];
/* Card shared memory layout
/* Card shared memory layout
* =========================
* =========================
...
@@ -84,7 +99,7 @@ MODULE_LICENSE("GPL");
...
@@ -84,7 +99,7 @@ MODULE_LICENSE("GPL");
* be used to check that we have not got out of step with the firmware
* be used to check that we have not got out of step with the firmware
* contained in the .CDE files.
* contained in the .CDE files.
*/
*/
#define SMC_VERSION
11
#define SMC_VERSION
24
#define FST_MEMSIZE 0x100000
/* Size of card memory (1Mb) */
#define FST_MEMSIZE 0x100000
/* Size of card memory (1Mb) */
...
@@ -105,7 +120,6 @@ MODULE_LICENSE("GPL");
...
@@ -105,7 +120,6 @@ MODULE_LICENSE("GPL");
/* Interrupt retry time in milliseconds */
/* Interrupt retry time in milliseconds */
#define INT_RETRY_TIME 2
#define INT_RETRY_TIME 2
/* The Am186CH/CC processors support a SmartDMA mode using circular pools
/* The Am186CH/CC processors support a SmartDMA mode using circular pools
* of buffer descriptors. The structure is almost identical to that used
* of buffer descriptors. The structure is almost identical to that used
* in the LANCE Ethernet controllers. Details available as PDF from the
* in the LANCE Ethernet controllers. Details available as PDF from the
...
@@ -157,8 +171,7 @@ struct rxdesc { /* Receive descriptor */
...
@@ -157,8 +171,7 @@ struct rxdesc { /* Receive descriptor */
#define RX_STP 0x02
/* Rx: start of packet */
#define RX_STP 0x02
/* Rx: start of packet */
#define RX_ENP 0x01
/* Rx: end of packet */
#define RX_ENP 0x01
/* Rx: end of packet */
/* Interrupts from the card are caused by various events which are presented
/* Interrupts from the card are caused by various events and these are presented
* in a circular buffer as several events may be processed on one physical int
* in a circular buffer as several events may be processed on one physical int
*/
*/
#define MAX_CIRBUFF 32
#define MAX_CIRBUFF 32
...
@@ -190,15 +203,58 @@ struct cirbuff {
...
@@ -190,15 +203,58 @@ struct cirbuff {
#define TXC_UNDF 0x2A
#define TXC_UNDF 0x2A
#define TXD_UNDF 0x2B
#define TXD_UNDF 0x2B
#define F56_INT 0x2C
#define M32_INT 0x2D
#define TE1_ALMA 0x30
/* Port physical configuration. See farsync.h for field values */
/* Port physical configuration. See farsync.h for field values */
struct
port_cfg
{
struct
port_cfg
{
u16
lineInterface
;
/* Physical interface type */
u16
lineInterface
;
/* Physical interface type */
u8
x25op
;
/* Unused at present */
u8
x25op
;
/* Unused at present */
u8
internalClock
;
/* 1 => internal clock, 0 => external */
u8
internalClock
;
/* 1 => internal clock, 0 => external */
u8
transparentMode
;
/* 1 => on, 0 => off */
u8
invertClock
;
/* 0 => normal, 1 => inverted */
u8
padBytes
[
6
];
/* Padding */
u32
lineSpeed
;
/* Speed in bps */
u32
lineSpeed
;
/* Speed in bps */
};
};
/* TE1 port physical configuration */
struct
su_config
{
u32
dataRate
;
u8
clocking
;
u8
framing
;
u8
structure
;
u8
interface
;
u8
coding
;
u8
lineBuildOut
;
u8
equalizer
;
u8
transparentMode
;
u8
loopMode
;
u8
range
;
u8
txBufferMode
;
u8
rxBufferMode
;
u8
startingSlot
;
u8
losThreshold
;
u8
enableIdleCode
;
u8
idleCode
;
u8
spare
[
44
];
};
/* TE1 Status */
struct
su_status
{
u32
receiveBufferDelay
;
u32
framingErrorCount
;
u32
codeViolationCount
;
u32
crcErrorCount
;
u32
lineAttenuation
;
u8
portStarted
;
u8
lossOfSignal
;
u8
receiveRemoteAlarm
;
u8
alarmIndicationSignal
;
u8
spare
[
40
];
};
/* Finally sling all the above together into the shared memory structure.
/* Finally sling all the above together into the shared memory structure.
* Sorry it's a hodge podge of arrays, structures and unused bits, it's been
* Sorry it's a hodge podge of arrays, structures and unused bits, it's been
* evolving under NT for some time so I guess we're stuck with it.
* evolving under NT for some time so I guess we're stuck with it.
...
@@ -256,14 +312,14 @@ struct fst_shared {
...
@@ -256,14 +312,14 @@ struct fst_shared {
u16
portMailbox
[
FST_MAX_PORTS
][
2
];
/* command, modifier */
u16
portMailbox
[
FST_MAX_PORTS
][
2
];
/* command, modifier */
u16
cardMailbox
[
4
];
/* Not used */
u16
cardMailbox
[
4
];
/* Not used */
/* Number of times that
card thinks the host has
/* Number of times the
card thinks the host has
* missed an interrupt by not acknowledging
* missed an interrupt by not acknowledging
* within 2mS (I guess NT has problems)
* within 2mS (I guess NT has problems)
*/
*/
u32
interruptRetryCount
;
u32
interruptRetryCount
;
/* Driver private data used as an ID. We'll not
/* Driver private data used as an ID. We'll not
* use this on Linux
I'd rather keep such things
* use this as
I'd rather keep such things
* in main memory rather than on the PCI bus
* in main memory rather than on the PCI bus
*/
*/
u32
portHandle
[
FST_MAX_PORTS
];
u32
portHandle
[
FST_MAX_PORTS
];
...
@@ -290,9 +346,12 @@ struct fst_shared {
...
@@ -290,9 +346,12 @@ struct fst_shared {
u16
portScheduleOffset
;
u16
portScheduleOffset
;
struct
su_config
suConfig
;
/* TE1 Bits */
struct
su_status
suStatus
;
u32
endOfSmcSignature
;
/* endOfSmcSignature MUST be the last member of
u32
endOfSmcSignature
;
/* endOfSmcSignature MUST be the last member of
* the structure and marks the end of the
shared
* the structure and marks the end of
shared
* memory. Adapter code initializes its value
as
* memory. Adapter code initializes it
as
* END_SIG.
* END_SIG.
*/
*/
};
};
...
@@ -309,6 +368,40 @@ struct fst_shared {
...
@@ -309,6 +368,40 @@ struct fst_shared {
#define ABORTTX 5
/* Abort the transmitter for a port */
#define ABORTTX 5
/* Abort the transmitter for a port */
#define SETV24O 6
/* Set V24 outputs */
#define SETV24O 6
/* Set V24 outputs */
/* PLX Chip Register Offsets */
#define CNTRL_9052 0x50
/* Control Register */
#define CNTRL_9054 0x6c
/* Control Register */
#define INTCSR_9052 0x4c
/* Interrupt control/status register */
#define INTCSR_9054 0x68
/* Interrupt control/status register */
/* 9054 DMA Registers */
/*
* Note that we will be using DMA Channel 0 for copying rx data
* and Channel 1 for copying tx data
*/
#define DMAMODE0 0x80
#define DMAPADR0 0x84
#define DMALADR0 0x88
#define DMASIZ0 0x8c
#define DMADPR0 0x90
#define DMAMODE1 0x94
#define DMAPADR1 0x98
#define DMALADR1 0x9c
#define DMASIZ1 0xa0
#define DMADPR1 0xa4
#define DMACSR0 0xa8
#define DMACSR1 0xa9
#define DMAARB 0xac
#define DMATHR 0xb0
#define DMADAC0 0xb4
#define DMADAC1 0xb8
#define DMAMARBR 0xac
#define FST_MIN_DMA_LEN 64
#define FST_RX_DMA_INT 0x01
#define FST_TX_DMA_INT 0x02
#define FST_CARD_INT 0x04
/* Larger buffers are positioned in memory at offset BFM_BASE */
/* Larger buffers are positioned in memory at offset BFM_BASE */
struct
buf_window
{
struct
buf_window
{
...
@@ -317,26 +410,33 @@ struct buf_window {
...
@@ -317,26 +410,33 @@ struct buf_window {
};
};
/* Calculate offset of a buffer object within the shared memory window */
/* Calculate offset of a buffer object within the shared memory window */
#define BUF_OFFSET(X)
offsetof(struct buf_window, X
)
#define BUF_OFFSET(X)
((unsigned int)&(((struct buf_window *)BFM_BASE)->X)
)
#pragma pack()
#pragma pack()
/* Device driver private information
/* Device driver private information
* =================================
* =================================
*/
*/
/* Per port (line or channel) information
/* Per port (line or channel) information
*/
*/
struct
fst_port_info
{
struct
fst_port_info
{
struct
net_device
*
dev
;
struct
net_device
*
dev
;
/* Device struct - must be first */
struct
fst_card_info
*
card
;
/* Card we're associated with */
struct
fst_card_info
*
card
;
/* Card we're associated with */
int
index
;
/* Port index on the card */
int
index
;
/* Port index on the card */
int
hwif
;
/* Line hardware (lineInterface copy) */
int
hwif
;
/* Line hardware (lineInterface copy) */
int
run
;
/* Port is running */
int
run
;
/* Port is running */
int
mode
;
/* Normal or FarSync raw */
int
rxpos
;
/* Next Rx buffer to use */
int
rxpos
;
/* Next Rx buffer to use */
int
txpos
;
/* Next Tx buffer to use */
int
txpos
;
/* Next Tx buffer to use */
int
txipos
;
/* Next Tx buffer to check for free */
int
txipos
;
/* Next Tx buffer to check for free */
int
txcnt
;
/* Count of Tx buffers in use */
int
start
;
/* Indication of start/stop to network */
/*
* A sixteen entry transmit queue
*/
int
txqs
;
/* index to get next buffer to tx */
int
txqe
;
/* index to queue next packet */
struct
sk_buff
*
txq
[
FST_TXQ_DEPTH
];
/* The queue */
int
rxqdepth
;
};
};
/* Per card information
...
@@ -353,7 +453,25 @@ struct fst_card_info {
	spinlock_t card_lock;		/* Lock for SMP access */
	unsigned short pci_conf;	/* PCI card config in I/O space */
	/* Per port info */
	struct fst_port_info ports[FST_MAX_PORTS];
	struct pci_dev *device;		/* Information about the pci device */
	int card_no;			/* Inst of the card on the system */
	int family;			/* TxP or TxU */
	int dmarx_in_progress;
	int dmatx_in_progress;
	unsigned long int_count;
	unsigned long int_time_ave;
	void *rx_dma_handle_host;
	dma_addr_t rx_dma_handle_card;
	void *tx_dma_handle_host;
	dma_addr_t tx_dma_handle_card;
	struct sk_buff *dma_skb_rx;
	struct fst_port_info *dma_port_rx;
	struct fst_port_info *dma_port_tx;
	int dma_len_rx;
	int dma_len_tx;
	int dma_txpos;
	int dma_rxpos;
};
/* Convert an HDLC device pointer into a port info pointer and similar */
...
@@ -380,7 +498,6 @@ struct fst_card_info {
#define FST_WRW(C,E,W)	writew ((W), (C)->mem + WIN_OFFSET(E))
#define FST_WRL(C,E,L)	writel ((L), (C)->mem + WIN_OFFSET(E))

/*
 *      Debug support
 */
...
@@ -399,30 +516,151 @@ static int fst_debug_mask = { FST_DEBUG };
        printk ( KERN_DEBUG FST_NAME ": " fmt, ## A )
#else
#define dbg(X...)		/* NOP */
#endif

/* Printing short cuts
 */
#define printk_err(fmt,A...)	printk ( KERN_ERR FST_NAME ": " fmt, ## A )
#define printk_warn(fmt,A...)	printk ( KERN_WARNING FST_NAME ": " fmt, ## A )
#define printk_info(fmt,A...)	printk ( KERN_INFO FST_NAME ": " fmt, ## A )
/*
 *      PCI ID lookup table
 */
static struct pci_device_id fst_pci_dev_id[] __devinitdata = {
	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
	 PCI_ANY_ID, 0, 0, FST_TYPE_T2P},

	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4P, PCI_ANY_ID,
	 PCI_ANY_ID, 0, 0, FST_TYPE_T4P},

	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T1U, PCI_ANY_ID,
	 PCI_ANY_ID, 0, 0, FST_TYPE_T1U},

	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2U, PCI_ANY_ID,
	 PCI_ANY_ID, 0, 0, FST_TYPE_T2U},

	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4U, PCI_ANY_ID,
	 PCI_ANY_ID, 0, 0, FST_TYPE_T4U},

	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1, PCI_ANY_ID,
	 PCI_ANY_ID, 0, 0, FST_TYPE_TE1},

	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1C, PCI_ANY_ID,
	 PCI_ANY_ID, 0, 0, FST_TYPE_TE1},

	{0,}			/* End */
};

MODULE_DEVICE_TABLE(pci, fst_pci_dev_id);
/*
 *      Device Driver Work Queues
 *
 * So that we don't spend too much time processing events in the
 * Interrupt Service routine, we will declare a work queue per Card
 * and make the ISR schedule a task in the queue for later execution.
 * In the 2.4 Kernel we used to use the immediate queue for BH's
 * Now that they are gone, tasklets seem to be much better than work
 * queues.
 */

static void do_bottom_half_tx(struct fst_card_info *card);
static void do_bottom_half_rx(struct fst_card_info *card);
static void fst_process_tx_work_q(unsigned long work_q);
static void fst_process_int_work_q(unsigned long work_q);

DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0);
DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0);

struct fst_card_info *fst_card_array[FST_MAX_CARDS];
spinlock_t fst_work_q_lock;
u64 fst_work_txq;
u64 fst_work_intq;

static void
fst_q_work_item(u64 * queue, int card_index)
{
	unsigned long flags;
	u64 mask;

	/*
	 * Grab the queue exclusively
	 */
	spin_lock_irqsave(&fst_work_q_lock, flags);

	/*
	 * Making an entry in the queue is simply a matter of setting
	 * a bit for the card indicating that there is work to do in the
	 * bottom half for the card.  Note the limitation of 64 cards.
	 * That ought to be enough
	 */
	mask = 1 << card_index;
	*queue |= mask;
	spin_unlock_irqrestore(&fst_work_q_lock, flags);
}

static void
fst_process_tx_work_q(unsigned long /*void **/ work_q)
{
	unsigned long flags;
	u64 work_txq;
	int i;

	/*
	 * Grab the queue exclusively
	 */
	dbg(DBG_TX, "fst_process_tx_work_q\n");
	spin_lock_irqsave(&fst_work_q_lock, flags);
	work_txq = fst_work_txq;
	fst_work_txq = 0;
	spin_unlock_irqrestore(&fst_work_q_lock, flags);

	/*
	 * Call the bottom half for each card with work waiting
	 */
	for (i = 0; i < FST_MAX_CARDS; i++) {
		if (work_txq & 0x01) {
			if (fst_card_array[i] != NULL) {
				dbg(DBG_TX, "Calling tx bh for card %d\n", i);
				do_bottom_half_tx(fst_card_array[i]);
			}
		}
		work_txq = work_txq >> 1;
	}
}

static void
fst_process_int_work_q(unsigned long /*void **/ work_q)
{
	unsigned long flags;
	u64 work_intq;
	int i;

	/*
	 * Grab the queue exclusively
	 */
	dbg(DBG_INTR, "fst_process_int_work_q\n");
	spin_lock_irqsave(&fst_work_q_lock, flags);
	work_intq = fst_work_intq;
	fst_work_intq = 0;
	spin_unlock_irqrestore(&fst_work_q_lock, flags);

	/*
	 * Call the bottom half for each card with work waiting
	 */
	for (i = 0; i < FST_MAX_CARDS; i++) {
		if (work_intq & 0x01) {
			if (fst_card_array[i] != NULL) {
				dbg(DBG_INTR,
				    "Calling rx & tx bh for card %d\n", i);
				do_bottom_half_rx(fst_card_array[i]);
				do_bottom_half_tx(fst_card_array[i]);
			}
		}
		work_intq = work_intq >> 1;
	}
}
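For the reader's benefit (not part of the patch): the producer side of these work words is the interrupt handler, which marks the card's bit and then schedules the tasklet. A minimal usage sketch of the pattern fst_intr() follows further down; the wrapper name is invented here:

```c
/* Illustrative only -- how the ISR hands deferred work to the tasklets:
 * set the card's bit in the shared queue word, then schedule the task.
 */
static inline void queue_card_work_example(struct fst_card_info *card)
{
	fst_q_work_item(&fst_work_intq, card->card_no);
	tasklet_schedule(&fst_int_task);
}
```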
/* Card control functions
 * ======================
 */
...
@@ -432,52 +670,296 @@ MODULE_DEVICE_TABLE ( pci, fst_pci_dev_id );
 * Used to be a simple write to card control space but a glitch in the latest
 * AMD Am186CH processor means that we now have to do it by asserting and de-
 * asserting the PLX chip PCI Adapter Software Reset. Bit 30 in CNTRL register
 * at offset 9052_CNTRL. Note the updates for the TXU.
 */
static inline void
fst_cpureset(struct fst_card_info *card)
{
	unsigned char interrupt_line_register;
	unsigned long j = jiffies + 1;
	unsigned int regval;

	if (card->family == FST_FAMILY_TXU) {
		if (pci_read_config_byte
		    (card->device, PCI_INTERRUPT_LINE,
		     &interrupt_line_register)) {
			dbg(DBG_ASS,
			    "Error in reading interrupt line register\n");
		}
		/*
		 * Assert PLX software reset and Am186 hardware reset
		 * and then deassert the PLX software reset but 186 still in reset
		 */
		outw(0x440f, card->pci_conf + CNTRL_9054 + 2);
		outw(0x040f, card->pci_conf + CNTRL_9054 + 2);

		/*
		 * We are delaying here to allow the 9054 to reset itself
		 */
		j = jiffies + 1;
		while (jiffies < j)
			/* Do nothing */ ;

		outw(0x240f, card->pci_conf + CNTRL_9054 + 2);

		/*
		 * We are delaying here to allow the 9054 to reload its eeprom
		 */
		j = jiffies + 1;
		while (jiffies < j)
			/* Do nothing */ ;

		outw(0x040f, card->pci_conf + CNTRL_9054 + 2);

		if (pci_write_config_byte
		    (card->device, PCI_INTERRUPT_LINE,
		     interrupt_line_register)) {
			dbg(DBG_ASS,
			    "Error in writing interrupt line register\n");
		}
	} else {
		regval = inl(card->pci_conf + CNTRL_9052);

		outl(regval | 0x40000000, card->pci_conf + CNTRL_9052);
		outl(regval & ~0x40000000, card->pci_conf + CNTRL_9052);
	}
}
/* Release the processor from reset
 */
static inline void
fst_cpurelease(struct fst_card_info *card)
{
	if (card->family == FST_FAMILY_TXU) {
		/*
		 * Force posted writes to complete
		 */
		(void) readb(card->mem);

		/*
		 * Release LRESET DO = 1
		 * Then release Local Hold, DO = 1
		 */
		outw(0x040e, card->pci_conf + CNTRL_9054 + 2);
		outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
	} else {
		(void) readb(card->ctlmem);
	}
}

/* Clear the cards interrupt flag
 */
static inline void
fst_clear_intr(struct fst_card_info *card)
{
	if (card->family == FST_FAMILY_TXU) {
		(void) readb(card->ctlmem);
	} else {
		/* Poke the appropriate PLX chip register (same as enabling interrupts)
		 */
		outw(0x0543, card->pci_conf + INTCSR_9052);
	}
}
/* Enable card interrupts
 */
static inline void
fst_enable_intr(struct fst_card_info *card)
{
	if (card->family == FST_FAMILY_TXU) {
		outl(0x0f0c0900, card->pci_conf + INTCSR_9054);
	} else {
		outw(0x0543, card->pci_conf + INTCSR_9052);
	}
}

/* Disable card interrupts
 */
static inline void
fst_disable_intr(struct fst_card_info *card)
{
	if (card->family == FST_FAMILY_TXU) {
		outl(0x00000000, card->pci_conf + INTCSR_9054);
	} else {
		outw(0x0000, card->pci_conf + INTCSR_9052);
	}
}
/* Process the result of trying to pass a received frame up the stack
 */
static void
fst_process_rx_status(int rx_status, char *name)
{
	switch (rx_status) {
	case NET_RX_SUCCESS:
		/*
		 * Nothing to do here
		 */
		break;

	case NET_RX_CN_LOW:
		dbg(DBG_ASS, "%s: Receive Low Congestion\n", name);
		break;

	case NET_RX_CN_MOD:
		dbg(DBG_ASS, "%s: Receive Moderate Congestion\n", name);
		break;

	case NET_RX_CN_HIGH:
		dbg(DBG_ASS, "%s: Receive High Congestion\n", name);
		break;

	case NET_RX_DROP:
		dbg(DBG_ASS, "%s: Received packet dropped\n", name);
		break;
	}
}
/* Initialise DMA for PLX 9054
 */
static inline void
fst_init_dma(struct fst_card_info *card)
{
	/*
	 * This is only required for the PLX 9054
	 */
	if (card->family == FST_FAMILY_TXU) {
		pci_set_master(card->device);
		outl(0x00020441, card->pci_conf + DMAMODE0);
		outl(0x00020441, card->pci_conf + DMAMODE1);
		outl(0x0, card->pci_conf + DMATHR);
	}
}
/* Tx dma complete interrupt
 */
static void
fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
		    int len, int txpos)
{
	struct net_device *dev = port_to_dev(port);
	struct net_device_stats *stats = hdlc_stats(dev);

	/*
	 * Everything is now set, just tell the card to go
	 */
	dbg(DBG_TX, "fst_tx_dma_complete\n");
	FST_WRB(card, txDescrRing[port->index][txpos].bits,
		DMA_OWN | TX_STP | TX_ENP);
	stats->tx_packets++;
	stats->tx_bytes += len;
	dev->trans_start = jiffies;
}

/* Rx dma complete interrupt
 */
static void
fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
		    int len, struct sk_buff *skb, int rxp)
{
	struct net_device *dev = port_to_dev(port);
	struct net_device_stats *stats = hdlc_stats(dev);
	int pi;
	int rx_status;

	dbg(DBG_TX, "fst_rx_dma_complete\n");
	pi = port->index;
	memcpy(skb_put(skb, len), card->rx_dma_handle_host, len);

	/* Reset buffer descriptor */
	FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);

	/* Update stats */
	stats->rx_packets++;
	stats->rx_bytes += len;

	/* Push upstream */
	dbg(DBG_RX, "Pushing the frame up the stack\n");
	skb->mac.raw = skb->data;
	skb->dev = dev;
	if (port->mode == FST_RAW) {
		/*
		 * Mark it for our own raw sockets interface
		 */
		skb->protocol = htons(ETH_P_CUST);
		skb->pkt_type = PACKET_HOST;
	} else {
		skb->protocol = hdlc_type_trans(skb, skb->dev);
	}
	rx_status = netif_rx(skb);
	fst_process_rx_status(rx_status, port_to_dev(port)->name);
	if (rx_status == NET_RX_DROP)
		stats->rx_dropped++;
	dev->last_rx = jiffies;
}
/*
 *      Receive a frame through the DMA
 */
static inline void
fst_rx_dma(struct fst_card_info *card, unsigned char *skb,
	   unsigned char *mem, int len)
{
	/*
	 * This routine will setup the DMA and start it
	 */
	dbg(DBG_RX, "In fst_rx_dma %p %p %d\n", skb, mem, len);
	if (card->dmarx_in_progress) {
		dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
	}

	outl((unsigned long) skb, card->pci_conf + DMAPADR0);	/* Copy to here */
	outl((unsigned long) mem, card->pci_conf + DMALADR0);	/* from here */
	outl(len, card->pci_conf + DMASIZ0);	/* for this length */
	outl(0x00000000c, card->pci_conf + DMADPR0);	/* In this direction */

	/*
	 * We use the dmarx_in_progress flag to flag the channel as busy
	 */
	card->dmarx_in_progress = 1;
	outb(0x03, card->pci_conf + DMACSR0);	/* Start the transfer */
}

/*
 *      Send a frame through the DMA
 */
static inline void
fst_tx_dma(struct fst_card_info *card, unsigned char *skb,
	   unsigned char *mem, int len)
{
	/*
	 * This routine will setup the DMA and start it.
	 */
	dbg(DBG_TX, "In fst_tx_dma %p %p %d\n", skb, mem, len);
	if (card->dmatx_in_progress) {
		dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n");
	}

	outl((unsigned long) skb, card->pci_conf + DMAPADR1);	/* Copy from here */
	outl((unsigned long) mem, card->pci_conf + DMALADR1);	/* to here */
	outl(len, card->pci_conf + DMASIZ1);	/* for this length */
	outl(0x000000004, card->pci_conf + DMADPR1);	/* In this direction */

	/*
	 * We use the dmatx_in_progress to flag the channel as busy
	 */
	card->dmatx_in_progress = 1;
	outb(0x03, card->pci_conf + DMACSR1);	/* Start the transfer */
}
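One assumption worth making explicit (not part of the patch): these routines rely on the host-side bounce buffers and their bus addresses stored in struct fst_card_info (rx_dma_handle_host/rx_dma_handle_card and the tx equivalents), which the probe path must have allocated with the consistent-DMA API of this kernel era. A hedged sketch of roughly what that setup would look like; the function name is hypothetical and FST_MAX_MTU stands in for whatever size the real probe code uses:

```c
/* Illustrative only -- allocate the coherent bounce buffers assumed by
 * fst_rx_dma()/fst_tx_dma(). FST_MAX_MTU is a placeholder size.
 */
static int fst_alloc_dma_buffers_example(struct fst_card_info *card)
{
	card->rx_dma_handle_host =
	    pci_alloc_consistent(card->device, FST_MAX_MTU,
				 &card->rx_dma_handle_card);
	if (card->rx_dma_handle_host == NULL)
		return -ENOMEM;

	card->tx_dma_handle_host =
	    pci_alloc_consistent(card->device, FST_MAX_MTU,
				 &card->tx_dma_handle_card);
	if (card->tx_dma_handle_host == NULL)
		return -ENOMEM;

	return 0;
}
```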
/* Issue a Mailbox command for a port.
 * Note we issue them on a fire and forget basis, not expecting to see an
 * error and not waiting for completion.
 */
static void
fst_issue_cmd(struct fst_port_info *port, unsigned short cmd)
{
	struct fst_card_info *card;
	unsigned short mbval;
...
@@ -485,75 +967,68 @@ fst_issue_cmd ( struct fst_port_info *port, unsigned short cmd )
	int safety;

	card = port->card;
	spin_lock_irqsave(&card->card_lock, flags);
	mbval = FST_RDW(card, portMailbox[port->index][0]);

	safety = 0;
	/* Wait for any previous command to complete */
	while (mbval > NAK) {
		spin_unlock_irqrestore(&card->card_lock, flags);
		schedule_timeout(1);
		spin_lock_irqsave(&card->card_lock, flags);

		if (++safety > 2000) {
			printk_err("Mailbox safety timeout\n");
			break;
		}

		mbval = FST_RDW(card, portMailbox[port->index][0]);
	}
	if (safety > 0) {
		dbg(DBG_CMD, "Mailbox clear after %d jiffies\n", safety);
	}
	if (mbval == NAK) {
		dbg(DBG_CMD, "issue_cmd: previous command was NAK'd\n");
	}

	FST_WRW(card, portMailbox[port->index][0], cmd);

	if (cmd == ABORTTX || cmd == STARTPORT) {
		port->txpos = 0;
		port->txipos = 0;
		port->start = 0;
	}

	spin_unlock_irqrestore(&card->card_lock, flags);
}
/* Port output signals control
 */
static inline void
fst_op_raise(struct fst_port_info *port, unsigned int outputs)
{
	outputs |= FST_RDL(port->card, v24OpSts[port->index]);
	FST_WRL(port->card, v24OpSts[port->index], outputs);

	if (port->run)
		fst_issue_cmd(port, SETV24O);
}

static inline void
fst_op_lower(struct fst_port_info *port, unsigned int outputs)
{
	outputs = ~outputs & FST_RDL(port->card, v24OpSts[port->index]);
	FST_WRL(port->card, v24OpSts[port->index], outputs);

	if (port->run)
		fst_issue_cmd(port, SETV24O);
}
/*
 *      Setup port Rx buffers
 */
static void
fst_rx_config(struct fst_port_info *port)
{
	int i;
	int pi;
...
@@ -563,28 +1038,25 @@ fst_rx_config ( struct fst_port_info *port )
	pi = port->index;
	card = port->card;
	spin_lock_irqsave(&card->card_lock, flags);
	for (i = 0; i < NUM_RX_BUFFER; i++) {
		offset = BUF_OFFSET(rxBuffer[pi][i][0]);

		FST_WRW(card, rxDescrRing[pi][i].ladr, (u16) offset);
		FST_WRB(card, rxDescrRing[pi][i].hadr, (u8) (offset >> 16));
		FST_WRW(card, rxDescrRing[pi][i].bcnt, cnv_bcnt(LEN_RX_BUFFER));
		FST_WRW(card, rxDescrRing[pi][i].mcnt, LEN_RX_BUFFER);
		FST_WRB(card, rxDescrRing[pi][i].bits, DMA_OWN);
	}
	port->rxpos = 0;
	spin_unlock_irqrestore(&card->card_lock, flags);
}
/*
 *      Setup port Tx buffers
 */
static void
fst_tx_config(struct fst_port_info *port)
{
	int i;
	int pi;
...
@@ -594,238 +1066,564 @@ fst_tx_config ( struct fst_port_info *port )
	pi = port->index;
	card = port->card;
	spin_lock_irqsave(&card->card_lock, flags);
	for (i = 0; i < NUM_TX_BUFFER; i++) {
		offset = BUF_OFFSET(txBuffer[pi][i][0]);

		FST_WRW(card, txDescrRing[pi][i].ladr, (u16) offset);
		FST_WRB(card, txDescrRing[pi][i].hadr, (u8) (offset >> 16));
		FST_WRW(card, txDescrRing[pi][i].bcnt, 0);
		FST_WRB(card, txDescrRing[pi][i].bits, 0);
	}
	port->txpos = 0;
	port->txipos = 0;
	port->start = 0;
	spin_unlock_irqrestore(&card->card_lock, flags);
}
/* TE1 Alarm change interrupt event
 */
static void
fst_intr_te1_alarm(struct fst_card_info *card, struct fst_port_info *port)
{
	u8 los;
	u8 rra;
	u8 ais;

	los = FST_RDB(card, suStatus.lossOfSignal);
	rra = FST_RDB(card, suStatus.receiveRemoteAlarm);
	ais = FST_RDB(card, suStatus.alarmIndicationSignal);

	if (los) {
		/*
		 * Lost the link
		 */
		if (netif_carrier_ok(port_to_dev(port))) {
			dbg(DBG_INTR, "Net carrier off\n");
			netif_carrier_off(port_to_dev(port));
		}
	} else {
		/*
		 * Link available
		 */
		if (!netif_carrier_ok(port_to_dev(port))) {
			dbg(DBG_INTR, "Net carrier on\n");
			netif_carrier_on(port_to_dev(port));
		}
	}

	if (los)
		dbg(DBG_INTR, "Assert LOS Alarm\n");
	else
		dbg(DBG_INTR, "De-assert LOS Alarm\n");
	if (rra)
		dbg(DBG_INTR, "Assert RRA Alarm\n");
	else
		dbg(DBG_INTR, "De-assert RRA Alarm\n");
	if (ais)
		dbg(DBG_INTR, "Assert AIS Alarm\n");
	else
		dbg(DBG_INTR, "De-assert AIS Alarm\n");
}
/* Control signal change interrupt event
 */
static void
fst_intr_ctlchg(struct fst_card_info *card, struct fst_port_info *port)
{
	int signals;

	signals = FST_RDL(card, v24DebouncedSts[port->index]);

	if (signals & (((port->hwif == X21) || (port->hwif == X21D))
		       ? IPSTS_INDICATE : IPSTS_DCD)) {
		if (!netif_carrier_ok(port_to_dev(port))) {
			dbg(DBG_INTR, "DCD active\n");
			netif_carrier_on(port_to_dev(port));
		}
	} else {
		if (netif_carrier_ok(port_to_dev(port))) {
			dbg(DBG_INTR, "DCD lost\n");
			netif_carrier_off(port_to_dev(port));
		}
	}
}
/* Log Rx Errors
 */
static void
fst_log_rx_error(struct fst_card_info *card, struct fst_port_info *port,
		 unsigned char dmabits, int rxp, unsigned short len)
{
	struct net_device *dev = port_to_dev(port);
	struct net_device_stats *stats = hdlc_stats(dev);

	/*
	 * Increment the appropriate error counter
	 */
	stats->rx_errors++;
	if (dmabits & RX_OFLO) {
		stats->rx_fifo_errors++;
		dbg(DBG_ASS, "Rx fifo error on card %d port %d buffer %d\n",
		    card->card_no, port->index, rxp);
	}
	if (dmabits & RX_CRC) {
		stats->rx_crc_errors++;
		dbg(DBG_ASS, "Rx crc error on card %d port %d\n",
		    card->card_no, port->index);
	}
	if (dmabits & RX_FRAM) {
		stats->rx_frame_errors++;
		dbg(DBG_ASS, "Rx frame error on card %d port %d\n",
		    card->card_no, port->index);
	}
	if (dmabits == (RX_STP | RX_ENP)) {
		stats->rx_length_errors++;
		dbg(DBG_ASS, "Rx length error (%d) on card %d port %d\n",
		    len, card->card_no, port->index);
	}
}

/* Rx Error Recovery
 */
static void
fst_recover_rx_error(struct fst_card_info *card, struct fst_port_info *port,
		     unsigned char dmabits, int rxp, unsigned short len)
{
	int i;
	int pi;

	pi = port->index;
	/*
	 * Discard buffer descriptors until we see the start of the
	 * next frame.  Note that for long frames this could be in
	 * a subsequent interrupt.
	 */
	i = 0;
	while ((dmabits & (DMA_OWN | RX_STP)) == 0) {
		FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
		rxp = (rxp + 1) % NUM_RX_BUFFER;
		if (++i > NUM_RX_BUFFER) {
			dbg(DBG_ASS, "intr_rx: Discarding more bufs"
			    " than we have\n");
			break;
		}
		dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits);
		dbg(DBG_ASS, "DMA Bits of next buffer was %x\n", dmabits);
	}
	dbg(DBG_ASS, "There were %d subsequent buffers in error\n", i);

	/* Discard the terminal buffer */
	if (!(dmabits & DMA_OWN)) {
		FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
		rxp = (rxp + 1) % NUM_RX_BUFFER;
	}
	port->rxpos = rxp;
	return;
}
/* Rx complete interrupt
 */
static void
fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
{
	unsigned char dmabits;
	int pi;
	int rxp;
	int rx_status;
	unsigned short len;
	struct sk_buff *skb;
	struct net_device *dev = port_to_dev(port);
	struct net_device_stats *stats = hdlc_stats(dev);

	/* Check we have a buffer to process */
	pi = port->index;
	rxp = port->rxpos;
	dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits);
	if (dmabits & DMA_OWN) {
		dbg(DBG_RX | DBG_INTR, "intr_rx: No buffer port %d pos %d\n",
		    pi, rxp);
		return;
	}
	if (card->dmarx_in_progress) {
		return;
	}

	/* Get buffer length */
	len = FST_RDW(card, rxDescrRing[pi][rxp].mcnt);
	/* Discard the CRC */
	len -= 2;
	if (len == 0) {
		/*
		 * This seems to happen on the TE1 interface sometimes
		 * so throw the frame away and log the event.
		 */
		printk_err("Frame received with 0 length. Card %d Port %d\n",
			   card->card_no, port->index);
		/* Return descriptor to card */
		FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);

		rxp = (rxp + 1) % NUM_RX_BUFFER;
		port->rxpos = rxp;
		return;
	}

	/* Check buffer length and for other errors. We insist on one packet
	 * in one buffer. This simplifies things greatly and since we've
	 * allocated 8K it shouldn't be a real world limitation
	 */
	dbg(DBG_RX, "intr_rx: %d,%d: flags %x len %d\n", pi, rxp, dmabits,
	    len);
	if (dmabits != (RX_STP | RX_ENP) || len > LEN_RX_BUFFER - 2) {
		fst_log_rx_error(card, port, dmabits, rxp, len);
		fst_recover_rx_error(card, port, dmabits, rxp, len);
		return;
	}

	/* Allocate SKB */
	if ((skb = dev_alloc_skb(len)) == NULL) {
		dbg(DBG_RX, "intr_rx: can't allocate buffer\n");

		stats->rx_dropped++;

		/* Return descriptor to card */
		FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);

		rxp = (rxp + 1) % NUM_RX_BUFFER;
		port->rxpos = rxp;
		return;
	}

	/*
	 * We know the length we need to receive, len.
	 * It's not worth using the DMA for reads of less than
	 * FST_MIN_DMA_LEN
	 */
	if ((len < FST_MIN_DMA_LEN) || (card->family == FST_FAMILY_TXP)) {
		memcpy_fromio(skb_put(skb, len),
			      card->mem + BUF_OFFSET(rxBuffer[pi][rxp][0]),
			      len);

		/* Reset buffer descriptor */
		FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);

		/* Update stats */
		stats->rx_packets++;
		stats->rx_bytes += len;

		/* Push upstream */
		dbg(DBG_RX, "Pushing frame up the stack\n");
		skb->mac.raw = skb->data;
		skb->dev = dev;
		if (port->mode == FST_RAW) {
			/*
			 * Mark it for our own raw sockets interface
			 */
			skb->protocol = htons(ETH_P_CUST);
			skb->pkt_type = PACKET_HOST;
		} else {
			skb->protocol = hdlc_type_trans(skb, skb->dev);
		}
		rx_status = netif_rx(skb);
		fst_process_rx_status(rx_status, port_to_dev(port)->name);
		if (rx_status == NET_RX_DROP) {
			stats->rx_dropped++;
		}
		dev->last_rx = jiffies;
	} else {
		card->dma_skb_rx = skb;
		card->dma_port_rx = port;
		card->dma_len_rx = len;
		card->dma_rxpos = rxp;
		fst_rx_dma(card, (char *) card->rx_dma_handle_card,
			   (char *) BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
	}
	if (rxp != port->rxpos) {
		dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
		dbg(DBG_ASS, "rxp = %d rxpos = %d\n", rxp, port->rxpos);
	}
	rxp = (rxp + 1) % NUM_RX_BUFFER;
	port->rxpos = rxp;
}
/*
 *      The bottom halfs to the ISR
 *
 */
static void
do_bottom_half_tx(struct fst_card_info *card)
{
	struct fst_port_info *port;
	int pi;
	int txq_length;
	struct sk_buff *skb;
	unsigned long flags;
	struct net_device *dev;
	struct net_device_stats *stats;

	/*
	 *  Find a free buffer for the transmit
	 *  Step through each port on this card
	 */
	dbg(DBG_TX, "do_bottom_half_tx\n");
	for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
		if (!port->run)
			continue;

		dev = port_to_dev(port);
		stats = hdlc_stats(dev);
		while (!(FST_RDB(card, txDescrRing[pi][port->txpos].bits) &
			 DMA_OWN) && !(card->dmatx_in_progress)) {
			/*
			 * There doesn't seem to be a txdone event per-se
			 * We seem to have to deduce it, by checking the DMA_OWN
			 * bit on the next buffer we think we can use
			 */
			spin_lock_irqsave(&card->card_lock, flags);
			if ((txq_length = port->txqe - port->txqs) < 0) {
				/*
				 * This is the case where one has wrapped and the
				 * maths gives us a negative number
				 */
				txq_length = txq_length + FST_TXQ_DEPTH;
			}
			spin_unlock_irqrestore(&card->card_lock, flags);
			if (txq_length > 0) {
				/*
				 * There is something to send
				 */
				spin_lock_irqsave(&card->card_lock, flags);
				skb = port->txq[port->txqs];
				port->txqs++;
				if (port->txqs == FST_TXQ_DEPTH) {
					port->txqs = 0;
				}
				spin_unlock_irqrestore(&card->card_lock, flags);
				/*
				 * copy the data and set the required indicators on the
				 * card.
				 */
				FST_WRW(card, txDescrRing[pi][port->txpos].bcnt,
					cnv_bcnt(skb->len));
				if ((skb->len < FST_MIN_DMA_LEN)
				    || (card->family == FST_FAMILY_TXP)) {
					/* Enqueue the packet with normal io */
					memcpy_toio(card->mem +
						    BUF_OFFSET(txBuffer[pi][port->txpos][0]),
						    skb->data, skb->len);
					FST_WRB(card,
						txDescrRing[pi][port->txpos].bits,
						DMA_OWN | TX_STP | TX_ENP);
					stats->tx_packets++;
					stats->tx_bytes += skb->len;
					dev->trans_start = jiffies;
				} else {
					/* Or do it through dma */
					memcpy(card->tx_dma_handle_host,
					       skb->data, skb->len);
					card->dma_port_tx = port;
					card->dma_len_tx = skb->len;
					card->dma_txpos = port->txpos;
					fst_tx_dma(card,
						   (char *) card->tx_dma_handle_card,
						   (char *) BUF_OFFSET(txBuffer[pi][port->txpos][0]),
						   skb->len);
				}
				if (++port->txpos >= NUM_TX_BUFFER)
					port->txpos = 0;
				/*
				 * If we have flow control on, can we now release it?
				 */
				if (port->start) {
					if (txq_length < fst_txq_low) {
						netif_wake_queue(port_to_dev(port));
						port->start = 0;
					}
				}
				dev_kfree_skb(skb);
			} else {
				/*
				 * Nothing to send so break out of the while loop
				 */
				break;
			}
		}
	}
}

static void
do_bottom_half_rx(struct fst_card_info *card)
{
	struct fst_port_info *port;
	int pi;
	int rx_count = 0;

	/* Check for rx completions on all ports on this card */
	dbg(DBG_RX, "do_bottom_half_rx\n");
	for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
		if (!port->run)
			continue;

		while (!(FST_RDB(card, rxDescrRing[pi][port->rxpos].bits)
			 & DMA_OWN) && !(card->dmarx_in_progress)) {
			if (rx_count > fst_max_reads) {
				/*
				 * Don't spend forever in receive processing
				 * Schedule another event
				 */
				fst_q_work_item(&fst_work_intq, card->card_no);
				tasklet_schedule(&fst_int_task);
				break;	/* Leave the loop */
			}
			fst_intr_rx(card, port);
			rx_count++;
		}
	}
}
/*
 *      The interrupt service routine
 *      Dev_id is our fst_card_info pointer
 */
static irqreturn_t
fst_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct fst_card_info *card;
	struct fst_port_info *port;
	int rdidx;		/* Event buffer indices */
	int wridx;
	int event;		/* Actual event for processing */
	unsigned int dma_intcsr = 0;
	unsigned int do_card_interrupt;
	unsigned int int_retry_count;

	if ((card = dev_id) == NULL) {
		dbg(DBG_INTR, "intr: spurious %d\n", irq);
		return IRQ_NONE;
	}

	/*
	 * Check to see if the interrupt was for this card
	 * return if not
	 * Note that the call to clear the interrupt is important
	 */
	dbg(DBG_INTR, "intr: %d %p\n", irq, card);
	if (card->state != FST_RUNNING) {
		printk_err
		    ("Interrupt received for card %d in a non running state (%d)\n",
		     card->card_no, card->state);

		/*
		 * It is possible to really be running, i.e. we have re-loaded
		 * a running card
		 * Clear and reprime the interrupt source
		 */
		fst_clear_intr(card);
		return IRQ_HANDLED;
	}

	/* Clear and reprime the interrupt source */
	fst_clear_intr(card);

	/*
	 * Is the interrupt for this card (handshake == 1)
	 */
	do_card_interrupt = 0;
	if (FST_RDB(card, interruptHandshake) == 1) {
		do_card_interrupt += FST_CARD_INT;
		/* Set the software acknowledge */
		FST_WRB(card, interruptHandshake, 0xEE);
	}
	if (card->family == FST_FAMILY_TXU) {
		/*
		 * Is it a DMA Interrupt
		 */
		dma_intcsr = inl(card->pci_conf + INTCSR_9054);
		if (dma_intcsr & 0x00200000) {
			/*
			 * DMA Channel 0 (Rx transfer complete)
			 */
			dbg(DBG_RX, "DMA Rx xfer complete\n");
			outb(0x8, card->pci_conf + DMACSR0);
			fst_rx_dma_complete(card, card->dma_port_rx,
					    card->dma_len_rx, card->dma_skb_rx,
					    card->dma_rxpos);
			card->dmarx_in_progress = 0;
			do_card_interrupt += FST_RX_DMA_INT;
		}
		if (dma_intcsr & 0x00400000) {
			/*
			 * DMA Channel 1 (Tx transfer complete)
			 */
			dbg(DBG_TX, "DMA Tx xfer complete\n");
			outb(0x8, card->pci_conf + DMACSR1);
			fst_tx_dma_complete(card, card->dma_port_tx,
					    card->dma_len_tx, card->dma_txpos);
			card->dmatx_in_progress = 0;
			do_card_interrupt += FST_TX_DMA_INT;
		}
	}

	/*
	 * Have we been missing Interrupts
	 */
	int_retry_count = FST_RDL(card, interruptRetryCount);
	if (int_retry_count) {
		dbg(DBG_ASS, "Card %d int_retry_count is %d\n",
		    card->card_no, int_retry_count);
		FST_WRL(card, interruptRetryCount, 0);
	}

	if (!do_card_interrupt) {
		return IRQ_HANDLED;
	}

	/* Schedule the bottom half of the ISR */
	fst_q_work_item(&fst_work_intq, card->card_no);
	tasklet_schedule(&fst_int_task);

	/* Drain the event queue */
	rdidx = FST_RDB(card, interruptEvent.rdindex) & 0x1f;
	wridx = FST_RDB(card, interruptEvent.wrindex) & 0x1f;
	while (rdidx != wridx) {
		event = FST_RDB(card, interruptEvent.evntbuff[rdidx]);
		port = &card->ports[event & 0x03];

		dbg(DBG_INTR, "Processing Interrupt event: %x\n", event);

		switch (event) {
		case TE1_ALMA:
			dbg(DBG_INTR, "TE1 Alarm intr\n");
			if (port->run)
				fst_intr_te1_alarm(card, port);
			break;

		case CTLA_CHG:
		case CTLB_CHG:
		case CTLC_CHG:
		case CTLD_CHG:
			if (port->run)
				fst_intr_ctlchg(card, port);
			break;

		case ABTA_SENT:
		case ABTB_SENT:
		case ABTC_SENT:
		case ABTD_SENT:
			dbg(DBG_TX, "Abort complete port %d\n", port->index);
			break;

		case TXA_UNDF:
...
@@ -835,95 +1633,65 @@ fst_intr ( int irq, void *dev_id, struct pt_regs *regs )
			/* Difficult to see how we'd get this given that we
			 * always load up the entire packet for DMA.
			 */
			dbg(DBG_TX, "Tx underflow port %d\n", port->index);
			hdlc_stats(port_to_dev(port))->tx_errors++;
			hdlc_stats(port_to_dev(port))->tx_fifo_errors++;
			dbg(DBG_ASS, "Tx underflow on card %d port %d\n",
			    card->card_no, port->index);
			break;

		case INIT_CPLT:
			dbg(DBG_INIT, "Card init OK intr\n");
			break;

		case INIT_FAIL:
			dbg(DBG_INIT, "Card init FAILED intr\n");
			card->state = FST_IFAILED;
			break;

		default:
			printk_err("intr: unknown card event %d. ignored\n",
				   event);
			break;
		}

		/* Bump and wrap the index */
		if (++rdidx >= MAX_CIRBUFF)
			rdidx = 0;
	}
	FST_WRB(card, interruptEvent.rdindex, rdidx);
	return IRQ_HANDLED;
}
/* Check that the shared memory configuration is one that we can handle
 * and that some basic parameters are correct
 */
static void
check_started_ok(struct fst_card_info *card)
{
	int i;

	/* Check structure version and end marker */
	if (FST_RDW(card, smcVersion) != SMC_VERSION) {
		printk_err("Bad shared memory version %d expected %d\n",
			   FST_RDW(card, smcVersion), SMC_VERSION);
		card->state = FST_BADVERSION;
		return;
	}
	if (FST_RDL(card, endOfSmcSignature) != END_SIG) {
		printk_err("Missing shared memory signature\n");
		card->state = FST_BADVERSION;
		return;
	}
	/* Firmware status flag, 0x00 = initialising, 0x01 = OK, 0xFF = fail */
	if ((i = FST_RDB(card, taskStatus)) == 0x01) {
		card->state = FST_RUNNING;
	} else if (i == 0xFF) {
		printk_err("Firmware initialisation failed. Card halted\n");
		card->state = FST_HALTED;
		return;
	} else if (i != 0x00) {
		printk_err("Unknown firmware status 0x%x\n", i);
		card->state = FST_HALTED;
		return;
	}
...
@@ -932,52 +1700,113 @@ check_started_ok ( struct fst_card_info *card )
	 * number we assumed at card detection. Should never happen with
	 * existing firmware etc so we just report it for the moment.
	 */
	if (FST_RDL(card, numberOfPorts) != card->nports) {
		printk_warn("Port count mismatch on card %d."
			    " Firmware thinks %d we say %d\n", card->card_no,
			    FST_RDL(card, numberOfPorts), card->nports);
	}
}
static int
set_conf_from_info(struct fst_card_info *card, struct fst_port_info *port,
		   struct fstioc_info *info)
{
	int err;
	unsigned char my_framing;

	/* Set things according to the user set valid flags
	 * Several of the old options have been invalidated/replaced by the
	 * generic hdlc package.
	 */
	err = 0;
	if (info->valid & FSTVAL_PROTO) {
		if (info->proto == FST_RAW)
			port->mode = FST_RAW;
		else
			port->mode = FST_GEN_HDLC;
	}

	if (info->valid & FSTVAL_CABLE)
		err = -EINVAL;

	if (info->valid & FSTVAL_SPEED)
		err = -EINVAL;

	if (info->valid & FSTVAL_PHASE)
		FST_WRB(card, portConfig[port->index].invertClock,
			info->invertClock);
	if (info->valid & FSTVAL_MODE)
		FST_WRW(card, cardMode, info->cardMode);
	if (info->valid & FSTVAL_TE1) {
		FST_WRL(card, suConfig.dataRate, info->lineSpeed);
		FST_WRB(card, suConfig.clocking, info->clockSource);
		my_framing = FRAMING_E1;
		if (info->framing == E1)
			my_framing = FRAMING_E1;
		if (info->framing == T1)
			my_framing = FRAMING_T1;
		if (info->framing == J1)
			my_framing = FRAMING_J1;
		FST_WRB(card, suConfig.framing, my_framing);
		FST_WRB(card, suConfig.structure, info->structure);
		FST_WRB(card, suConfig.interface, info->interface);
		FST_WRB(card, suConfig.coding, info->coding);
		FST_WRB(card, suConfig.lineBuildOut, info->lineBuildOut);
		FST_WRB(card, suConfig.equalizer, info->equalizer);
		FST_WRB(card, suConfig.transparentMode, info->transparentMode);
		FST_WRB(card, suConfig.loopMode, info->loopMode);
		FST_WRB(card, suConfig.range, info->range);
		FST_WRB(card, suConfig.txBufferMode, info->txBufferMode);
		FST_WRB(card, suConfig.rxBufferMode, info->rxBufferMode);
		FST_WRB(card, suConfig.startingSlot, info->startingSlot);
		FST_WRB(card, suConfig.losThreshold, info->losThreshold);
		if (info->idleCode)
			FST_WRB(card, suConfig.enableIdleCode, 1);
		else
			FST_WRB(card, suConfig.enableIdleCode, 0);
		FST_WRB(card, suConfig.idleCode, info->idleCode);
#if FST_DEBUG
		if (info->valid & FSTVAL_TE1) {
			printk("Setting TE1 data\n");
			printk("Line Speed = %d\n", info->lineSpeed);
			printk("Start slot = %d\n", info->startingSlot);
			printk("Clock source = %d\n", info->clockSource);
			printk("Framing = %d\n", my_framing);
			printk("Structure = %d\n", info->structure);
			printk("interface = %d\n", info->interface);
			printk("Coding = %d\n", info->coding);
			printk("Line build out = %d\n", info->lineBuildOut);
			printk("Equaliser = %d\n", info->equalizer);
			printk("Transparent mode = %d\n", info->transparentMode);
			printk("Loop mode = %d\n", info->loopMode);
			printk("Range = %d\n", info->range);
			printk("Tx Buffer mode = %d\n", info->txBufferMode);
			printk("Rx Buffer mode = %d\n", info->rxBufferMode);
			printk("LOS Threshold = %d\n", info->losThreshold);
			printk("Idle Code = %d\n", info->idleCode);
		}
#endif
	}
#if FST_DEBUG
	if (info->valid & FSTVAL_DEBUG) {
		fst_debug_mask = info->debug;
	}
#endif

	return err;
}
static void
gather_conf_info(struct fst_card_info *card, struct fst_port_info *port,
		 struct fstioc_info *info)
{
	int i;

	memset(info, 0, sizeof (struct fstioc_info));

	i = port->index;
	info->kernelVersion = LINUX_VERSION_CODE;
	info->nports = card->nports;
	info->type = card->type;
	info->state = card->state;
...
@@ -990,58 +1819,132 @@ gather_conf_info ( struct fst_card_info *card, struct fst_port_info *port,
	/* Only mark information as valid if card is running.
	 * Copy the data anyway in case it is useful for diagnostics
	 */
	info->valid = ((card->state == FST_RUNNING) ? FSTVAL_ALL : FSTVAL_CARD)
#if FST_DEBUG
	    | FSTVAL_DEBUG
#endif
	    ;

	info->lineInterface = FST_RDW(card, portConfig[i].lineInterface);
	info->internalClock = FST_RDB(card, portConfig[i].internalClock);
	info->lineSpeed = FST_RDL(card, portConfig[i].lineSpeed);
	info->invertClock = FST_RDB(card, portConfig[i].invertClock);
	info->v24IpSts = FST_RDL(card, v24IpSts[i]);
	info->v24OpSts = FST_RDL(card, v24OpSts[i]);
	info->clockStatus = FST_RDW(card, clockStatus[i]);
	info->cableStatus = FST_RDW(card, cableStatus);
	info->cardMode = FST_RDW(card, cardMode);
	info->smcFirmwareVersion = FST_RDL(card, smcFirmwareVersion);

	/*
	 * The T2U can report cable presence for both A or B
	 * in bits 0 and 1 of cableStatus.  See which port we are and
	 * do the mapping.
	 */
	if (card->family == FST_FAMILY_TXU) {
		if (port->index == 0) {
			/*
			 * Port A
			 */
			info->cableStatus = info->cableStatus & 1;
		} else {
			/*
			 * Port B
			 */
			info->cableStatus = info->cableStatus >> 1;
			info->cableStatus = info->cableStatus & 1;
		}
	}
	/*
	 * Some additional bits if we are TE1
	 */
	if (card->type == FST_TYPE_TE1) {
		info->lineSpeed = FST_RDL(card, suConfig.dataRate);
		info->clockSource = FST_RDB(card, suConfig.clocking);
		info->framing = FST_RDB(card, suConfig.framing);
		info->structure = FST_RDB(card, suConfig.structure);
		info->interface = FST_RDB(card, suConfig.interface);
		info->coding = FST_RDB(card, suConfig.coding);
		info->lineBuildOut = FST_RDB(card, suConfig.lineBuildOut);
		info->equalizer = FST_RDB(card, suConfig.equalizer);
		info->loopMode = FST_RDB(card, suConfig.loopMode);
		info->range = FST_RDB(card, suConfig.range);
		info->txBufferMode = FST_RDB(card, suConfig.txBufferMode);
		info->rxBufferMode = FST_RDB(card, suConfig.rxBufferMode);
		info->startingSlot = FST_RDB(card, suConfig.startingSlot);
		info->losThreshold = FST_RDB(card, suConfig.losThreshold);
		if (FST_RDB(card, suConfig.enableIdleCode))
			info->idleCode = FST_RDB(card, suConfig.idleCode);
		else
			info->idleCode = 0;
		info->receiveBufferDelay =
		    FST_RDL(card, suStatus.receiveBufferDelay);
		info->framingErrorCount =
		    FST_RDL(card, suStatus.framingErrorCount);
		info->codeViolationCount =
		    FST_RDL(card, suStatus.codeViolationCount);
		info->crcErrorCount = FST_RDL(card, suStatus.crcErrorCount);
		info->lineAttenuation = FST_RDL(card, suStatus.lineAttenuation);
		info->lossOfSignal = FST_RDB(card, suStatus.lossOfSignal);
		info->receiveRemoteAlarm =
		    FST_RDB(card, suStatus.receiveRemoteAlarm);
		info->alarmIndicationSignal =
		    FST_RDB(card, suStatus.alarmIndicationSignal);
	}
}
static int
fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
	      struct ifreq *ifr)
{
	sync_serial_settings sync;
	int i;

	if (ifr->ifr_settings.size != sizeof (sync)) {
		return -ENOMEM;
	}

	if (copy_from_user
	    (&sync, ifr->ifr_settings.ifs_ifsu.sync, sizeof (sync))) {
		return -EFAULT;
	}

	if (sync.loopback)
		return -EINVAL;

	i = port->index;

	switch (ifr->ifr_settings.type) {
	case IF_IFACE_V35:
		FST_WRW(card, portConfig[i].lineInterface, V35);
		port->hwif = V35;
		break;

	case IF_IFACE_V24:
		FST_WRW(card, portConfig[i].lineInterface, V24);
		port->hwif = V24;
		break;

	case IF_IFACE_X21:
		FST_WRW(card, portConfig[i].lineInterface, X21);
		port->hwif = X21;
		break;

	case IF_IFACE_X21D:
		FST_WRW(card, portConfig[i].lineInterface, X21D);
		port->hwif = X21D;
		break;

	case IF_IFACE_T1:
		FST_WRW(card, portConfig[i].lineInterface, T1);
		port->hwif = T1;
		break;

	case IF_IFACE_E1:
		FST_WRW(card, portConfig[i].lineInterface, E1);
		port->hwif = E1;
		break;

	case IF_IFACE_SYNC_SERIAL:
		break;

...
@@ -1049,26 +1952,25 @@ fst_set_iface ( struct fst_card_info *card, struct fst_port_info *port,
		return -EINVAL;
	}

	switch (sync.clock_type) {
	case CLOCK_EXT:
		FST_WRB(card, portConfig[i].internalClock, EXTCLK);
		break;

	case CLOCK_INT:
		FST_WRB(card, portConfig[i].internalClock, INTCLK);
		break;

	default:
		return -EINVAL;
	}
	FST_WRL(card, portConfig[i].lineSpeed, sync.clock_rate);
	return 0;
}
static
int
static
int
fst_get_iface
(
struct
fst_card_info
*
card
,
struct
fst_port_info
*
port
,
fst_get_iface
(
struct
fst_card_info
*
card
,
struct
fst_port_info
*
port
,
struct
ifreq
*
ifr
)
struct
ifreq
*
ifr
)
{
{
sync_serial_settings
sync
;
sync_serial_settings
sync
;
int
i
;
int
i
;
...
@@ -1077,41 +1979,51 @@ fst_get_iface ( struct fst_card_info *card, struct fst_port_info *port,
...
@@ -1077,41 +1979,51 @@ fst_get_iface ( struct fst_card_info *card, struct fst_port_info *port,
	 * if nothing is set as IF_IFACE_SYNC_SERIAL implies it can't be
	 * changed
	 */
	switch (port->hwif) {
	case E1:
		ifr->ifr_settings.type = IF_IFACE_E1;
		break;
	case T1:
		ifr->ifr_settings.type = IF_IFACE_T1;
		break;
	case V35:
		ifr->ifr_settings.type = IF_IFACE_V35;
		break;
	case V24:
		ifr->ifr_settings.type = IF_IFACE_V24;
		break;
	case X21D:
		ifr->ifr_settings.type = IF_IFACE_X21D;
		break;
	case X21:
	default:
		ifr->ifr_settings.type = IF_IFACE_X21;
		break;
	}
	if (ifr->ifr_settings.size == 0) {
		return 0;	/* only type requested */
	}
	if (ifr->ifr_settings.size < sizeof (sync)) {
		return -ENOMEM;
	}

	i = port->index;
	sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
	/* Lucky card and linux use same encoding here */
	sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
	    INTCLK ? CLOCK_INT : CLOCK_EXT;
	sync.loopback = 0;

	if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof (sync))) {
		return -EFAULT;
	}

	ifr->ifr_settings.size = sizeof (sync);
	return 0;
}
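For reference, a minimal user-space sketch of how this IF_GET_IFACE path is normally exercised through the generic HDLC SIOCWANDEV ioctl; the interface name "sync0", the header locations and the error handling are assumptions for illustration only and are not part of this patch:

/* user-space sketch - not part of the kernel patch */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if.h>
#include <linux/sockios.h>
#include <linux/hdlc.h>		/* sync_serial_settings; exact header may vary by kernel */

int main(void)
{
	struct ifreq ifr;
	sync_serial_settings sync;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "sync0", IFNAMSIZ - 1);	/* assumed port name */
	ifr.ifr_settings.type = IF_GET_IFACE;
	ifr.ifr_settings.size = sizeof(sync);		/* 0 would return only the type */
	ifr.ifr_settings.ifs_ifsu.sync = &sync;

	if (ioctl(fd, SIOCWANDEV, &ifr) == 0)
		printf("iface 0x%x, clock_rate %u, clock_type %u\n",
		       ifr.ifr_settings.type, sync.clock_rate, sync.clock_type);
	close(fd);
	return 0;
}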
static int
fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct fst_card_info *card;
	struct fst_port_info *port;
...
@@ -1119,23 +2031,22 @@ fst_ioctl ( struct net_device *dev, struct ifreq *ifr, int cmd )
	struct fstioc_info info;
	unsigned long flags;

	dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, ifr->ifr_data);

	port = dev_to_port(dev);
	card = port->card;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case FSTCPURESET:
		fst_cpureset(card);
		card->state = FST_RESET;
		return 0;

	case FSTCPURELEASE:
		fst_cpurelease(card);
		card->state = FST_STARTING;
		return 0;
...
@@ -1144,22 +2055,19 @@ fst_ioctl ( struct net_device *dev, struct ifreq *ifr, int cmd )
		/* First copy in the header with the length and offset of data
		 * to write
		 */
		if (ifr->ifr_data == NULL) {
			return -EINVAL;
		}
		if (copy_from_user(&wrthdr, ifr->ifr_data,
				   sizeof (struct fstioc_write))) {
			return -EFAULT;
		}

		/* Sanity check the parameters. We don't support partial writes
		 * when going over the top
		 */
		if (wrthdr.size > FST_MEMSIZE || wrthdr.offset > FST_MEMSIZE
		    || wrthdr.size + wrthdr.offset > FST_MEMSIZE) {
			return -ENXIO;
		}
...
@@ -1167,18 +2075,16 @@ fst_ioctl ( struct net_device *dev, struct ifreq *ifr, int cmd )
		 * This will probably break on some architectures.
		 * I'll fix it when I have something to test on.
		 */
		if (copy_from_user(card->mem + wrthdr.offset,
				   ifr->ifr_data + sizeof (struct fstioc_write),
				   wrthdr.size)) {
			return -EFAULT;
		}

		/* Writes to the memory of a card in the reset state constitute
		 * a download
		 */
		if (card->state == FST_RESET) {
			card->state = FST_DOWNLOAD;
		}
		return 0;
...
@@ -1188,250 +2094,302 @@ fst_ioctl ( struct net_device *dev, struct ifreq *ifr, int cmd )
		/* If card has just been started check the shared memory config
		 * version and marker
		 */
		if (card->state == FST_STARTING) {
			check_started_ok(card);

			/* If everything checked out enable card interrupts */
			if (card->state == FST_RUNNING) {
				spin_lock_irqsave(&card->card_lock, flags);
				fst_enable_intr(card);
				FST_WRB(card, interruptHandshake, 0xEE);
				spin_unlock_irqrestore(&card->card_lock, flags);
			}
		}

		if (ifr->ifr_data == NULL) {
			return -EINVAL;
		}

		gather_conf_info(card, port, &info);

		if (copy_to_user(ifr->ifr_data, &info, sizeof (info))) {
			return -EFAULT;
		}
		return 0;

	case FSTSETCONF:
		/*
		 * Most of the settings have been moved to the generic ioctls
		 * this just covers debug and board ident now
		 */
		if (card->state != FST_RUNNING) {
			printk_err
			    ("Attempt to configure card %d in non-running state (%d)\n",
			     card->card_no, card->state);
			return -EIO;
		}
		if (copy_from_user(&info, ifr->ifr_data, sizeof (info))) {
			return -EFAULT;
		}

		return set_conf_from_info(card, port, &info);

	case SIOCWANDEV:
		switch (ifr->ifr_settings.type) {
		case IF_GET_IFACE:
			return fst_get_iface(card, port, ifr);

		case IF_IFACE_SYNC_SERIAL:
		case IF_IFACE_V35:
		case IF_IFACE_V24:
		case IF_IFACE_X21:
		case IF_IFACE_X21D:
		case IF_IFACE_T1:
		case IF_IFACE_E1:
			return fst_set_iface(card, port, ifr);

		case IF_PROTO_RAW:
			port->mode = FST_RAW;
			return 0;

		case IF_GET_PROTO:
			if (port->mode == FST_RAW) {
				ifr->ifr_settings.type = IF_PROTO_RAW;
				return 0;
			}
			return hdlc_ioctl(dev, ifr, cmd);

		default:
			port->mode = FST_GEN_HDLC;
			dbg(DBG_IOCTL, "Passing this type to hdlc %x\n",
			    ifr->ifr_settings.type);
			return hdlc_ioctl(dev, ifr, cmd);
		}

	default:
		/* Not one of ours. Pass through to HDLC package */
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
static void
fst_openport(struct fst_port_info *port)
{
	int signals;
	int txq_length;

	/* Only init things if card is actually running. This allows open to
	 * succeed for downloads etc.
	 */
	if (port->card->state == FST_RUNNING) {
		if (port->run) {
			dbg(DBG_OPEN, "open: found port already running\n");

			fst_issue_cmd(port, STOPPORT);
			port->run = 0;
		}

		fst_rx_config(port);
		fst_tx_config(port);
		fst_op_raise(port, OPSTS_RTS | OPSTS_DTR);

		fst_issue_cmd(port, STARTPORT);
		port->run = 1;

		signals = FST_RDL(port->card, v24DebouncedSts[port->index]);
		if (signals & (((port->hwif == X21) || (port->hwif == X21D))
			       ? IPSTS_INDICATE : IPSTS_DCD))
			netif_carrier_on(port_to_dev(port));
		else
			netif_carrier_off(port_to_dev(port));

		txq_length = port->txqe - port->txqs;
		port->txqe = 0;
		port->txqs = 0;
	}
}
static void
fst_closeport(struct fst_port_info *port)
{
	if (port->card->state == FST_RUNNING) {
		if (port->run) {
			port->run = 0;
			fst_op_lower(port, OPSTS_RTS | OPSTS_DTR);

			fst_issue_cmd(port, STOPPORT);
		} else {
			dbg(DBG_OPEN, "close: port not running\n");
		}
	}
}
static int
fst_open(struct net_device *dev)
{
	int err;
	struct fst_port_info *port;

	port = dev_to_port(dev);
	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	if (port->mode != FST_RAW) {
		err = hdlc_open(dev);
		if (err)
			return err;
	}

	fst_openport(port);
	netif_wake_queue(dev);
	return 0;
}
static int
fst_close(struct net_device *dev)
{
	struct fst_port_info *port;
	struct fst_card_info *card;
	unsigned char tx_dma_done;
	unsigned char rx_dma_done;

	port = dev_to_port(dev);
	card = port->card;

	tx_dma_done = inb(card->pci_conf + DMACSR1);
	rx_dma_done = inb(card->pci_conf + DMACSR0);
	dbg(DBG_OPEN,
	    "Port Close: tx_dma_in_progress = %d (%x) rx_dma_in_progress = %d (%x)\n",
	    card->dmatx_in_progress, tx_dma_done,
	    card->dmarx_in_progress, rx_dma_done);

	netif_stop_queue(dev);
	fst_closeport(dev_to_port(dev));
	if (port->mode != FST_RAW) {
		hdlc_close(dev);
	}
	module_put(THIS_MODULE);
	return 0;
}
static int
fst_attach(struct net_device *dev, unsigned short encoding, unsigned short parity)
{
	/*
	 * Setting currently fixed in FarSync card so we check and forget
	 */
	if (encoding != ENCODING_NRZ || parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;
	return 0;
}
static void
fst_tx_timeout(struct net_device *dev)
{
	struct fst_port_info *port;
	struct fst_card_info *card;
	struct net_device_stats *stats = hdlc_stats(dev);

	port = dev_to_port(dev);
	card = port->card;
	stats->tx_errors++;
	stats->tx_aborted_errors++;
	dbg(DBG_ASS, "Tx timeout card %d port %d\n",
	    card->card_no, port->index);
	fst_issue_cmd(port, ABORTTX);

	dev->trans_start = jiffies;
	netif_wake_queue(dev);
	port->start = 0;
}
static int
fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fst_card_info *card;
	struct fst_port_info *port;
	struct net_device_stats *stats = hdlc_stats(dev);
	unsigned long flags;
	int txq_length;

	port = dev_to_port(dev);
	card = port->card;
	dbg(DBG_TX, "fst_start_xmit: length = %d\n", skb->len);

	/* Drop packet with error if we don't have carrier */
	if (!netif_carrier_ok(dev)) {
		dev_kfree_skb(skb);
		stats->tx_errors++;
		stats->tx_carrier_errors++;
		dbg(DBG_ASS,
		    "Tried to transmit but no carrier on card %d port %d\n",
		    card->card_no, port->index);
		return 0;
	}

	/* Drop it if it's too big! MTU failure ? */
	if (skb->len > LEN_TX_BUFFER) {
		dbg(DBG_ASS, "Packet too large %d vs %d\n", skb->len,
		    LEN_TX_BUFFER);
		dev_kfree_skb(skb);
		stats->tx_errors++;
		return 0;
	}

	/*
	 * We are always going to queue the packet
	 * so that the bottom half is the only place we tx from
	 * Check there is room in the port txq
	 */
	spin_lock_irqsave(&card->card_lock, flags);
	if ((txq_length = port->txqe - port->txqs) < 0) {
		/*
		 * This is the case where the next free has wrapped but the
		 * last used hasn't
		 */
		txq_length = txq_length + FST_TXQ_DEPTH;
	}
	spin_unlock_irqrestore(&card->card_lock, flags);
	if (txq_length > fst_txq_high) {
		/*
		 * We have got enough buffers in the pipeline.  Ask the network
		 * layer to stop sending frames down
		 */
		netif_stop_queue(dev);
		port->start = 1;	/* I'm using this to signal stop sent up */
	}

	if (txq_length == FST_TXQ_DEPTH - 1) {
		/*
		 * This shouldn't have happened but such is life
		 */
		dev_kfree_skb(skb);
		stats->tx_errors++;
		dbg(DBG_ASS, "Tx queue overflow card %d port %d\n",
		    card->card_no, port->index);
		return 0;
	}

	/*
	 * queue the buffer
	 */
	spin_lock_irqsave(&card->card_lock, flags);
	port->txq[port->txqe] = skb;
	port->txqe++;
	if (port->txqe == FST_TXQ_DEPTH)
		port->txqe = 0;
	spin_unlock_irqrestore(&card->card_lock, flags);

	/* Schedule the bottom half which now does transmit processing */
	fst_q_work_item(&fst_work_txq, card->card_no);
	tasklet_schedule(&fst_tx_task);

	return 0;
}
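A small stand-alone sketch of the circular tx-queue length arithmetic used above; the FST_TXQ_DEPTH value and the helper function are assumed here purely for illustration:

/* illustrative sketch only - not part of the kernel patch */
#include <stdio.h>

#define FST_TXQ_DEPTH 16	/* assumed ring size for the example */

/* txqe is the next free slot, txqs the next slot the bottom half will send.
 * When txqe has wrapped past the end of the ring but txqs has not, the raw
 * difference goes negative and is corrected by the queue depth.
 */
static int txq_length(int txqe, int txqs)
{
	int len = txqe - txqs;

	if (len < 0)
		len += FST_TXQ_DEPTH;
	return len;
}

int main(void)
{
	printf("%d\n", txq_length(3, 14));	/* prints 5: five entries queued */
	return 0;
}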
/*
 *      Card setup having checked hardware resources.
 *      Should be pretty bizarre if we get an error here (kernel memory
...
@@ -1442,11 +2400,15 @@ fst_start_xmit ( struct sk_buff *skb, struct net_device *dev )
static char *type_strings[] __devinitdata = {
	"no hardware",		/* Should never be seen */
	"FarSync T2P",
	"FarSync T4P",
	"FarSync T1U",
	"FarSync T2U",
	"FarSync T4U",
	"FarSync TE1"
};

static void __devinit
fst_init_card(struct fst_card_info *card)
{
	int i;
	int err;
...
@@ -1455,7 +2417,7 @@ fst_init_card ( struct fst_card_info *card )
	 * firmware detects something different later (should never happen)
	 * we'll have to revise it in some way then.
	 */
	for (i = 0; i < card->nports; i++) {
		err = register_hdlc_device(card->ports[i].dev);
		if (err < 0) {
			int j;
...
@@ -1470,60 +2432,118 @@ fst_init_card ( struct fst_card_info *card )
		}
	}

	printk_info("%s-%s: %s IRQ%d, %d ports\n",
		    port_to_dev(&card->ports[0])->name,
		    port_to_dev(&card->ports[card->nports - 1])->name,
		    type_strings[card->type], card->irq, card->nports);
}

/*
 *      Initialise card when detected.
 *      Returns 0 to indicate success, or errno otherwise.
 */
static int __devinit
fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int firsttime_done = 0;
	static int no_of_cards_added = 0;
	struct fst_card_info *card;
	int err = 0;
	int i;

	if (!firsttime_done) {
		printk_info("FarSync WAN driver " FST_USER_VERSION
			    " (c) 2001-2004 FarSite Communications Ltd.\n");
		firsttime_done = 1;
		dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
	}

	/*
	 * We are going to be clever and allow certain cards not to be
	 * configured. An exclude list can be provided in /etc/modules.conf
	 */
	if (fst_excluded_cards != 0) {
		/*
		 * There are cards to exclude
		 */
		for (i = 0; i < fst_excluded_cards; i++) {
			if ((pdev->devfn) >> 3 == fst_excluded_list[i]) {
				printk_info("FarSync PCI device %d not assigned\n",
					    (pdev->devfn) >> 3);
				return -EBUSY;
			}
		}
	}

	/* Allocate driver private data */
	card = kmalloc(sizeof (struct fst_card_info), GFP_KERNEL);
	if (card == NULL) {
		printk_err("FarSync card found but insufficient memory for"
			   " driver storage\n");
		return -ENOMEM;
	}
	memset(card, 0, sizeof (struct fst_card_info));

	/* Try to enable the device */
	if ((err = pci_enable_device(pdev)) != 0) {
		printk_err("Failed to enable card. Err %d\n", -err);
		kfree(card);
		return err;
	}

	if ((err = pci_request_regions(pdev, "FarSync")) != 0) {
		printk_err("Failed to allocate regions. Err %d\n", -err);
		pci_disable_device(pdev);
		kfree(card);
		return err;
	}

	/* Get virtual addresses of memory regions */
	card->pci_conf = pci_resource_start(pdev, 1);
	card->phys_mem = pci_resource_start(pdev, 2);
	card->phys_ctlmem = pci_resource_start(pdev, 3);
	if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
		printk_err("Physical memory remap failed\n");
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(card);
		return -ENODEV;
	}
	if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
		printk_err("Control memory remap failed\n");
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(card);
		return -ENODEV;
	}
	dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);

	/* Register the interrupt handler */
	if (request_irq(pdev->irq, fst_intr, SA_SHIRQ, FST_DEV_NAME, card)) {
		printk_err("Unable to register interrupt %d\n", card->irq);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		iounmap(card->ctlmem);
		iounmap(card->mem);
		kfree(card);
		return -ENODEV;
	}

	/* Record info we need */
	card->irq = pdev->irq;
	card->type = ent->driver_data;
	card->family = ((ent->driver_data == FST_TYPE_T2P) ||
			(ent->driver_data == FST_TYPE_T4P))
	    ? FST_FAMILY_TXP : FST_FAMILY_TXU;
	if ((ent->driver_data == FST_TYPE_T1U) ||
	    (ent->driver_data == FST_TYPE_TE1))
		card->nports = 1;
	else
		card->nports = ((ent->driver_data == FST_TYPE_T2P) ||
				(ent->driver_data == FST_TYPE_T2U)) ? 2 : 4;

	card->state = FST_UNINIT;
	spin_lock_init(&card->card_lock);

	for (i = 0; i < card->nports; i++) {
...
@@ -1533,7 +2553,13 @@ fst_add_one ( struct pci_dev *pdev, const struct pci_device_id *ent )
			while (i--)
				free_netdev(card->ports[i].dev);
			printk_err("FarSync: out of memory\n");
			free_irq(card->irq, card);
			pci_release_regions(pdev);
			pci_disable_device(pdev);
			iounmap(card->ctlmem);
			iounmap(card->mem);
			kfree(card);
			return -ENODEV;
		}
		card->ports[i].dev = dev;
		card->ports[i].card = card;
...
@@ -1564,128 +2590,95 @@ fst_add_one ( struct pci_dev *pdev, const struct pci_device_id *ent )
		hdlc->xmit = fst_start_xmit;
	}

	card->device = pdev;

	dbg(DBG_PCI, "type %d nports %d irq %d \n", card->type,
	    card->nports, card->irq);
	dbg(DBG_PCI, "conf %04x mem %08x ctlmem %08x\n",
	    card->pci_conf, card->phys_mem, card->phys_ctlmem);

	/* Reset the card's processor */
	fst_cpureset(card);
	card->state = FST_RESET;

	/* Initialise DMA (if required) */
	fst_init_dma(card);

	/* Record driver data for later use */
	pci_set_drvdata(pdev, card);

	/* Remainder of card setup */
	fst_card_array[no_of_cards_added] = card;
	card->card_no = no_of_cards_added++;	/* Record instance and bump it */
	fst_init_card(card);
	if (card->family == FST_FAMILY_TXU) {
		/*
		 * Allocate a dma buffer for transmit and receives
		 */
		card->rx_dma_handle_host =
		    pci_alloc_consistent(card->device, FST_MAX_MTU,
					 &card->rx_dma_handle_card);
		if (card->rx_dma_handle_host == NULL) {
			printk_err("Could not allocate rx dma buffer\n");
			fst_disable_intr(card);
			pci_release_regions(pdev);
			pci_disable_device(pdev);
			iounmap(card->ctlmem);
			iounmap(card->mem);
			kfree(card);
			return -ENOMEM;
		}
		card->tx_dma_handle_host =
		    pci_alloc_consistent(card->device, FST_MAX_MTU,
					 &card->tx_dma_handle_card);
		if (card->tx_dma_handle_host == NULL) {
			printk_err("Could not allocate tx dma buffer\n");
			fst_disable_intr(card);
			pci_release_regions(pdev);
			pci_disable_device(pdev);
			iounmap(card->ctlmem);
			iounmap(card->mem);
			kfree(card);
			return -ENOMEM;
		}
	}
	return 0;		/* Success */
}
/*
 *      Cleanup and close down a card
 */
static void __devexit
fst_remove_one(struct pci_dev *pdev)
{
	struct fst_card_info *card;
	int i;

	card = pci_get_drvdata(pdev);

	for (i = 0; i < card->nports; i++) {
		struct net_device *dev = port_to_dev(&card->ports[i]);
		unregister_hdlc_device(dev);
	}

	fst_disable_intr(card);
	free_irq(card->irq, card);

	iounmap(card->ctlmem);
	iounmap(card->mem);
	pci_release_regions(pdev);
	if (card->family == FST_FAMILY_TXU) {
		/*
		 * Free dma buffers
		 */
		pci_free_consistent(card->device, FST_MAX_MTU,
				    card->rx_dma_handle_host,
				    card->rx_dma_handle_card);
		pci_free_consistent(card->device, FST_MAX_MTU,
				    card->tx_dma_handle_host,
				    card->tx_dma_handle_card);
	}
	fst_card_array[card->card_no] = NULL;
}
static struct pci_driver fst_driver = {
...
@@ -1700,15 +2693,20 @@ static struct pci_driver fst_driver = {
static int __init
fst_init(void)
{
	int i;

	for (i = 0; i < FST_MAX_CARDS; i++)
		fst_card_array[i] = NULL;
	spin_lock_init(&fst_work_q_lock);
	return pci_module_init(&fst_driver);
}

static void __exit
fst_cleanup_module(void)
{
	printk_info("FarSync WAN driver unloading\n");
	pci_unregister_driver(&fst_driver);
}

module_init(fst_init);
module_exit(fst_cleanup_module);
drivers/net/wan/farsync.h View file @ ee86da9d
...
@@ -32,8 +32,13 @@
 * A short common prefix is useful for routines within the driver to avoid
 * conflict with other similar drivers and I have chosen to use "fst_" for
 * this purpose (FarSite T-series).
 *
 * Finally the device driver needs a short network interface name. Since
 * "hdlc" is already in use I've chosen the even less informative "sync"
 * for the present.
 */
#define FST_NAME                "fst"           /* In debug/info etc */
#define FST_NDEV_NAME           "sync"          /* For net interface */
#define FST_DEV_NAME            "farsync"       /* For misc interfaces */
...
@@ -45,7 +50,7 @@
 * have individual versions (or IDs) that move much faster than the
 * release version as individual updates are tracked.
 */
#define FST_USER_VERSION        "1.04"

/* Ioctl call command values
...
@@ -100,6 +105,7 @@ struct fstioc_info {
	unsigned int state;		/* State of card */
	unsigned int index;		/* Index of port ioctl was issued on */
	unsigned int smcFirmwareVersion;
	unsigned long kernelVersion;	/* What Kernel version we are working with */
	unsigned short lineInterface;	/* Physical interface type */
	unsigned char proto;		/* Line protocol */
	unsigned char internalClock;	/* 1 => internal clock, 0 => external */
...
@@ -110,6 +116,31 @@ struct fstioc_info {
	unsigned short cableStatus;	/* lsb: 0=> present, 1=> absent */
	unsigned short cardMode;	/* lsb: LED id mode */
	unsigned short debug;		/* Debug flags */
	unsigned char transparentMode;	/* Not used always 0 */
	unsigned char invertClock;	/* Invert clock feature for syncing */
	unsigned char startingSlot;	/* Time slot to use for start of tx */
	unsigned char clockSource;	/* External or internal */
	unsigned char framing;		/* E1, T1 or J1 */
	unsigned char structure;	/* unframed, double, crc4, f4, f12, */
					/* f24 f72 */
	unsigned char interface;	/* rj48c or bnc */
	unsigned char coding;		/* hdb3 b8zs */
	unsigned char lineBuildOut;	/* 0, -7.5, -15, -22 */
	unsigned char equalizer;	/* short or long haul settings */
	unsigned char loopMode;		/* various loopbacks */
	unsigned char range;		/* cable lengths */
	unsigned char txBufferMode;	/* tx elastic buffer depth */
	unsigned char rxBufferMode;	/* rx elastic buffer depth */
	unsigned char losThreshold;	/* Attenuation on LOS signal */
	unsigned char idleCode;		/* Value to send as idle timeslot */
	unsigned int receiveBufferDelay; /* delay thro rx buffer timeslots */
	unsigned int framingErrorCount;	/* framing errors */
	unsigned int codeViolationCount; /* code violations */
	unsigned int crcErrorCount;	/* CRC errors */
	int lineAttenuation;		/* in dB */
	unsigned short lossOfSignal;
	unsigned short receiveRemoteAlarm;
	unsigned short alarmIndicationSignal;
};

/* "valid" bitmask */
...
@@ -131,13 +162,23 @@ struct fstioc_info {
 */
#define FSTVAL_PROTO    0x00000200	/* proto */
#define FSTVAL_MODE     0x00000400	/* cardMode */
#define FSTVAL_PHASE    0x00000800	/* Clock phase */
#define FSTVAL_TE1      0x00001000	/* T1E1 Configuration */
#define FSTVAL_DEBUG    0x80000000	/* debug */
#define FSTVAL_ALL      0x00001FFF	/* Note: does not include DEBUG flag */

/* "type" */
#define FST_TYPE_NONE   0	/* Probably should never happen */
#define FST_TYPE_T2P    1	/* T2P X21 2 port card */
#define FST_TYPE_T4P    2	/* T4P X21 4 port card */
#define FST_TYPE_T1U    3	/* T1U X21 1 port card */
#define FST_TYPE_T2U    4	/* T2U X21 2 port card */
#define FST_TYPE_T4U    5	/* T4U X21 4 port card */
#define FST_TYPE_TE1    6	/* T1E1 X21 1 port card */

/* "family" */
#define FST_FAMILY_TXP  0	/* T2P or T4P */
#define FST_FAMILY_TXU  1	/* T1U or T2U or T4U */

/* "state" */
#define FST_UNINIT      0	/* Raw uninitialised state following
...
@@ -155,6 +196,10 @@ struct fstioc_info {
#define V24             1
#define X21             2
#define V35             3
#define X21D            4
#define T1              5
#define E1              6
#define J1              7

/* "proto" */
#define FST_HDLC        1	/* Cisco compatible HDLC */
...
@@ -187,6 +232,97 @@ struct fstioc_info {
/* "cardMode" bitmask */
#define CARD_MODE_IDENTIFY      0x0001

/*
 * Constants for T1/E1 configuration
 */

/*
 * Clock source
 */
#define CLOCKING_SLAVE          0
#define CLOCKING_MASTER         1

/*
 * Framing
 */
#define FRAMING_E1              0
#define FRAMING_J1              1
#define FRAMING_T1              2

/*
 * Structure
 */
#define STRUCTURE_UNFRAMED      0
#define STRUCTURE_E1_DOUBLE     1
#define STRUCTURE_E1_CRC4       2
#define STRUCTURE_E1_CRC4M      3
#define STRUCTURE_T1_4          4
#define STRUCTURE_T1_12         5
#define STRUCTURE_T1_24         6
#define STRUCTURE_T1_72         7

/*
 * Interface
 */
#define INTERFACE_RJ48C         0
#define INTERFACE_BNC           1

/*
 * Coding
 */
#define CODING_HDB3             0
#define CODING_NRZ              1
#define CODING_CMI              2
#define CODING_CMI_HDB3         3
#define CODING_CMI_B8ZS         4
#define CODING_AMI              5
#define CODING_AMI_ZCS          6
#define CODING_B8ZS             7

/*
 * Line Build Out
 */
#define LBO_0dB                 0
#define LBO_7dB5                1
#define LBO_15dB                2
#define LBO_22dB5               3

/*
 * Range for long haul t1 > 655ft
 */
#define RANGE_0_133_FT          0
#define RANGE_0_40_M            RANGE_0_133_FT
#define RANGE_133_266_FT        1
#define RANGE_40_81_M           RANGE_133_266_FT
#define RANGE_266_399_FT        2
#define RANGE_81_122_M          RANGE_266_399_FT
#define RANGE_399_533_FT        3
#define RANGE_122_162_M         RANGE_399_533_FT
#define RANGE_533_655_FT        4
#define RANGE_162_200_M         RANGE_533_655_FT

/*
 * Receive Equaliser
 */
#define EQUALIZER_SHORT         0
#define EQUALIZER_LONG          1

/*
 * Loop modes
 */
#define LOOP_NONE               0
#define LOOP_LOCAL              1
#define LOOP_PAYLOAD_EXC_TS0    2
#define LOOP_PAYLOAD_INC_TS0    3
#define LOOP_REMOTE             4

/*
 * Buffer modes
 */
#define BUFFER_2_FRAME          0
#define BUFFER_1_FRAME          1
#define BUFFER_96_BIT           2
#define BUFFER_NONE             3

/* Debug support
 *
...
include/linux/if.h View file @ ee86da9d
...
@@ -63,6 +63,7 @@
#define IF_IFACE_T1             0x1003	/* T1 telco serial interface */
#define IF_IFACE_E1             0x1004	/* E1 telco serial interface */
#define IF_IFACE_SYNC_SERIAL    0x1005	/* can't be set by software */
#define IF_IFACE_X21D           0x1006	/* X.21 Dual Clocking (FarSite) */

/* For definitions see hdlc.h */
#define IF_PROTO_HDLC           0x2000	/* raw HDLC protocol */
...
@@ -77,6 +78,7 @@
#define IF_PROTO_FR_DEL_ETH_PVC 0x2009	/* Delete FR Ethernet-bridged PVC */
#define IF_PROTO_FR_PVC         0x200A	/* for reading PVC status */
#define IF_PROTO_FR_ETH_PVC     0x200B
#define IF_PROTO_RAW            0x200C	/* RAW Socket */

/*
...
include/linux/pci_ids.h View file @ ee86da9d
...
@@ -1886,6 +1886,15 @@
#define PCI_DEVICE_ID_MACROLINK_MCCR8   0x2000
#define PCI_DEVICE_ID_MACROLINK_MCCR    0x2001

#define PCI_VENDOR_ID_FARSITE           0x1619
#define PCI_DEVICE_ID_FARSITE_T2P       0x0400
#define PCI_DEVICE_ID_FARSITE_T4P       0x0440
#define PCI_DEVICE_ID_FARSITE_T1U       0x0610
#define PCI_DEVICE_ID_FARSITE_T2U       0x0620
#define PCI_DEVICE_ID_FARSITE_T4U       0x0640
#define PCI_DEVICE_ID_FARSITE_TE1       0x1610
#define PCI_DEVICE_ID_FARSITE_TE1C      0x1612

#define PCI_VENDOR_ID_ALTIMA            0x173b
#define PCI_DEVICE_ID_ALTIMA_AC1000     0x03e8
#define PCI_DEVICE_ID_ALTIMA_AC1001     0x03e9
...