Commit e1acfd06, authored Jan 22, 2005 by Linus Torvalds
Merge bk://kernel.bkbits.net/davem/net-2.6
into ppc970.osdl.org:/home/torvalds/v2.6/linux

parents: ca583a3c 7b9b76c5
Showing 19 changed files with 784 additions and 215 deletions.
 Documentation/networking/netdevices.txt    |   +6   -3
 drivers/infiniband/ulp/ipoib/ipoib.h       |   +5   -4
 drivers/infiniband/ulp/ipoib/ipoib_ib.c    |   +2   -2
 drivers/infiniband/ulp/ipoib/ipoib_main.c  |  +22   -4
 drivers/net/e1000/e1000.h                  |   +1   -0
 drivers/net/e1000/e1000_main.c             |  +22  -11
 drivers/net/sungem.c                       |  +69  -53
 drivers/net/sungem.h                       |   +1   -0
 drivers/net/tg3.c                          | +546  -88
 drivers/net/tg3.h                          |  +52   -6
 include/linux/netdevice.h                  |   +4   -5
 net/atm/clip.c                             |   +2   -2
 net/core/dev.c                             |   +2   -2
 net/core/dev_mcast.c                       |  +13  -13
 net/core/netpoll.c                         |   +3   -3
 net/core/pktgen.c                          |   +7   -2
 net/netlink/af_netlink.c                   |   +3   -3
 net/sched/sch_generic.c                    |  +21  -11
 net/sched/sch_teql.c                       |   +3   -3
Documentation/networking/netdevices.txt

@@ -45,9 +45,10 @@ dev->hard_start_xmit:
 	Synchronization: dev->xmit_lock spinlock.
 	When the driver sets NETIF_F_LLTX in dev->features this will be
 	called without holding xmit_lock. In this case the driver
-	has to execute it's transmission routine in a completely lockless
-	manner. It is recommended only for queueless devices such
-	loopback and tunnels.
+	has to lock by itself when needed. It is recommended to use a try lock
+	for this and return -1 when the spin lock fails.
+	The locking there should also properly protect against
+	set_multicast_list
 	Context: BHs disabled
 	Notes: netif_queue_stopped() is guaranteed false
 	Return codes:

@@ -55,6 +56,8 @@ dev->hard_start_xmit:
 	o NETDEV_TX_BUSY Cannot transmit packet, try later
 	  Usually a bug, means queue start/stop flow control is broken in
 	  the driver. Note: the driver must NOT put the skb in its DMA ring.
+	o NETDEV_TX_LOCKED Locking failed, please retry quickly.
+	  Only valid when NETIF_F_LLTX is set.

dev->tx_timeout:
 	Synchronization: dev->xmit_lock spinlock.
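The try-lock convention this documentation describes comes down to a few lines of boilerplate at the top of a driver's hard_start_xmit. Here is a minimal sketch of the pattern; the driver name and its struct foo_private are hypothetical, standing in for whatever private state a real driver keeps:

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_private *priv = netdev_priv(dev);
	unsigned long flags;

	/* LLTX: the core no longer holds dev->xmit_lock for us, so we
	 * serialize against our own TX-completion path.  A trylock lets
	 * a collision requeue the packet instead of spinning. */
	local_irq_save(flags);
	if (!spin_trylock(&priv->tx_lock)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;	/* core will retry shortly */
	}

	/* ... queue skb to hardware ... */

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}

The ipoib, e1000, sungem and tg3 conversions below all follow this shape.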
drivers/infiniband/ulp/ipoib/ipoib.h

@@ -104,10 +104,10 @@ struct ipoib_buf {
 };

 /*
- * Device private locking: netdev->xmit_lock protects members used
- * in TX fast path.
- * lock protects everything else.  lock nests inside of xmit_lock (ie
- * xmit_lock must be acquired first if needed).
+ * Device private locking: tx_lock protects members used in TX fast
+ * path (and we use LLTX so upper layers don't do extra locking).
+ * lock protects everything else.  lock nests inside of tx_lock (ie
+ * tx_lock must be acquired first if needed).
  */
 struct ipoib_dev_priv {
 	spinlock_t lock;

@@ -150,6 +150,7 @@ struct ipoib_dev_priv {
 	struct ipoib_buf *rx_ring;

+	spinlock_t        tx_lock;
 	struct ipoib_buf *tx_ring;
 	unsigned          tx_head;
 	unsigned          tx_tail;
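The nesting rule in that comment (tx_lock taken first, priv->lock inside it) implies an acquisition order like the following sketch, which is my illustration rather than code from the diff:

	spin_lock(&priv->tx_lock);	/* outer: TX fast-path members */
	spin_lock(&priv->lock);		/* inner: everything else */
	/* ... touch both protected sets ... */
	spin_unlock(&priv->lock);
	spin_unlock(&priv->tx_lock);

Taking the two in the opposite order on another CPU would deadlock, which is why neigh_add_path and unicast_arp_send in ipoib_main.c below may take priv->lock bare: they only ever run from ipoib_start_xmit, already under tx_lock.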
drivers/infiniband/ulp/ipoib/ipoib_ib.c

@@ -247,12 +247,12 @@ static void ipoib_ib_handle_wc(struct net_device *dev,
 	dev_kfree_skb_any(tx_req->skb);

-	spin_lock_irqsave(&dev->xmit_lock, flags);
+	spin_lock_irqsave(&priv->tx_lock, flags);
 	++priv->tx_tail;
 	if (netif_queue_stopped(dev) &&
 	    priv->tx_head - priv->tx_tail <= IPOIB_TX_RING_SIZE / 2)
 		netif_wake_queue(dev);
-	spin_unlock_irqrestore(&dev->xmit_lock, flags);
+	spin_unlock_irqrestore(&priv->tx_lock, flags);

 	if (wc->status != IB_WC_SUCCESS &&
 	    wc->status != IB_WC_WR_FLUSH_ERR)
drivers/infiniband/ulp/ipoib/ipoib_main.c

@@ -411,7 +411,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 	/*
 	 * We can only be called from ipoib_start_xmit, so we're
-	 * inside dev->xmit_lock -- no need to save/restore flags.
+	 * inside tx_lock -- no need to save/restore flags.
 	 */
 	spin_lock(&priv->lock);

@@ -483,7 +483,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 	/*
 	 * We can only be called from ipoib_start_xmit, so we're
-	 * inside dev->xmit_lock -- no need to save/restore flags.
+	 * inside tx_lock -- no need to save/restore flags.
 	 */
 	spin_lock(&priv->lock);

@@ -526,11 +526,27 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 	spin_unlock(&priv->lock);
 }

-/* Called with dev->xmit_lock held and IRQs disabled. */
 static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_neigh *neigh;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (!spin_trylock(&priv->tx_lock)) {
+		local_irq_restore(flags);
+		return NETDEV_TX_LOCKED;
+	}
+
+	/*
+	 * Check if our queue is stopped.  Since we have the LLTX bit
+	 * set, we can't rely on netif_stop_queue() preventing our
+	 * xmit function from being called with a full queue.
+	 */
+	if (unlikely(netif_queue_stopped(dev))) {
+		spin_unlock_irqrestore(&priv->tx_lock, flags);
+		return NETDEV_TX_BUSY;
+	}

 	if (skb->dst && skb->dst->neighbour) {
 		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {

@@ -585,6 +601,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}

 out:
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
 	return NETDEV_TX_OK;
 }

@@ -780,7 +797,7 @@ static void ipoib_setup(struct net_device *dev)
 	dev->addr_len 		 = INFINIBAND_ALEN;
 	dev->type 		 = ARPHRD_INFINIBAND;
 	dev->tx_queue_len 	 = IPOIB_TX_RING_SIZE * 2;
-	dev->features            = NETIF_F_VLAN_CHALLENGED;
+	dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

 	/* MTU will be reset when mcast join happens */
 	dev->mtu 		 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;

@@ -795,6 +812,7 @@ static void ipoib_setup(struct net_device *dev)
 	priv->dev = dev;

 	spin_lock_init(&priv->lock);
+	spin_lock_init(&priv->tx_lock);

 	init_MUTEX(&priv->mcast_mutex);
 	init_MUTEX(&priv->vlan_mutex);
drivers/net/e1000/e1000.h

@@ -209,6 +209,7 @@ struct e1000_adapter {
 	/* TX */
 	struct e1000_desc_ring tx_ring;
+	spinlock_t tx_lock;
 	uint32_t txd_cmd;
 	uint32_t tx_int_delay;
 	uint32_t tx_abs_int_delay;
drivers/net/e1000/e1000_main.c

@@ -291,9 +291,7 @@ e1000_up(struct e1000_adapter *adapter)
 		e1000_phy_reset(&adapter->hw);
 	}

-	spin_lock_irq(&netdev->xmit_lock);
 	e1000_set_multi(netdev);
-	spin_unlock_irq(&netdev->xmit_lock);

 	e1000_restore_vlan(adapter);

@@ -522,6 +520,9 @@ e1000_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;

+	/* hard_start_xmit is safe against parallel locking */
+	netdev->features |= NETIF_F_LLTX;
+
 	/* before reading the EEPROM, reset the controller to
 	 * put the device in a known good starting state */

@@ -731,6 +732,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
 	atomic_set(&adapter->irq_sem, 1);
 	spin_lock_init(&adapter->stats_lock);
+	spin_lock_init(&adapter->tx_lock);

 	return 0;
 }

@@ -1291,8 +1293,6 @@ e1000_set_mac(struct net_device *netdev, void *p)
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
- *
- * Called with netdev->xmit_lock held and IRQs disabled.
 **/

static void

@@ -1304,9 +1304,12 @@ e1000_set_multi(struct net_device *netdev)
 	uint32_t rctl;
 	uint32_t hash_value;
 	int i;
+	unsigned long flags;

 	/* Check for Promiscuous and All Multicast modes */

+	spin_lock_irqsave(&adapter->tx_lock, flags);
+
 	rctl = E1000_READ_REG(hw, RCTL);

 	if (netdev->flags & IFF_PROMISC) {

@@ -1355,6 +1358,8 @@ e1000_set_multi(struct net_device *netdev)
 	if (hw->mac_type == e1000_82542_rev2_0)
 		e1000_leave_82542_rst(adapter);
+
+	spin_unlock_irqrestore(&adapter->tx_lock, flags);
 }

/* Need to wait a few seconds after link up to get diagnostic information from

@@ -1781,8 +1786,6 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
 }

 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
-
-/* Called with dev->xmit_lock held and interrupts disabled. */
 static int
 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {

@@ -1791,6 +1794,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
 	unsigned int len = skb->len;
+	unsigned long flags;
 	unsigned int nr_frags = 0;
 	unsigned int mss = 0;
 	int count = 0;

@@ -1834,10 +1838,18 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	if (adapter->pcix_82544)
 		count += nr_frags;

+	local_irq_save(flags);
+	if (!spin_trylock(&adapter->tx_lock)) {
+		/* Collision - tell upper layer to requeue */
+		local_irq_restore(flags);
+		return NETDEV_TX_LOCKED;
+	}
+
 	/* need: count + 2 desc gap to keep tail from touching
 	 * head, otherwise try next time */
 	if (unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
 		netif_stop_queue(netdev);
+		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		return NETDEV_TX_BUSY;
 	}

@@ -1845,6 +1857,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
 			netif_stop_queue(netdev);
 			mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
+			spin_unlock_irqrestore(&adapter->tx_lock, flags);
 			return NETDEV_TX_BUSY;
 		}
 	}

@@ -1871,6 +1884,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	if (unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2))
 		netif_stop_queue(netdev);

+	spin_unlock_irqrestore(&adapter->tx_lock, flags);
 	return NETDEV_TX_OK;
 }

@@ -2220,13 +2234,13 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 	tx_ring->next_to_clean = i;

-	spin_lock(&netdev->xmit_lock);
+	spin_lock(&adapter->tx_lock);

 	if (unlikely(cleaned && netif_queue_stopped(netdev) &&
 		    netif_carrier_ok(netdev)))
 		netif_wake_queue(netdev);

-	spin_unlock(&netdev->xmit_lock);
+	spin_unlock(&adapter->tx_lock);

 	return cleaned;
 }

@@ -2805,10 +2819,7 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
 	if (wufc) {
 		e1000_setup_rctl(adapter);
-
-		spin_lock_irq(&netdev->xmit_lock);
 		e1000_set_multi(netdev);
-		spin_unlock_irq(&netdev->xmit_lock);

 		/* turn on all-multi mode if wake on multicast is enabled */
 		if (adapter->wol & E1000_WUFC_MC) {
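The net effect in e1000 is that e1000_set_multi becomes self-locking: callers such as e1000_up and e1000_suspend simply call it instead of wrapping it in the device xmit_lock. A before/after sketch of the calling convention, using the names from the diff:

	/* before: the caller provided the exclusion */
	spin_lock_irq(&netdev->xmit_lock);
	e1000_set_multi(netdev);
	spin_unlock_irq(&netdev->xmit_lock);

	/* after: e1000_set_multi takes adapter->tx_lock itself */
	e1000_set_multi(netdev);

Moving the lock inside the function keeps the multicast path serialized against the now-lockless e1000_xmit_frame, matching the netdevices.txt note above that LLTX locking must also protect against set_multicast_list.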
drivers/net/sungem.c

@@ -835,9 +835,9 @@ static int gem_poll(struct net_device *dev, int *budget)
 	}

 	/* Run TX completion thread */
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&gp->tx_lock);
 	gem_tx(dev, gp, gp->status);
-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);

 	spin_unlock_irqrestore(&gp->lock, flags);

@@ -932,12 +932,12 @@ static void gem_tx_timeout(struct net_device *dev)
 	       readl(gp->regs + MAC_RXCFG));

 	spin_lock_irq(&gp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&gp->tx_lock);

 	gp->reset_task_pending = 2;
 	schedule_work(&gp->reset_task);

-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);
 }

@@ -955,6 +955,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct gem *gp = dev->priv;
 	int entry;
 	u64 ctrl;
+	unsigned long flags;

 	ctrl = 0;
 	if (skb->ip_summed == CHECKSUM_HW) {

@@ -968,9 +969,17 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			       (csum_stuff_off << 21));
 	}

+	local_irq_save(flags);
+	if (!spin_trylock(&gp->tx_lock)) {
+		/* Tell upper layer to requeue */
+		local_irq_restore(flags);
+		return NETDEV_TX_LOCKED;
+	}
+
 	/* This is a hard error, log it. */
 	if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&gp->tx_lock, flags);
 		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
 		       dev->name);
 		return NETDEV_TX_BUSY;

@@ -1057,6 +1066,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		       dev->name, entry, skb->len);
 	mb();
 	writel(gp->tx_new, gp->regs + TXDMA_KICK);
+	spin_unlock_irqrestore(&gp->tx_lock, flags);

 	dev->trans_start = jiffies;

@@ -1087,11 +1097,11 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
 	}

 	spin_lock_irq(&gp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&gp->tx_lock);
 	dev->mtu = new_mtu;
 	gp->reset_task_pending = 1;
 	schedule_work(&gp->reset_task);
-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);

 	flush_scheduled_work();

@@ -1101,7 +1111,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
 #define STOP_TRIES 32

-/* Must be invoked under gp->lock and dev->xmit_lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_stop(struct gem *gp)
 {
 	int limit;

@@ -1127,7 +1137,7 @@ static void gem_stop(struct gem *gp)
 		printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
 }

-/* Must be invoked under gp->lock and dev->xmit_lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_start_dma(struct gem *gp)
 {
 	unsigned long val;

@@ -1152,7 +1162,7 @@ static void gem_start_dma(struct gem *gp)
 }

-/* Must be invoked under gp->lock and dev->xmit_lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
 // XXX dbl check what that function should do when called on PCS PHY
 static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
 {

@@ -1239,7 +1249,7 @@ static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
 /* A link-up condition has occurred, initialize and enable the
  * rest of the chip.
  *
- * Must be invoked under gp->lock and dev->xmit_lock.
+ * Must be invoked under gp->lock and gp->tx_lock.
  */
 static int gem_set_link_modes(struct gem *gp)
 {

@@ -1346,7 +1356,7 @@ static int gem_set_link_modes(struct gem *gp)
 	return 0;
 }

-/* Must be invoked under gp->lock and dev->xmit_lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
 static int gem_mdio_link_not_up(struct gem *gp)
 {
 	switch (gp->lstate) {

@@ -1404,7 +1414,7 @@ static void gem_reset_task(void *data)
 	netif_poll_disable(gp->dev);

 	spin_lock_irq(&gp->lock);
-	spin_lock(&gp->dev->xmit_lock);
+	spin_lock(&gp->tx_lock);

 	if (gp->hw_running && gp->opened) {
 		netif_stop_queue(gp->dev);

@@ -1420,7 +1430,7 @@ static void gem_reset_task(void *data)
 	}

 	gp->reset_task_pending = 0;

-	spin_unlock(&gp->dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);

 	netif_poll_enable(gp->dev);
 }

@@ -1434,7 +1444,7 @@ static void gem_link_timer(unsigned long data)
 		return;

 	spin_lock_irq(&gp->lock);
-	spin_lock(&gp->dev->xmit_lock);
+	spin_lock(&gp->tx_lock);

 	/* If the link of task is still pending, we just
 	 * reschedule the link timer

@@ -1504,11 +1514,11 @@ static void gem_link_timer(unsigned long data)
 restart:
 	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
 out_unlock:
-	spin_unlock(&gp->dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);
 }

-/* Must be invoked under gp->lock and dev->xmit_lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_clean_rings(struct gem *gp)
 {
 	struct gem_init_block *gb = gp->init_block;

@@ -1559,7 +1569,7 @@ static void gem_clean_rings(struct gem *gp)
 	}
 }

-/* Must be invoked under gp->lock and dev->xmit_lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_init_rings(struct gem *gp)
 {
 	struct gem_init_block *gb = gp->init_block;

@@ -1609,7 +1619,7 @@ static void gem_init_rings(struct gem *gp)
 	wmb();
 }

-/* Must be invoked under gp->lock and dev->xmit_lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_init_phy(struct gem *gp)
 {
 	u32 mifcfg;

@@ -1747,7 +1757,7 @@ static void gem_init_phy(struct gem *gp)
 	}
 }

-/* Must be invoked under gp->lock and dev->xmit_lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_init_dma(struct gem *gp)
 {
 	u64 desc_dma = (u64) gp->gblock_dvma;

@@ -1785,7 +1795,7 @@ static void gem_init_dma(struct gem *gp)
 	       gp->regs + RXDMA_BLANK);
 }

-/* Must be invoked under dev->xmit_lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
 static u32 gem_setup_multicast(struct gem *gp)
 {

@@ -1828,7 +1838,7 @@ gem_setup_multicast(struct gem *gp)
 	return rxcfg;
 }

-/* Must be invoked under gp->lock and dev->xmit_lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_init_mac(struct gem *gp)
 {
 	unsigned char *e = &gp->dev->dev_addr[0];

@@ -1906,7 +1916,7 @@ static void gem_init_mac(struct gem *gp)
 	writel(0xffffffff, gp->regs + MAC_MCMASK);
 }

-/* Must be invoked under gp->lock and dev->xmit_lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_init_pause_thresholds(struct gem *gp)
 {
 	u32 cfg;

@@ -2042,7 +2052,7 @@ static int gem_check_invariants(struct gem *gp)
 	return 0;
 }

-/* Must be invoked under gp->lock and dev->xmit_lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_init_hw(struct gem *gp, int restart_link)
 {
 	/* On Apple's gmac, I initialize the PHY only after

@@ -2140,11 +2150,11 @@ static void gem_stop_phy(struct gem *gp)
 	if (!gp->wake_on_lan) {
 		spin_lock_irqsave(&gp->lock, flags);
-		spin_lock(&gp->dev->xmit_lock);
+		spin_lock(&gp->tx_lock);
 		gem_stop(gp);
 		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
 		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
-		spin_unlock(&gp->dev->xmit_lock);
+		spin_unlock(&gp->tx_lock);
 		spin_unlock_irqrestore(&gp->lock, flags);
 	}

@@ -2192,9 +2202,9 @@ static void gem_shutdown(struct gem *gp)
 		unsigned long flags;

 		spin_lock_irqsave(&gp->lock, flags);
-		spin_lock(&gp->dev->xmit_lock);
+		spin_lock(&gp->tx_lock);
 		gem_stop(gp);
-		spin_unlock(&gp->dev->xmit_lock);
+		spin_unlock(&gp->tx_lock);
 		spin_unlock_irqrestore(&gp->lock, flags);
 	}
 }

@@ -2255,9 +2265,9 @@ static int gem_open(struct net_device *dev)
 	/* Reset the chip */
 	spin_lock_irq(&gp->lock);
-	spin_lock(&gp->dev->xmit_lock);
+	spin_lock(&gp->tx_lock);
 	gem_stop(gp);
-	spin_unlock(&gp->dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);

 	gp->hw_running = 1;

@@ -2271,7 +2281,7 @@ static int gem_open(struct net_device *dev)
 		printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);

 		spin_lock_irq(&gp->lock);
-		spin_lock(&gp->dev->xmit_lock);
+		spin_lock(&gp->tx_lock);
 #ifdef CONFIG_PPC_PMAC
 		if (!hw_was_up && gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
 			gem_apple_powerdown(gp);

@@ -2280,14 +2290,14 @@ static int gem_open(struct net_device *dev)
 		gp->pm_timer.expires = jiffies + 10*HZ;
 		add_timer(&gp->pm_timer);
 		up(&gp->pm_sem);

-		spin_unlock(&gp->dev->xmit_lock);
+		spin_unlock(&gp->tx_lock);
 		spin_unlock_irq(&gp->lock);

 		return -EAGAIN;
 	}

 	spin_lock_irq(&gp->lock);
-	spin_lock(&gp->dev->xmit_lock);
+	spin_lock(&gp->tx_lock);

 	/* Allocate & setup ring buffers */
 	gem_init_rings(gp);

@@ -2297,7 +2307,7 @@ static int gem_open(struct net_device *dev)
 	gp->opened = 1;

-	spin_unlock(&gp->dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);

 	up(&gp->pm_sem);

@@ -2318,7 +2328,7 @@ static int gem_close(struct net_device *dev)
 	/* Stop traffic, mark us closed */
 	spin_lock_irq(&gp->lock);
-	spin_lock(&gp->dev->xmit_lock);
+	spin_lock(&gp->tx_lock);

 	gp->opened = 0;

@@ -2333,7 +2343,7 @@ static int gem_close(struct net_device *dev)
 	/* Bye, the pm timer will finish the job */
 	free_irq(gp->pdev->irq, (void *) dev);

-	spin_unlock(&gp->dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);

 	/* Fire the PM timer that will shut us down in about 10 seconds */

@@ -2364,7 +2374,7 @@ static int gem_suspend(struct pci_dev *pdev, u32 state)
 	/* If the driver is opened, we stop the DMA */
 	if (gp->opened) {
 		spin_lock_irq(&gp->lock);
-		spin_lock(&gp->dev->xmit_lock);
+		spin_lock(&gp->tx_lock);

 		/* Stop traffic, mark us closed */
 		netif_device_detach(dev);

@@ -2375,7 +2385,7 @@ static int gem_suspend(struct pci_dev *pdev, u32 state)
 		/* Get rid of ring buffers */
 		gem_clean_rings(gp);

-		spin_unlock(&gp->dev->xmit_lock);
+		spin_unlock(&gp->tx_lock);
 		spin_unlock_irq(&gp->lock);

 		if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)

@@ -2409,14 +2419,14 @@ static int gem_resume(struct pci_dev *pdev)
 	}
 #endif /* CONFIG_PPC_PMAC */

 	spin_lock_irq(&gp->lock);
-	spin_lock(&gp->dev->xmit_lock);
+	spin_lock(&gp->tx_lock);

 	gem_stop(gp);
 	gp->hw_running = 1;
 	gem_init_rings(gp);
 	gem_init_hw(gp, 1);

-	spin_unlock(&gp->dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);

 	netif_device_attach(dev);

@@ -2437,7 +2447,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
 	struct net_device_stats *stats = &gp->net_stats;

 	spin_lock_irq(&gp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&gp->tx_lock);

 	if (gp->hw_running) {
 		stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);

@@ -2457,13 +2467,12 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
 		writel(0, gp->regs + MAC_LCOLL);
 	}

-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);

 	return &gp->net_stats;
 }

-/* Called with dev->xmit_lock held and IRQs disabled. */
 static void gem_set_multicast(struct net_device *dev)
 {
 	struct gem *gp = dev->priv;

@@ -2473,6 +2482,9 @@ static void gem_set_multicast(struct net_device *dev)
 	if (!gp->hw_running)
 		return;

+	spin_lock_irq(&gp->lock);
+	spin_lock(&gp->tx_lock);
+
 	netif_stop_queue(dev);

 	rxcfg = readl(gp->regs + MAC_RXCFG);

@@ -2495,6 +2507,9 @@ static void gem_set_multicast(struct net_device *dev)
 	writel(rxcfg, gp->regs + MAC_RXCFG);

 	netif_wake_queue(dev);
+
+	spin_unlock(&gp->tx_lock);
+	spin_unlock_irq(&gp->lock);
 }

 static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)

@@ -2525,7 +2540,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		/* Return current PHY settings */
 		spin_lock_irq(&gp->lock);
-		spin_lock(&dev->xmit_lock);
+		spin_lock(&gp->tx_lock);
 		cmd->autoneg = gp->want_autoneg;
 		cmd->speed = gp->phy_mii.speed;
 		cmd->duplex = gp->phy_mii.duplex;

@@ -2537,7 +2552,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		 */
 		if (cmd->advertising == 0)
 			cmd->advertising = cmd->supported;
-		spin_unlock(&dev->xmit_lock);
+		spin_unlock(&gp->tx_lock);
 		spin_unlock_irq(&gp->lock);
 	} else { // XXX PCS ?
 		cmd->supported =

@@ -2577,9 +2592,9 @@ static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	/* Apply settings and restart link process. */
 	spin_lock_irq(&gp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&gp->tx_lock);
 	gem_begin_auto_negotiation(gp, cmd);
-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);

 	return 0;

@@ -2594,9 +2609,9 @@ static int gem_nway_reset(struct net_device *dev)
 	/* Restart link process. */
 	spin_lock_irq(&gp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&gp->tx_lock);
 	gem_begin_auto_negotiation(gp, NULL);
-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);

 	return 0;

@@ -2848,6 +2863,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 	gp->msg_enable = DEFAULT_MSG;

 	spin_lock_init(&gp->lock);
+	spin_lock_init(&gp->tx_lock);
 	init_MUTEX(&gp->pm_sem);

 	init_timer(&gp->link_timer);

@@ -2883,9 +2899,9 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 		gem_apple_powerup(gp);
 #endif
 	spin_lock_irq(&gp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&gp->tx_lock);
 	gem_stop(gp);
-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);

 	/* Fill up the mii_phy structure (even if we won't use it) */

@@ -2951,11 +2967,11 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 	/* Detect & init PHY, start autoneg */
 	spin_lock_irq(&gp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&gp->tx_lock);
 	gp->hw_running = 1;
 	gem_init_phy(gp);
 	gem_begin_auto_negotiation(gp, NULL);
-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);

 	if (gp->phy_type == phy_mii_mdio0 ||

@@ -2966,7 +2982,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 	pci_set_drvdata(pdev, dev);

 	/* GEM can do it all... */
-	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
 	if (pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;
drivers/net/sungem.h

@@ -953,6 +953,7 @@ enum link_state {
 struct gem {
 	spinlock_t lock;
+	spinlock_t tx_lock;
 	void __iomem *regs;
 	int rx_new, rx_old;
 	int tx_new, tx_old;
drivers/net/tg3.c

@@ -60,8 +60,8 @@
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.16"
-#define DRV_MODULE_RELDATE	"January 17, 2005"
+#define DRV_MODULE_VERSION	"3.17"
+#define DRV_MODULE_RELDATE	"January 22, 2005"

 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0

@@ -2816,9 +2816,9 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 	/* run TX completion thread */
 	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
-		spin_lock(&netdev->xmit_lock);
+		spin_lock(&tp->tx_lock);
 		tg3_tx(tp);
-		spin_unlock(&netdev->xmit_lock);
+		spin_unlock(&tp->tx_lock);
 	}

 	spin_unlock_irqrestore(&tp->lock, flags);

@@ -2939,7 +2939,7 @@ static void tg3_reset_task(void *_data)
 	tg3_netif_stop(tp);

 	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->dev->xmit_lock);
+	spin_lock(&tp->tx_lock);

 	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
 	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

@@ -2949,7 +2949,7 @@ static void tg3_reset_task(void *_data)
 	tg3_netif_start(tp);

-	spin_unlock(&tp->dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);

 	if (restart_timer)

@@ -3048,7 +3048,6 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
 		(base + len + 8 < base));
 }

-/* dev->xmit_lock is held and IRQs are disabled. */
 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);

@@ -3056,12 +3055,39 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int i;
 	u32 len, entry, base_flags, mss;
 	int would_hit_hwbug;
+	unsigned long flags;

 	len = skb_headlen(skb);

+	/* No BH disabling for tx_lock here.  We are running in BH disabled
+	 * context and TX reclaim runs via tp->poll inside of a software
+	 * interrupt.  Rejoice!
+	 *
+	 * Actually, things are not so simple.  If we are to take a hw
+	 * IRQ here, we can deadlock, consider:
+	 *
+	 *       CPU1		CPU2
+	 *   tg3_start_xmit
+	 *   take tp->tx_lock
+	 *			tg3_timer
+	 *			take tp->lock
+	 *   tg3_interrupt
+	 *   spin on tp->lock
+	 *			spin on tp->tx_lock
+	 *
+	 * So we really do need to disable interrupts when taking
+	 * tx_lock here.
+	 */
+	local_irq_save(flags);
+	if (!spin_trylock(&tp->tx_lock)) {
+		local_irq_restore(flags);
+		return NETDEV_TX_LOCKED;
+	}
+
 	/* This is a hard error, log it. */
 	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&tp->tx_lock, flags);
 		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
 		       dev->name);
 		return NETDEV_TX_BUSY;

@@ -3198,7 +3224,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			     entry, len, last_plus_one, &start, mss))
-			goto out;
+			goto out_unlock;

 		entry = start;
 	}

@@ -3210,8 +3236,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);

-out:
+out_unlock:
 	mmiowb();
+	spin_unlock_irqrestore(&tp->tx_lock, flags);

 	dev->trans_start = jiffies;

@@ -3246,7 +3273,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 	tg3_netif_stop(tp);

 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);

 	tg3_halt(tp);

@@ -3256,7 +3283,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 	tg3_netif_start(tp);

-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);

 	return 0;

@@ -5547,7 +5574,7 @@ static void tg3_timer(unsigned long __opaque)
 	unsigned long flags;

 	spin_lock_irqsave(&tp->lock, flags);
-	spin_lock(&tp->dev->xmit_lock);
+	spin_lock(&tp->tx_lock);

 	/* All of this garbage is because when using non-tagged
 	 * IRQ status the mailbox/status_block protocol the chip

@@ -5563,7 +5590,7 @@ static void tg3_timer(unsigned long __opaque)
 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
 			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-			spin_unlock(&tp->dev->xmit_lock);
+			spin_unlock(&tp->tx_lock);
 			spin_unlock_irqrestore(&tp->lock, flags);
 			schedule_work(&tp->reset_task);
 			return;

@@ -5632,7 +5659,7 @@ static void tg3_timer(unsigned long __opaque)
 		tp->asf_counter = tp->asf_multiplier;
 	}

-	spin_unlock(&tp->dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irqrestore(&tp->lock, flags);

 	tp->timer.expires = jiffies + tp->timer_offset;

@@ -5645,12 +5672,12 @@ static int tg3_open(struct net_device *dev)
 	int err;

 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);

 	tg3_disable_ints(tp);
 	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);

 	/* The placement of this call is tied

@@ -5669,7 +5696,7 @@ static int tg3_open(struct net_device *dev)
 	}

 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);

 	err = tg3_init_hw(tp);
 	if (err) {

@@ -5689,7 +5716,7 @@ static int tg3_open(struct net_device *dev)
 		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
 	}

-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);

 	if (err) {

@@ -5699,11 +5726,11 @@ static int tg3_open(struct net_device *dev)
 	}

 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);

 	tg3_enable_ints(tp);

-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);

 	netif_start_queue(dev);

@@ -5951,7 +5978,7 @@ static int tg3_close(struct net_device *dev)
 	del_timer_sync(&tp->timer);

 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);
 #if 0
 	tg3_dump_state(tp);
 #endif

@@ -5965,7 +5992,7 @@ static int tg3_close(struct net_device *dev)
 		  TG3_FLAG_GOT_SERDES_FLOWCTL);
 	netif_carrier_off(tp->dev);

-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);

 	free_irq(dev->irq, dev);

@@ -6264,10 +6291,15 @@ static void __tg3_set_rx_mode(struct net_device *dev)
 	}
 }

-/* Called with dev->xmit_lock held and IRQs disabled. */
 static void tg3_set_rx_mode(struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);

+	spin_lock_irq(&tp->lock);
+	spin_lock(&tp->tx_lock);
 	__tg3_set_rx_mode(dev);
+	spin_unlock(&tp->tx_lock);
+	spin_unlock_irq(&tp->lock);
 }

 #define TG3_REGDUMP_LEN		(32 * 1024)

@@ -6290,7 +6322,7 @@ static void tg3_get_regs(struct net_device *dev,
 	memset(p, 0, TG3_REGDUMP_LEN);

 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);

 #define __GET_REG32(reg)	(*(p)++ = tr32(reg))
 #define GET_REG32_LOOP(base,len)		\

@@ -6340,17 +6372,19 @@ do { p = (u32 *)(orig_p + (reg)); \
 #undef GET_REG32_LOOP
 #undef GET_REG32_1

-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);
 }

 static int tg3_get_eeprom_len(struct net_device *dev)
 {
-	return EEPROM_CHIP_SIZE;
+	struct tg3 *tp = netdev_priv(dev);
+
+	return tp->nvram_size;
 }

-static int tg3_nvram_read_using_eeprom(struct tg3 *tp, u32 offset, u32 *val);
+static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);

 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
 {
 	struct tg3 *tp = netdev_priv(dev);

@@ -6362,10 +6396,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 	len = eeprom->len;
 	eeprom->len = 0;

-	ret = tg3_nvram_read_using_eeprom(tp, 0, &eeprom->magic);
-	if (ret)
-		return ret;
-	eeprom->magic = swab32(eeprom->magic);
+	eeprom->magic = TG3_EEPROM_MAGIC;

 	if (offset & 3) {
 		/* adjustments to start on required 4 byte boundary */

@@ -6375,9 +6406,10 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 			/* i.e. offset=1 len=2 */
 			b_count = len;
 		}
-		ret = tg3_nvram_read_using_eeprom(tp, offset-b_offset, &val);
+		ret = tg3_nvram_read(tp, offset-b_offset, &val);
 		if (ret)
 			return ret;
+		val = cpu_to_le32(val);
 		memcpy(data, ((char*)&val) + b_offset, b_count);
 		len -= b_count;
 		offset += b_count;

@@ -6387,12 +6419,13 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 	/* read bytes upto the last 4 byte boundary */
 	pd = &data[eeprom->len];
 	for (i = 0 ; i < (len - (len & 3)) ; i += 4) {
-		ret = tg3_nvram_read_using_eeprom(tp, offset + i,
-						  (u32*)(pd + i));
+		ret = tg3_nvram_read(tp, offset + i, &val);
 		if (ret) {
 			eeprom->len += i;
 			return ret;
 		}
+		val = cpu_to_le32(val);
+		memcpy(pd + i, &val, 4);
 	}
 	eeprom->len += i;

@@ -6401,15 +6434,72 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 		pd = &data[eeprom->len];
 		b_count = len & 3;
 		b_offset = offset + len - b_count;
-		ret = tg3_nvram_read_using_eeprom(tp, b_offset, &val);
+		ret = tg3_nvram_read(tp, b_offset, &val);
 		if (ret)
 			return ret;
+		val = cpu_to_le32(val);
 		memcpy(pd, ((char*)&val), b_count);
 		eeprom->len += b_count;
 	}
 	return 0;
 }

+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
+
+static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int ret;
+	u32 offset, len, b_offset, odd_len, start, end;
+	u8 *buf;
+
+	if (eeprom->magic != TG3_EEPROM_MAGIC)
+		return -EINVAL;
+
+	offset = eeprom->offset;
+	len = eeprom->len;
+
+	if ((b_offset = (offset & 3))) {
+		/* adjustments to start on required 4 byte boundary */
+		ret = tg3_nvram_read(tp, offset-b_offset, &start);
+		if (ret)
+			return ret;
+		start = cpu_to_le32(start);
+		len += b_offset;
+		offset &= ~3;
+	}
+
+	odd_len = 0;
+	if ((len & 3) && ((len > 4) || (b_offset == 0))) {
+		/* adjustments to end on required 4 byte boundary */
+		odd_len = 1;
+		len = (len + 3) & ~3;
+		ret = tg3_nvram_read(tp, offset+len-4, &end);
+		if (ret)
+			return ret;
+		end = cpu_to_le32(end);
+	}
+
+	buf = data;
+	if (b_offset || odd_len) {
+		buf = kmalloc(len, GFP_KERNEL);
+		if (buf == 0)
+			return -ENOMEM;
+		if (b_offset)
+			memcpy(buf, &start, 4);
+		if (odd_len)
+			memcpy(buf+len-4, &end, 4);
+		memcpy(buf + b_offset, data, eeprom->len);
+	}
+
+	ret = tg3_nvram_write_block(tp, offset, len, buf);
+
+	if (buf != data)
+		kfree(buf);
+
+	return ret;
+}
+
 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct tg3 *tp = netdev_priv(dev);

@@ -6464,7 +6554,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	}

 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);

 	tp->link_config.autoneg = cmd->autoneg;
 	if (cmd->autoneg == AUTONEG_ENABLE) {

@@ -6478,7 +6568,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	}

 	tg3_setup_phy(tp, 1);
-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);

 	return 0;

@@ -6595,7 +6685,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 	tg3_netif_stop(tp);
 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);

 	tp->rx_pending = ering->rx_pending;

@@ -6608,7 +6698,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 	tg3_halt(tp);
 	tg3_init_hw(tp);
 	tg3_netif_start(tp);
-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);

 	return 0;

@@ -6629,7 +6719,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 	tg3_netif_stop(tp);
 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);
 	if (epause->autoneg)
 		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 	else

@@ -6645,7 +6735,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 	tg3_halt(tp);
 	tg3_init_hw(tp);
 	tg3_netif_start(tp);
-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);

 	return 0;

@@ -6771,14 +6861,14 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 	struct tg3 *tp = netdev_priv(dev);

 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);

 	tp->vlgrp = grp;

 	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
 	__tg3_set_rx_mode(dev);

-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);
 }

@@ -6787,10 +6877,10 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	struct tg3 *tp = netdev_priv(dev);

 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);
 	if (tp->vlgrp)
 		tp->vlgrp->vlan_devices[vid] = NULL;
-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);
 }
 #endif

@@ -6809,6 +6899,7 @@ static struct ethtool_ops tg3_ethtool_ops = {
 	.get_link		= ethtool_op_get_link,
 	.get_eeprom_len		= tg3_get_eeprom_len,
 	.get_eeprom		= tg3_get_eeprom,
+	.set_eeprom		= tg3_set_eeprom,
 	.get_ringparam		= tg3_get_ringparam,
 	.set_ringparam		= tg3_set_ringparam,
 	.get_pauseparam		= tg3_get_pauseparam,

@@ -6828,6 +6919,103 @@ static struct ethtool_ops tg3_ethtool_ops = {
 	.get_ethtool_stats	= tg3_get_ethtool_stats,
 };

+static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
+{
+	u32 cursize, val;
+
+	tp->nvram_size = EEPROM_CHIP_SIZE;
+
+	if (tg3_nvram_read(tp, 0, &val) != 0)
+		return;
+
+	if (swab32(val) != TG3_EEPROM_MAGIC)
+		return;
+
+	/*
+	 * Size the chip by reading offsets at increasing powers of two.
+	 * When we encounter our validation signature, we know the addressing
+	 * has wrapped around, and thus have our chip size.
+	 */
+	cursize = 0x800;
+
+	while (cursize < tp->nvram_size) {
+		if (tg3_nvram_read(tp, cursize, &val) != 0)
+			return;
+
+		if (swab32(val) == TG3_EEPROM_MAGIC)
+			break;
+
+		cursize <<= 1;
+	}
+
+	tp->nvram_size = cursize;
+}
+
+static void __devinit tg3_get_nvram_size(struct tg3 *tp)
+{
+	u32 val;
+
+	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
+		if (val != 0) {
+			tp->nvram_size = (val >> 16) * 1024;
+			return;
+		}
+	}
+	tp->nvram_size = 0x20000;
+}
+
+static void __devinit tg3_get_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
+		tp->tg3_flags2 |= TG3_FLG2_FLASH;
+	}
+	else {
+		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+		tw32(NVRAM_CFG1, nvcfg1);
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
+			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
+				tp->nvram_jedecnum = JEDEC_ATMEL;
+				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+				break;
+			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
+				tp->nvram_jedecnum = JEDEC_ATMEL;
+				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
+				break;
+			case FLASH_VENDOR_ATMEL_EEPROM:
+				tp->nvram_jedecnum = JEDEC_ATMEL;
+				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+				break;
+			case FLASH_VENDOR_ST:
+				tp->nvram_jedecnum = JEDEC_ST;
+				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
+				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+				break;
+			case FLASH_VENDOR_SAIFUN:
+				tp->nvram_jedecnum = JEDEC_SAIFUN;
+				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
+				break;
+			case FLASH_VENDOR_SST_SMALL:
+			case FLASH_VENDOR_SST_LARGE:
+				tp->nvram_jedecnum = JEDEC_SST;
+				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
+				break;
+		}
+	}
+	else {
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+	}
+}
+
 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
 static void __devinit tg3_nvram_init(struct tg3 *tp)
 {

@@ -6852,32 +7040,27 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
-		u32 nvcfg1;
+		tp->tg3_flags |= TG3_FLAG_NVRAM;

 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
 			u32 nvaccess = tr32(NVRAM_ACCESS);

-			tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
+			tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
 		}

-		nvcfg1 = tr32(NVRAM_CFG1);
-
-		tp->tg3_flags |= TG3_FLAG_NVRAM;
-		if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
-			if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
-				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
-		}
-		else {
-			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
-			tw32(NVRAM_CFG1, nvcfg1);
-		}
+		tg3_get_nvram_info(tp);
+		tg3_get_nvram_size(tp);

 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
 			u32 nvaccess = tr32(NVRAM_ACCESS);

-			tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
+			tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
 		}
 	}
 	else {
 		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
+
+		tg3_get_eeprom_size(tp);
 	}
 }

@@ -6915,11 +7098,30 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
 	return 0;
 }

-static int __devinit tg3_nvram_read(struct tg3 *tp,
-				    u32 offset, u32 *val)
+#define NVRAM_CMD_TIMEOUT 10000
+
+static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
+{
+	int i;
+
+	tw32(NVRAM_CMD, nvram_cmd);
+	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
+		udelay(10);
+		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
+			udelay(10);
+			break;
+		}
+	}
+	if (i == NVRAM_CMD_TIMEOUT) {
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
 {
+	int ret;
+
 	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
 		printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
 		return -EINVAL;

@@ -6928,10 +7130,14 @@ static int __devinit tg3_nvram_read(struct tg3 *tp,
 	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
 		return tg3_nvram_read_using_eeprom(tp, offset, val);

-	if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
-		offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
-			  NVRAM_BUFFERED_PAGE_POS) +
-			(offset % NVRAM_BUFFERED_PAGE_SIZE);
+	if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
+		(tp->tg3_flags2 & TG3_FLG2_FLASH) &&
+		(tp->nvram_jedecnum == JEDEC_ATMEL)) {
+
+		offset = ((offset / tp->nvram_pagesize) <<
+			  ATMEL_AT45DB0X1B_PAGE_POS) +
+			(offset % tp->nvram_pagesize);
+	}

 	if (offset > NVRAM_ADDR_MSK)
 		return -EINVAL;

@@ -6945,19 +7151,11 @@ static int __devinit tg3_nvram_read(struct tg3 *tp,
 	}

 	tw32(NVRAM_ADDR, offset);
-	tw32(NVRAM_CMD,
-	     NVRAM_CMD_RD | NVRAM_CMD_GO |
-	     NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
+	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
+		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

-	/* Wait for done bit to clear. */
-	for (i = 0; i < 1000; i++) {
-		udelay(10);
-		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
-			udelay(10);
-			*val = swab32(tr32(NVRAM_RDDATA));
-			break;
-		}
-	}
+	if (ret == 0)
+		*val = swab32(tr32(NVRAM_RDDATA));

 	tg3_nvram_unlock(tp);

@@ -6967,10 +7165,268 @@ static int __devinit tg3_nvram_read(struct tg3 *tp,
 		tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
 	}

-	if (i >= 1000)
-		return -EBUSY;
-
-	return 0;
+	return ret;
 }

+static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
+					u32 offset, u32 len, u8 *buf)
+{
+	int i, j, rc = 0;
+	u32 val;
+
+	for (i = 0; i < len; i += 4) {
+		u32 addr, data;
+
+		addr = offset + i;
+
+		memcpy(&data, buf + i, 4);
+
+		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
+
+		val = tr32(GRC_EEPROM_ADDR);
+		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
+
+		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
+			EEPROM_ADDR_READ);
+		tw32(GRC_EEPROM_ADDR, val |
+			(0 << EEPROM_ADDR_DEVID_SHIFT) |
+			(addr & EEPROM_ADDR_ADDR_MASK) |
+			EEPROM_ADDR_START |
+			EEPROM_ADDR_WRITE);
+
+		for (j = 0; j < 10000; j++) {
+			val = tr32(GRC_EEPROM_ADDR);
+
+			if (val & EEPROM_ADDR_COMPLETE)
+				break;
+			udelay(100);
+		}
+		if (!(val & EEPROM_ADDR_COMPLETE)) {
+			rc = -EBUSY;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
+		u8 *buf)
+{
+	int ret = 0;
+	u32 pagesize = tp->nvram_pagesize;
+	u32 pagemask = pagesize - 1;
+	u32 nvram_cmd;
+	u8 *tmp;
+
+	tmp = kmalloc(pagesize, GFP_KERNEL);
+	if (tmp == NULL)
+		return -ENOMEM;
+
+	while (len) {
+		int j;
+		u32 phy_addr, page_off, size, nvaccess;
+
+		phy_addr = offset & ~pagemask;
+
+		for (j = 0; j < pagesize; j += 4) {
+			if ((ret = tg3_nvram_read(tp, phy_addr + j,
+						(u32 *) (tmp + j))))
+				break;
+		}
+		if (ret)
+			break;
+
+		page_off = offset & pagemask;
+		size = pagesize;
+		if (len < size)
+			size = len;
+
+		len -= size;
+
+		memcpy(tmp + page_off, buf, size);
+
+		offset = offset + (pagesize - page_off);
+
+		nvaccess = tr32(NVRAM_ACCESS);
+		tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
+
+		/*
+		 * Before we can erase the flash page, we need
+		 * to issue a special "write enable" command.
+		 */
+		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+			break;
+
+		/* Erase the target page */
+		tw32(NVRAM_ADDR, phy_addr);
+
+		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
+			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
+
+		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+			break;
+
+		/* Issue another write enable to start the write. */
+		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+			break;
+
+		for (j = 0; j < pagesize; j += 4) {
+			u32 data;
+
+			data = *((u32 *) (tmp + j));
+			tw32(NVRAM_WRDATA, cpu_to_be32(data));
+
+			tw32(NVRAM_ADDR, phy_addr + j);
+
+			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
+				NVRAM_CMD_WR;
+
+			if (j == 0)
+				nvram_cmd |= NVRAM_CMD_FIRST;
+			else if (j == (pagesize - 4))
+				nvram_cmd |= NVRAM_CMD_LAST;
+
+			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
+				break;
+		}
+		if (ret)
+			break;
+	}
+
+	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+	tg3_nvram_exec_cmd(tp, nvram_cmd);
+
+	kfree(tmp);
+
+	return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
+		u8 *buf)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < len; i += 4, offset += 4) {
+		u32 data, page_off, phy_addr, nvram_cmd;
+
+		memcpy(&data, buf + i, 4);
+		tw32(NVRAM_WRDATA, cpu_to_be32(data));
+
+		page_off = offset % tp->nvram_pagesize;
+
+		if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
+			(tp->nvram_jedecnum == JEDEC_ATMEL)) {
+
+			phy_addr = ((offset / tp->nvram_pagesize) <<
+				    ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
+		}
+		else {
+			phy_addr = offset;
+		}
+
+		tw32(NVRAM_ADDR, phy_addr);
+
+		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
+
+		if ((page_off == 0) || (i == 0))
+			nvram_cmd |= NVRAM_CMD_FIRST;
+		else if (page_off == (tp->nvram_pagesize - 4))
+			nvram_cmd |= NVRAM_CMD_LAST;
+
+		if (i == (len - 4))
+			nvram_cmd |= NVRAM_CMD_LAST;
+
+		if ((tp->nvram_jedecnum == JEDEC_ST) &&
+			(nvram_cmd & NVRAM_CMD_FIRST)) {
+
+			if ((ret = tg3_nvram_exec_cmd(tp,
+				NVRAM_CMD_WREN | NVRAM_CMD_GO |
+				NVRAM_CMD_DONE)))
+				break;
+		}
+		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
+			/* We always do complete word writes to eeprom. */
+			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
+		}
+
+		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
+			break;
+	}
+	return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len,
+		u8 *buf)
+{
+	int ret;
+
+	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
+		printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
+		return -EINVAL;
+	}
+
+	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
+		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+			GRC_LCLCTRL_GPIO_OE1);
+		udelay(40);
+	}
+
+	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
+		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
+	}
+	else {
+		u32 grc_mode;
+
+		tg3_nvram_lock(tp);
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+			u32 nvaccess = tr32(NVRAM_ACCESS);
+
+			tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
+
+			tw32(NVRAM_WRITE1, 0x406);
+		}
+
+		grc_mode = tr32(GRC_MODE);
+		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
+
+		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
+			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
+
+			ret = tg3_nvram_write_block_buffered(tp, offset, len,
+				buf);
+		}
+		else {
+			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
+				buf);
+		}
+
+		grc_mode = tr32(GRC_MODE);
+		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+			u32 nvaccess = tr32(NVRAM_ACCESS);
+
+			tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
+		}
+		tg3_nvram_unlock(tp);
+	}
+
+	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
+		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+			GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1);
+		udelay(40);
+	}
+
+	return ret;
+}
+
 struct subsys_tbl_ent {

@@ -8209,6 +8665,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	if (pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;
+	dev->features |= NETIF_F_LLTX;
 #if TG3_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 	dev->vlan_rx_register = tg3_vlan_rx_register;

@@ -8250,6 +8707,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
 #endif
 	spin_lock_init(&tp->lock);
+	spin_lock_init(&tp->tx_lock);
 	spin_lock_init(&tp->indirect_lock);
 	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

@@ -8462,23 +8920,23 @@ static int tg3_suspend(struct pci_dev *pdev, u32 state)
 	del_timer_sync(&tp->timer);

 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);
 	tg3_disable_ints(tp);
-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);

 	netif_device_detach(dev);

 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);
 	tg3_halt(tp);
-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);

 	err = tg3_set_power_state(tp, state);
 	if (err) {
 		spin_lock_irq(&tp->lock);
-		spin_lock(&dev->xmit_lock);
+		spin_lock(&tp->tx_lock);
 		tg3_init_hw(tp);

@@ -8488,7 +8946,7 @@ static int tg3_suspend(struct pci_dev *pdev, u32 state)
 		netif_device_attach(dev);
 		tg3_netif_start(tp);

-		spin_unlock(&dev->xmit_lock);
+		spin_unlock(&tp->tx_lock);
 		spin_unlock_irq(&tp->lock);
 	}

@@ -8513,7 +8971,7 @@ static int tg3_resume(struct pci_dev *pdev)
 	netif_device_attach(dev);

 	spin_lock_irq(&tp->lock);
-	spin_lock(&dev->xmit_lock);
+	spin_lock(&tp->tx_lock);

 	tg3_init_hw(tp);

@@ -8524,7 +8982,7 @@ static int tg3_resume(struct pci_dev *pdev)
 	tg3_netif_start(tp);

-	spin_unlock(&dev->xmit_lock);
+	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);

 	return 0;
drivers/net/tg3.h
View file @
e1acfd06
...
...
@@ -1274,6 +1274,7 @@
#define GRC_MODE_HOST_STACKUP 0x00010000
#define GRC_MODE_HOST_SENDBDS 0x00020000
#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000
#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000
#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000
...
...
@@ -1366,6 +1367,8 @@
#define NVRAM_CMD_ERASE 0x00000040
#define NVRAM_CMD_FIRST 0x00000080
#define NVRAM_CMD_LAST 0x00000100
#define NVRAM_CMD_WREN 0x00010000
#define NVRAM_CMD_WRDI 0x00020000
#define NVRAM_STAT 0x00007004
#define NVRAM_WRDATA 0x00007008
#define NVRAM_ADDR 0x0000700c
...
...
@@ -1375,8 +1378,18 @@
#define NVRAM_CFG1_FLASHIF_ENAB 0x00000001
#define NVRAM_CFG1_BUFFERED_MODE 0x00000002
#define NVRAM_CFG1_PASS_THRU 0x00000004
#define NVRAM_CFG1_STATUS_BITS 0x00000070
#define NVRAM_CFG1_BIT_BANG 0x00000008
#define NVRAM_CFG1_FLASH_SIZE 0x02000000
#define NVRAM_CFG1_COMPAT_BYPASS 0x80000000
#define NVRAM_CFG1_VENDOR_MASK 0x03000003
#define FLASH_VENDOR_ATMEL_EEPROM 0x02000000
#define FLASH_VENDOR_ATMEL_FLASH_BUFFERED 0x02000003
#define FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED 0x00000003
#define FLASH_VENDOR_ST 0x03000001
#define FLASH_VENDOR_SAIFUN 0x01000003
#define FLASH_VENDOR_SST_SMALL 0x00000001
#define FLASH_VENDOR_SST_LARGE 0x02000001
#define NVRAM_CFG2 0x00007018
#define NVRAM_CFG3 0x0000701c
#define NVRAM_SWARB 0x00007020
...
...
@@ -1396,15 +1409,16 @@
#define SWARB_REQ1 0x00002000
#define SWARB_REQ2 0x00004000
#define SWARB_REQ3 0x00008000
#define NVRAM_BUFFERED_PAGE_SIZE 264
#define NVRAM_BUFFERED_PAGE_POS 9
#define NVRAM_ACCESS 0x00007024
#define ACCESS_ENABLE 0x00000001
#define ACCESS_WR_ENABLE 0x00000002
/* 0x7024 --> 0x7400 unused */
#define NVRAM_WRITE1 0x00007028
/* 0x702c --> 0x7400 unused */
/* 0x7400 --> 0x8000 unused */
#define TG3_EEPROM_MAGIC 0x669955aa
/* 32K Window into NIC internal memory */
#define NIC_SRAM_WIN_BASE 0x00008000
...
...
@@ -1980,11 +1994,12 @@ struct tg3 {
	 * lock: Held during all operations except TX packet
	 *       processing.
	 *
-	 * dev->xmit_lock: Held during tg3_start_xmit and tg3_tx
+	 * tx_lock: Held during tg3_start_xmit{,_4gbug} and tg3_tx
	 *
	 * If you want to shut up all asynchronous processing you must
-	 * acquire both locks, 'lock' taken before 'xmit_lock'.  IRQs must
-	 * be disabled to take either lock.
+	 * acquire both locks, 'lock' taken before 'tx_lock'.  IRQs must
+	 * be disabled to take 'lock' but only softirq disabling is
+	 * necessary for acquisition of 'tx_lock'.
	 */
	spinlock_t			lock;
	spinlock_t			indirect_lock;
...
...
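The comment above spells out the new two-level discipline: 'lock' still requires hard IRQs off, while the per-driver 'tx_lock' only needs softirqs blocked. A minimal sketch of the full-quiesce sequence it describes, distilled from the tg3_suspend/tg3_resume hunks earlier in this commit (hardware calls and error handling elided):

	/* sketch only -- tp is the tg3 private struct */
	spin_lock_irq(&tp->lock);	/* excludes IRQ-context users          */
	spin_lock(&tp->tx_lock);	/* TX runs in softirq, which the       */
					/* irq-disable above already excludes */
	/* ... halt or reprogram the chip ... */
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);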
@@ -2003,6 +2018,8 @@ struct tg3 {
	u32				tx_cons;
	u32				tx_pending;

+	spinlock_t			tx_lock;
+
	struct tg3_tx_buffer_desc	*tx_ring;
	struct tx_ring_info		*tx_buffers;
	dma_addr_t			tx_desc_mapping;
...
...
@@ -2087,6 +2104,7 @@ struct tg3 {
#define TG3_FLG2_PHY_JUST_INITTED	0x00001000
#define TG3_FLG2_PHY_SERDES		0x00002000
#define TG3_FLG2_CAPACITIVE_COUPLING	0x00004000
+#define TG3_FLG2_FLASH			0x00008000

	u32				split_mode_max_reqs;
#define SPLIT_MODE_5704_MAX_REQ		3
...
...
@@ -2160,6 +2178,34 @@ struct tg3 {
	struct tg3_hw_stats		*hw_stats;
	dma_addr_t			stats_mapping;
	struct work_struct		reset_task;

+	u32				nvram_size;
+	u32				nvram_pagesize;
+	u32				nvram_jedecnum;
+#define JEDEC_ATMEL			0x1f
+#define JEDEC_ST			0x20
+#define JEDEC_SAIFUN			0x4f
+#define JEDEC_SST			0xbf
+
+#define ATMEL_AT24C64_CHIP_SIZE		(64 * 1024)
+#define ATMEL_AT24C64_PAGE_SIZE		(32)
+
+#define ATMEL_AT24C512_CHIP_SIZE	(512 * 1024)
+#define ATMEL_AT24C512_PAGE_SIZE	(128)
+
+#define ATMEL_AT45DB0X1B_PAGE_POS	9
+#define ATMEL_AT45DB0X1B_PAGE_SIZE	264
+
+#define ATMEL_AT25F512_PAGE_SIZE	256
+
+#define ST_M45PEX0_PAGE_SIZE		256
+
+#define SAIFUN_SA25F0XX_PAGE_SIZE	256
+
+#define SST_25VF0X0_PAGE_SIZE		4098
};
#endif /* !(_T3_H) */
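The nvram_* fields and flash-vendor constants added above get filled in when the driver probes NVRAM_CFG1. A hedged sketch of that probe, not the driver's verbatim code -- tr32() is tg3's register-read helper, the vendor list is abbreviated, and per-part page sizes come from the defines above:

	static void nvram_probe_sketch(struct tg3 *tp)
	{
		u32 nvcfg1 = tr32(NVRAM_CFG1);

		if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
			switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags2 |= TG3_FLG2_FLASH;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags2 |= TG3_FLG2_FLASH;
				break;
			/* ... remaining vendors decoded the same way ... */
			}
		} else {
			/* no flash interface: legacy Atmel EEPROM part */
			tp->nvram_jedecnum = JEDEC_ATMEL;
		}
	}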
include/linux/netdevice.h
View file @ e1acfd06
...
...
@@ -76,6 +76,7 @@ struct ethtool_ops;
/* Driver transmit return codes */
#define NETDEV_TX_OK 0		/* driver took care of packet */
#define NETDEV_TX_BUSY 1	/* driver tx path was busy*/
+#define NETDEV_TX_LOCKED -1	/* driver tx lock was already taken */
/*
* Compute the worst case header length according to the protocols
...
...
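NETDEV_TX_LOCKED is only a valid return for drivers that set NETIF_F_LLTX and take their own TX lock. A hedged sketch of the driver-side contract (example_priv and its tx_lock are hypothetical, not from any in-tree driver):

	static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);	/* hypothetical */

		/* LLTX: the core no longer takes dev->xmit_lock for us */
		if (!spin_trylock(&priv->tx_lock))
			return NETDEV_TX_LOCKED;	/* core requeues and retries */

		/* ... post skb to the hardware ring ... */

		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_OK;
	}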
@@ -414,7 +415,7 @@ struct net_device
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_TSO		2048	/* Can offload TCP/IP segmentation */
-#define NETIF_F_LLTX		4096	/* Do not grab xmit_lock during ->hard_start_xmit */
+#define NETIF_F_LLTX		4096	/* LockLess TX */
/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
...
...
@@ -893,11 +894,9 @@ static inline void __netif_rx_complete(struct net_device *dev)
static inline void netif_tx_disable(struct net_device *dev)
{
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->xmit_lock, flags);
+	spin_lock_bh(&dev->xmit_lock);
	netif_stop_queue(dev);
-	spin_unlock_irqrestore(&dev->xmit_lock, flags);
+	spin_unlock_bh(&dev->xmit_lock);
}
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
...
...
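Since the TX path is now entered only with BHs disabled and xmit_lock is never taken from hard IRQ context, the irqsave/irqrestore pair in netif_tx_disable can drop to the cheaper _bh variants. A hedged usage sketch from process context:

	/* sketch: quiesce TX before reprogramming a (hypothetical) device */
	static void example_reconfigure(struct net_device *dev)
	{
		netif_tx_disable(dev);	/* _bh lock blocks the softirq TX path */
		/* ... safely touch TX-side hardware state ... */
		netif_wake_queue(dev);
	}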
net/atm/clip.c
View file @ e1acfd06
...
...
@@ -97,7 +97,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
		printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
		return;
	}
-	spin_lock_irq(&entry->neigh->dev->xmit_lock);	/* block clip_start_xmit() */
+	spin_lock_bh(&entry->neigh->dev->xmit_lock);	/* block clip_start_xmit() */
	entry->neigh->used = jiffies;
	for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
		if (*walk == clip_vcc) {
...
...
@@ -121,7 +121,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
	printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
	       "0x%p)\n", entry, clip_vcc);
out:
-	spin_unlock_irq(&entry->neigh->dev->xmit_lock);
+	spin_unlock_bh(&entry->neigh->dev->xmit_lock);
}
/* The neighbour entry n->lock is held. */
...
...
net/core/dev.c
View file @ e1acfd06
...
...
@@ -1190,7 +1190,7 @@ int __skb_linearize(struct sk_buff *skb, int gfp_mask)
#define HARD_TX_LOCK(dev, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		spin_lock_irq(&dev->xmit_lock);		\
+		spin_lock(&dev->xmit_lock);		\
		dev->xmit_lock_owner = cpu;		\
	}						\
}
...
...
@@ -1198,7 +1198,7 @@ int __skb_linearize(struct sk_buff *skb, int gfp_mask)
#define HARD_TX_UNLOCK(dev) {				\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		dev->xmit_lock_owner = -1;		\
-		spin_unlock_irq(&dev->xmit_lock);	\
+		spin_unlock(&dev->xmit_lock);		\
	}						\
}
...
...
net/core/dev_mcast.c
View file @ e1acfd06
...
...
@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_device *dev)
void dev_mc_upload(struct net_device *dev)
{
-	spin_lock_irq(&dev->xmit_lock);
+	spin_lock_bh(&dev->xmit_lock);
	__dev_mc_upload(dev);
-	spin_unlock_irq(&dev->xmit_lock);
+	spin_unlock_bh(&dev->xmit_lock);
}
/*
...
...
@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
	int err = 0;
	struct dev_mc_list *dmi, **dmip;

-	spin_lock_irq(&dev->xmit_lock);
+	spin_lock_bh(&dev->xmit_lock);

	for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
/*
...
...
@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
			 */
			__dev_mc_upload(dev);
-			spin_unlock_irq(&dev->xmit_lock);
+			spin_unlock_bh(&dev->xmit_lock);
			return 0;
		}
	}
	err = -ENOENT;
done:
-	spin_unlock_irq(&dev->xmit_lock);
+	spin_unlock_bh(&dev->xmit_lock);
	return err;
}
...
...
@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
	dmi1 = (struct dev_mc_list *)kmalloc(sizeof(*dmi), GFP_ATOMIC);

-	spin_lock_irq(&dev->xmit_lock);
+	spin_lock_bh(&dev->xmit_lock);
	for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
		if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
		    dmi->dmi_addrlen == alen) {
...
...
@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
	}

	if ((dmi = dmi1) == NULL) {
-		spin_unlock_irq(&dev->xmit_lock);
+		spin_unlock_bh(&dev->xmit_lock);
		return -ENOMEM;
	}
	memcpy(dmi->dmi_addr, addr, alen);
...
...
@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
	__dev_mc_upload(dev);

-	spin_unlock_irq(&dev->xmit_lock);
+	spin_unlock_bh(&dev->xmit_lock);
	return 0;

done:
-	spin_unlock_irq(&dev->xmit_lock);
+	spin_unlock_bh(&dev->xmit_lock);
	if (dmi1)
		kfree(dmi1);
	return err;
...
...
@@ -205,7 +205,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
void dev_mc_discard(struct net_device *dev)
{
-	spin_lock_irq(&dev->xmit_lock);
+	spin_lock_bh(&dev->xmit_lock);

	while (dev->mc_list != NULL) {
		struct dev_mc_list *tmp = dev->mc_list;
...
...
@@ -216,7 +216,7 @@ void dev_mc_discard(struct net_device *dev)
	}
	dev->mc_count = 0;

-	spin_unlock_irq(&dev->xmit_lock);
+	spin_unlock_bh(&dev->xmit_lock);
}
#ifdef CONFIG_PROC_FS
...
...
@@ -251,7 +251,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
	struct dev_mc_list *m;
	struct net_device *dev = v;

-	spin_lock_irq(&dev->xmit_lock);
+	spin_lock_bh(&dev->xmit_lock);
	for (m = dev->mc_list; m; m = m->next) {
		int i;
...
...
@@ -263,7 +263,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
		seq_putc(seq, '\n');
	}
-	spin_unlock_irq(&dev->xmit_lock);
+	spin_unlock_bh(&dev->xmit_lock);
	return 0;
}
...
...
net/core/netpoll.c
View file @ e1acfd06
...
...
@@ -188,7 +188,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
		return;
	}

-	spin_lock_irq(&np->dev->xmit_lock);
+	spin_lock(&np->dev->xmit_lock);
	np->dev->xmit_lock_owner = smp_processor_id();

/*
...
...
@@ -197,7 +197,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
	 */
	if (netif_queue_stopped(np->dev)) {
		np->dev->xmit_lock_owner = -1;
-		spin_unlock_irq(&np->dev->xmit_lock);
+		spin_unlock(&np->dev->xmit_lock);

		netpoll_poll(np);
		goto repeat;
...
...
@@ -205,7 +205,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
	status = np->dev->hard_start_xmit(skb, np->dev);
	np->dev->xmit_lock_owner = -1;
-	spin_unlock_irq(&np->dev->xmit_lock);
+	spin_unlock(&np->dev->xmit_lock);

	/* transmit busy */
	if (status) {
...
...
net/core/pktgen.c
View file @ e1acfd06
...
...
@@ -2664,11 +2664,12 @@ __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
		}
	}

-	spin_lock_irq(&odev->xmit_lock);
+	spin_lock_bh(&odev->xmit_lock);
	if (!netif_queue_stopped(odev)) {
		u64 now;

		atomic_inc(&(pkt_dev->skb->users));
retry_now:
		ret = odev->hard_start_xmit(pkt_dev->skb, odev);
		if (likely(ret == NETDEV_TX_OK)) {
			pkt_dev->last_ok = 1;
...
...
@@ -2676,6 +2677,10 @@ __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
			pkt_dev->seq_num++;
			pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;

+		} else if (ret == NETDEV_TX_LOCKED
+			   && (odev->features & NETIF_F_LLTX)) {
+			cpu_relax();
+			goto retry_now;
		} else {  /* Retry it next time */
			atomic_dec(&(pkt_dev->skb->users));
...
...
@@ -2711,7 +2716,7 @@ __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
		pkt_dev->next_tx_ns = 0;
	}

-	spin_unlock_irq(&odev->xmit_lock);
+	spin_unlock_bh(&odev->xmit_lock);

	/* If pkt_dev->count is zero, then run forever */
	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
...
...
net/netlink/af_netlink.c
View file @ e1acfd06
...
...
@@ -91,12 +91,12 @@ struct nl_pid_hash {
struct netlink_table {
	struct nl_pid_hash hash;
	struct hlist_head mc_list;
+	unsigned int nl_nonroot;
};

static struct netlink_table *nl_table;
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
-static unsigned int nl_nonroot[MAX_LINKS];

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);
...
...
@@ -438,7 +438,7 @@ static int netlink_autobind(struct socket *sock)
static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
-	return (nl_nonroot[sock->sk->sk_protocol] & flag) ||
+	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}
...
...
@@ -1066,7 +1066,7 @@ netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
-		nl_nonroot[protocol] = flags;
+		nl_table[protocol].nl_nonroot = flags;
}
static void netlink_destroy_callback(struct netlink_callback *cb)
...
...
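The netlink change folds the global nl_nonroot[] array into the per-protocol netlink_table entry; callers are unchanged. A hedged usage sketch (NL_NONROOT_SEND is assumed here to be one of the nonroot flag bits defined in af_netlink.c):

	/* a protocol opting in to unprivileged senders */
	netlink_set_nonroot(NETLINK_USERSOCK, NL_NONROOT_SEND);

	/* ... later, the permission check reads the table entry ... */
	if (!netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;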
net/sched/sch_generic.c
View file @
e1acfd06
...
...
@@ -99,11 +99,17 @@ int qdisc_restart(struct net_device *dev)
	if ((skb = q->dequeue(q)) != NULL) {
		unsigned nolock = (dev->features & NETIF_F_LLTX);
		/*
-		 * When the driver has LLTX set it does not require any
-		 * locking in start_xmit.
+		 * When the driver has LLTX set it does its own locking
+		 * in start_xmit. No need to add additional overhead by
+		 * locking again. These checks are worth it because
+		 * even uncongested locks can be quite expensive.
+		 * The driver can do trylock like here too, in case
+		 * of lock congestion it should return -1 and the packet
+		 * will be requeued.
		 */
		if (!nolock) {
-			if (!spin_trylock_irq(&dev->xmit_lock)) {
+			if (!spin_trylock(&dev->xmit_lock)) {
			collision:
				/* So, someone grabbed the driver. */
				/* It may be transient configuration error,
...
...
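The comment cuts both ways: the core now uses a plain trylock around non-LLTX drivers, and an LLTX driver may trylock internally and report failure. A condensed, hedged sketch of the decision qdisc_restart makes around hard_start_xmit (requeue bookkeeping elided; the full handling is in the next hunk):

	int nolock = dev->features & NETIF_F_LLTX;

	if (!nolock && !spin_trylock(&dev->xmit_lock))
		goto collision;			/* someone else owns the driver */

	ret = dev->hard_start_xmit(skb, dev);
	if (!nolock)
		spin_unlock(&dev->xmit_lock);
	if (ret == NETDEV_TX_LOCKED && nolock)
		goto collision;			/* LLTX driver lost its own trylock */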
@@ -137,18 +143,22 @@ int qdisc_restart(struct net_device *dev)
		if (ret == NETDEV_TX_OK) {
			if (!nolock) {
				dev->xmit_lock_owner = -1;
-				spin_unlock_irq(&dev->xmit_lock);
+				spin_unlock(&dev->xmit_lock);
			}
			spin_lock(&dev->queue_lock);
			return -1;
		}
+		if (ret == NETDEV_TX_LOCKED && nolock) {
+			spin_lock(&dev->queue_lock);
+			goto collision;
+		}
	}

	/* NETDEV_TX_BUSY - we need to requeue */
	/* Release the driver */
	if (!nolock) {
		dev->xmit_lock_owner = -1;
-		spin_unlock_irq(&dev->xmit_lock);
+		spin_unlock(&dev->xmit_lock);
	}
	spin_lock(&dev->queue_lock);
	q = dev->qdisc;
...
...
@@ -176,7 +186,7 @@ static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

-	spin_lock_irq(&dev->xmit_lock);
+	spin_lock(&dev->xmit_lock);
	if (dev->qdisc != &noop_qdisc) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
...
...
@@ -190,7 +200,7 @@ static void dev_watchdog(unsigned long arg)
			dev_hold(dev);
		}
	}
-	spin_unlock_irq(&dev->xmit_lock);
+	spin_unlock(&dev->xmit_lock);

	dev_put(dev);
}
...
...
@@ -214,17 +224,17 @@ void __netdev_watchdog_up(struct net_device *dev)
static void dev_watchdog_up(struct net_device *dev)
{
-	spin_lock_irq(&dev->xmit_lock);
+	spin_lock_bh(&dev->xmit_lock);
	__netdev_watchdog_up(dev);
-	spin_unlock_irq(&dev->xmit_lock);
+	spin_unlock_bh(&dev->xmit_lock);
}

static void dev_watchdog_down(struct net_device *dev)
{
-	spin_lock_irq(&dev->xmit_lock);
+	spin_lock_bh(&dev->xmit_lock);
	if (del_timer(&dev->watchdog_timer))
		__dev_put(dev);
-	spin_unlock_irq(&dev->xmit_lock);
+	spin_unlock_bh(&dev->xmit_lock);
}
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
...
...
net/sched/sch_teql.c
View file @ e1acfd06
...
...
@@ -301,12 +301,12 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
		switch (teql_resolve(skb, skb_res, slave)) {
		case 0:
-			if (spin_trylock_irq(&slave->xmit_lock)) {
+			if (spin_trylock(&slave->xmit_lock)) {
				slave->xmit_lock_owner = smp_processor_id();
				if (!netif_queue_stopped(slave) &&
				    slave->hard_start_xmit(skb, slave) == 0) {
					slave->xmit_lock_owner = -1;
-					spin_unlock_irq(&slave->xmit_lock);
+					spin_unlock(&slave->xmit_lock);
					master->slaves = NEXT_SLAVE(q);
					netif_wake_queue(dev);
					master->stats.tx_packets++;
...
...
@@ -314,7 +314,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
					return 0;
				}
				slave->xmit_lock_owner = -1;
-				spin_unlock_irq(&slave->xmit_lock);
+				spin_unlock(&slave->xmit_lock);
			}
			if (netif_queue_stopped(dev))
				busy = 1;
...
...