Commit 8e101155
Authored May 27, 2004 by Ganesh Venkatesan
Committed by Jeff Garzik, May 27, 2004
[PATCH] e1000 4/7: Estimate number of tx descriptors required
Reduce scope of tx_lock
parent 668c0cdb
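What the patch does, in brief: before taking tx_lock, e1000_xmit_frame() now estimates how many transmit descriptors the skb will consume, checks ring space once under the lock, and only then maps the buffers, which makes the old post-mapping rollback path unnecessary. A minimal standalone sketch of the estimate (the TXD_USE_COUNT macro is taken from the diff below; the sample lengths and E1000_MAX_TXD_PWR = 12, i.e. 4096-byte descriptor chunks, are illustrative assumptions):

/* sketch.c -- standalone illustration of the descriptor estimate */
#include <stdio.h>

#define E1000_MAX_TXD_PWR 12                    /* assumed: 2^12-byte chunks */
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)  /* descriptors for S bytes */

int main(void)
{
        unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
        unsigned int len = 9000;                 /* hypothetical linear skb data */
        unsigned int frags[] = { 4096, 1500 };   /* hypothetical page fragments */
        unsigned int f;
        int count = 0;

        count++;                                 /* context descriptor (csum/TSO) */
        count++;                                 /* sentinel descriptor */
        count += TXD_USE_COUNT(len, max_txd_pwr);
        for (f = 0; f < sizeof(frags) / sizeof(frags[0]); f++)
                count += TXD_USE_COUNT(frags[f], max_txd_pwr);

        /* the send is queued only if the ring has count + 2 free slots */
        printf("estimated descriptors: %d\n", count);   /* prints 8 here */
        return 0;
}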
Showing 1 changed file with 58 additions and 74 deletions.

drivers/net/e1000/e1000_main.c (+58, -74)
@@ -1560,33 +1560,17 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
 
 static inline int
 e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
-	unsigned int first)
+	unsigned int first, unsigned int max_per_txd, unsigned int nr_frags,
+	unsigned int mss)
 {
 	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
-	struct e1000_tx_desc *tx_desc;
 	struct e1000_buffer *buffer_info;
-	unsigned int len = skb->len, max_per_txd = E1000_MAX_DATA_PER_TXD;
+	unsigned int len = skb->len;
 	unsigned int offset = 0, size, count = 0, i;
-#ifdef NETIF_F_TSO
-	unsigned int mss;
-#endif
-	unsigned int nr_frags;
 	unsigned int f;
 
-#ifdef NETIF_F_TSO
-	mss = skb_shinfo(skb)->tso_size;
-	/* The controller does a simple calculation to
-	 * make sure there is enough room in the FIFO before
-	 * initiating the DMA for each buffer.  The calc is:
-	 * 4 = ceil(buffer len/mss).  To make sure we don't
-	 * overrun the FIFO, adjust the max buffer len if mss
-	 * drops. */
-	if(mss)
-		max_per_txd = min(mss << 2, max_per_txd);
-#endif
-	nr_frags = skb_shinfo(skb)->nr_frags;
 	len -= skb->data_len;
 
 	i = tx_ring->next_to_use;
 
 	while(len) {
@@ -1658,46 +1642,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
 			if(++i == tx_ring->count) i = 0;
 		}
 	}
-
-	if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2) {
-
-		/* There aren't enough descriptors available to queue up
-		 * this send (need: count + 1 context desc + 1 desc gap
-		 * to keep tail from touching head), so undo the mapping
-		 * and abort the send.  We could have done the check before
-		 * we mapped the skb, but because of all the workarounds
-		 * (above), it's too difficult to predict how many we're
-		 * going to need.*/
-
-		i = tx_ring->next_to_use;
-
-		if(i == first) {
-			/* Cleanup after e1000_tx_[csum|tso] scribbling
-			 * on descriptors. */
-			tx_desc = E1000_TX_DESC(*tx_ring, first);
-			tx_desc->buffer_addr = 0;
-			tx_desc->lower.data = 0;
-			tx_desc->upper.data = 0;
-		}
-
-		while(count--) {
-			buffer_info = &tx_ring->buffer_info[i];
-
-			if(buffer_info->dma) {
-				pci_unmap_page(adapter->pdev,
-				               buffer_info->dma,
-				               buffer_info->length,
-				               PCI_DMA_TODEVICE);
-				buffer_info->dma = 0;
-			}
-
-			if(++i == tx_ring->count) i = 0;
-		}
-
-		tx_ring->next_to_use = first;
-
-		return 0;
-	}
 	i = (i == 0) ? tx_ring->count - 1 : i - 1;
 	tx_ring->buffer_info[i].skb = skb;
 	tx_ring->buffer_info[first].next_to_watch = i;
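The entire rollback above can go because the caller now verifies, before mapping anything, that enough descriptors are free (see e1000_xmit_frame below). For reference, that availability test compares the estimate against the free slots of the circular ring; the driver's E1000_DESC_UNUSED() macro has roughly this shape, but the exact expression in this sketch is an assumption:

/* Free entries in a circular descriptor ring of `count` slots.
 * One slot is always left empty so the tail never touches the head. */
static unsigned int desc_unused(unsigned int next_to_clean,
                                unsigned int next_to_use,
                                unsigned int count)
{
        return (next_to_clean > next_to_use ? 0 : count)
               + next_to_clean - next_to_use - 1;
}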
@@ -1792,27 +1736,72 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
 	return 0;
 }
 
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
 static int
 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev->priv;
-	unsigned int first;
+	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
+	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
 	unsigned long flags;
-	int count;
+	unsigned int len = skb->len;
+	int count = 0;
+	unsigned int mss = 0;
+	unsigned int nr_frags = 0;
+	unsigned int f;
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	len -= skb->data_len;
 
 	if(skb->len <= 0) {
 		dev_kfree_skb_any(skb);
 		return 0;
 	}
 
+#ifdef NETIF_F_TSO
+	mss = skb_shinfo(skb)->tso_size;
+	/* The controller does a simple calculation to
+	 * make sure there is enough room in the FIFO before
+	 * initiating the DMA for each buffer.  The calc is:
+	 * 4 = ceil(buffer len/mss).  To make sure we don't
+	 * overrun the FIFO, adjust the max buffer len if mss
+	 * drops. */
+	if(mss) {
+		max_per_txd = min(mss << 2, max_per_txd);
+		max_txd_pwr = fls(max_per_txd) - 1;
+	}
+
+	if((mss) || (skb->ip_summed == CHECKSUM_HW))
+		count++;
+	count++;	/*for sentinel desc*/
+#else
+	if(skb->ip_summed == CHECKSUM_HW)
+		count++;
+#endif
+	count += TXD_USE_COUNT(len, max_txd_pwr);
+
+	if(adapter->pcix_82544)
+		count++;
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	for(f = 0; f < nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, max_txd_pwr);
+	if(adapter->pcix_82544)
+		count += nr_frags;
 
 	spin_lock_irqsave(&adapter->tx_lock, flags);
+	/* need: count + 2 desc gap to keep tail from touching
+	 * head, otherwise try next time */
+	if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2) {
+		netif_stop_queue(netdev);
+		spin_unlock_irqrestore(&adapter->tx_lock, flags);
+		return 1;
+	}
+	spin_unlock_irqrestore(&adapter->tx_lock, flags);
 
 	if(adapter->hw.mac_type == e1000_82547) {
 		if(e1000_82547_fifo_workaround(adapter, skb)) {
 			netif_stop_queue(netdev);
 			mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
-			spin_unlock_irqrestore(&adapter->tx_lock, flags);
 			return 1;
 		}
 	}
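The TSO clamp in this hunk is easier to follow with numbers. A worked example, assuming a hypothetical mss of 536 and the kernel's fls() (find last set bit, 1-indexed):

        max_per_txd = min(536 << 2, 4096) = min(2144, 4096) = 2144
        max_txd_pwr = fls(2144) - 1       = 12 - 1           = 11

TXD_USE_COUNT() then counts in 2^11 = 2048-byte units. Rounding the chunk size down to a power of two keeps the estimate conservative: it can overcount, but never undercount, the descriptors the subsequent mapping will use.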
@@ -1829,18 +1818,12 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	else if(e1000_tx_csum(adapter, skb))
 		tx_flags |= E1000_TX_FLAGS_CSUM;
 
-	if((count = e1000_tx_map(adapter, skb, first)))
-		e1000_tx_queue(adapter, count, tx_flags);
-	else {
-		netif_stop_queue(netdev);
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
-		return 1;
-	}
+	e1000_tx_queue(adapter,
+		e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
+		tx_flags);
 
 	netdev->trans_start = jiffies;
 
-	spin_unlock_irqrestore(&adapter->tx_lock, flags);
-
 	return 0;
 }
@@ -2193,7 +2176,6 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 	unsigned int i, eop;
 	boolean_t cleaned = FALSE;
 
-	spin_lock(&adapter->tx_lock);
 	i = tx_ring->next_to_clean;
 	eop = tx_ring->buffer_info[i].next_to_watch;
@@ -2236,6 +2218,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 
 	tx_ring->next_to_clean = i;
 
+	spin_lock(&adapter->tx_lock);
+
 	if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
 		netif_wake_queue(netdev);
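Net effect of the last three hunks: descriptor cleanup in e1000_clean_tx_irq() now runs without tx_lock, and the lock is taken only around the wake-queue check so it can synchronize with the stop-queue decision in e1000_xmit_frame(). A simplified sketch of the resulting stop/wake protocol (assumed flow pieced together from the hunks; the matching unlock at the end of the cleanup path sits outside this diff):

/* producer side, e1000_xmit_frame(): */
spin_lock_irqsave(&adapter->tx_lock, flags);
if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2) {
        netif_stop_queue(netdev);       /* consumer will wake the queue */
        spin_unlock_irqrestore(&adapter->tx_lock, flags);
        return 1;                       /* retry this skb later */
}
spin_unlock_irqrestore(&adapter->tx_lock, flags);
/* ... map and queue descriptors with no lock held ... */

/* consumer side, e1000_clean_tx_irq(): */
/* ... reclaim finished descriptors with no lock held ... */
spin_lock(&adapter->tx_lock);
if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
        netif_wake_queue(netdev);
spin_unlock(&adapter->tx_lock);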