Commit a13c1327, authored Dec 21, 2010 by David S. Miller
Merge branch 'for-davem' of
git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next-2.6
Parents: 34a52f36 c04bfc6b
Showing 4 changed files with 35 additions and 115 deletions (+35, -115):
    drivers/net/sfc/efx.c          +14 -10
    drivers/net/sfc/efx.h           +0  -2
    drivers/net/sfc/net_driver.h    +2 -11
    drivers/net/sfc/tx.c           +19 -92
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -461,9 +461,6 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
 		}
 	}
 
-	spin_lock_init(&channel->tx_stop_lock);
-	atomic_set(&channel->tx_stop_count, 1);
-
 	rx_queue = &channel->rx_queue;
 	rx_queue->efx = efx;
 	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
@@ -1406,11 +1403,11 @@ static void efx_start_all(struct efx_nic *efx)
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
 
-	efx_for_each_channel(channel, efx) {
-		if (efx_dev_registered(efx))
-			efx_wake_queue(channel);
+	if (efx_dev_registered(efx))
+		netif_tx_wake_all_queues(efx->net_dev);
+
+	efx_for_each_channel(channel, efx)
 		efx_start_channel(channel);
-	}
 
 	if (efx->legacy_irq)
 		efx->legacy_irq_enabled = true;
@@ -1498,9 +1495,7 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
 	if (efx_dev_registered(efx)) {
-		struct efx_channel *channel;
-		efx_for_each_channel(channel, efx)
-			efx_stop_queue(channel);
+		netif_tx_stop_all_queues(efx->net_dev);
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1896,6 +1891,7 @@ static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
 static int efx_register_netdev(struct efx_nic *efx)
 {
 	struct net_device *net_dev = efx->net_dev;
+	struct efx_channel *channel;
 	int rc;
 
 	net_dev->watchdog_timeo = 5 * HZ;
@@ -1918,6 +1914,14 @@ static int efx_register_netdev(struct efx_nic *efx)
 	if (rc)
 		goto fail_locked;
 
+	efx_for_each_channel(channel, efx) {
+		struct efx_tx_queue *tx_queue;
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			tx_queue->core_txq = netdev_get_tx_queue(
+				efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
+		}
+	}
+
 	/* Always start with carrier off; PHY events will detect the link */
 	netif_carrier_off(efx->net_dev);
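Note (illustrative, not part of this commit): the hunks above replace the driver's per-channel stop/wake helpers with direct use of the networking core's per-queue state. The sketch below only restates the caching pattern visible in the efx_register_netdev() hunk, where each hardware TX queue stores a pointer to its core netdev_queue once, so the hot paths never recompute the mapping. The sketch_* names are invented; netdev_get_tx_queue() and struct netdev_queue are the real core APIs, and EFX_TXQ_TYPES hardware queues share one core queue, hence the division.

#include <linux/netdevice.h>

struct sketch_tx_queue {
	unsigned queue;                /* hardware queue number */
	struct netdev_queue *core_txq; /* cached core TX queue */
};

/* Done once at netdev registration time (cf. the efx_register_netdev()
 * hunk above); afterwards the xmit and completion paths use core_txq
 * directly instead of calling netdev_get_tx_queue() each time. */
static void sketch_cache_core_txq(struct net_device *net_dev,
				  struct sketch_tx_queue *txq,
				  unsigned txq_types)
{
	txq->core_txq = netdev_get_tx_queue(net_dev, txq->queue / txq_types);
}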
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -36,8 +36,6 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern void efx_stop_queue(struct efx_channel *channel);
-extern void efx_wake_queue(struct efx_channel *channel);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -136,6 +136,7 @@ struct efx_tx_buffer {
  * @efx: The associated Efx NIC
  * @queue: DMA queue number
  * @channel: The associated channel
+ * @core_txq: The networking core TX queue structure
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
@@ -148,8 +149,6 @@ struct efx_tx_buffer {
  *	variable indicates that the queue is empty.  This is to
  *	avoid cache-line ping-pong between the xmit path and the
  *	completion path.
- * @stopped: Stopped count.
- *	Set if this TX queue is currently stopping its port.
  * @insert_count: Current insert pointer
  *	This is the number of buffers that have been added to the
  *	software ring.
@@ -179,7 +178,7 @@ struct efx_tx_queue {
 	struct efx_nic *efx ____cacheline_aligned_in_smp;
 	unsigned queue;
 	struct efx_channel *channel;
-	struct efx_nic *nic;
+	struct netdev_queue *core_txq;
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
@@ -188,7 +187,6 @@ struct efx_tx_queue {
 	/* Members used mainly on the completion path */
 	unsigned int read_count ____cacheline_aligned_in_smp;
 	unsigned int old_write_count;
-	int stopped;
 
 	/* Members used only on the xmit path */
 	unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -321,7 +319,6 @@ enum efx_rx_alloc_method {
  * @irq_moderation: IRQ moderation value (in hardware ticks)
  * @napi_dev: Net device used with NAPI
  * @napi_str: NAPI control structure
- * @reset_work: Scheduled reset work thread
  * @work_pending: Is work pending via NAPI?
  * @eventq: Event queue buffer
  * @eventq_mask: Event queue pointer mask
@@ -342,8 +339,6 @@ enum efx_rx_alloc_method {
  * @n_rx_overlength: Count of RX_OVERLENGTH errors
  * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
  * @rx_queue: RX queue for this channel
- * @tx_stop_count: Core TX queue stop count
- * @tx_stop_lock: Core TX queue stop lock
 * @tx_queue: TX queues for this channel
 */
 struct efx_channel {
@@ -382,10 +377,6 @@ struct efx_channel {
 	bool rx_pkt_csummed;
 
 	struct efx_rx_queue rx_queue;
 
-	atomic_t tx_stop_count;
-	spinlock_t tx_stop_lock;
-
 	struct efx_tx_queue tx_queue[2];
 };
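Note (illustrative, not part of this commit): the kernel-doc retained above explains why struct efx_tx_queue keeps completion-path and xmit-path members on separate cache lines, to avoid cache-line ping-pong between the two paths. A minimal sketch of that layout pattern, with invented names:

#include <linux/cache.h>

struct sketch_ring {
	/* Members used mainly on the completion path */
	unsigned int read_count ____cacheline_aligned_in_smp;

	/* Members used only on the xmit path; the alignment attribute
	 * starts a new cache line, so the producer CPU and the consumer
	 * CPU mostly touch different lines. */
	unsigned int insert_count ____cacheline_aligned_in_smp;
};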
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,50 +30,6 @@
  */
 #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
 
-/* We need to be able to nest calls to netif_tx_stop_queue(), partly
- * because of the 2 hardware queues associated with each core queue,
- * but also so that we can inhibit TX for reasons other than a full
- * hardware queue. */
-void efx_stop_queue(struct efx_channel *channel)
-{
-	struct efx_nic *efx = channel->efx;
-	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
-	if (!tx_queue)
-		return;
-
-	spin_lock_bh(&channel->tx_stop_lock);
-	netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
-
-	atomic_inc(&channel->tx_stop_count);
-	netif_tx_stop_queue(
-		netdev_get_tx_queue(efx->net_dev,
-				    tx_queue->queue / EFX_TXQ_TYPES));
-
-	spin_unlock_bh(&channel->tx_stop_lock);
-}
-
-/* Decrement core TX queue stop count and wake it if the count is 0 */
-void efx_wake_queue(struct efx_channel *channel)
-{
-	struct efx_nic *efx = channel->efx;
-	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
-	if (!tx_queue)
-		return;
-
-	local_bh_disable();
-	if (atomic_dec_and_lock(&channel->tx_stop_count,
-				&channel->tx_stop_lock)) {
-		netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
-		netif_tx_wake_queue(
-			netdev_get_tx_queue(efx->net_dev,
-					    tx_queue->queue / EFX_TXQ_TYPES));
-		spin_unlock(&channel->tx_stop_lock);
-	}
-	local_bh_enable();
-}
-
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       struct efx_tx_buffer *buffer)
 {
@@ -234,9 +190,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 				 * checked.  Update the xmit path's
 				 * copy of read_count.
 				 */
-				++tx_queue->stopped;
+				netif_tx_stop_queue(tx_queue->core_txq);
 				/* This memory barrier protects the
-				 * change of stopped from the access
+				 * change of queue state from the access
 				 * of read_count. */
 				smp_mb();
 				tx_queue->old_read_count =
@@ -244,10 +200,12 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
 			q_space = efx->txq_entries - 1 - fill_level;
-			if (unlikely(q_space-- <= 0))
-				goto stop;
+			if (unlikely(q_space-- <= 0)) {
+				rc = NETDEV_TX_BUSY;
+				goto unwind;
+			}
 			smp_mb();
-			--tx_queue->stopped;
+			netif_tx_start_queue(tx_queue->core_txq);
 		}
 
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -307,13 +265,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	/* Mark the packet as transmitted, and free the SKB ourselves */
 	dev_kfree_skb_any(skb);
-	goto unwind;
-
- stop:
-	rc = NETDEV_TX_BUSY;
-
-	if (tx_queue->stopped == 1)
-		efx_stop_queue(tx_queue->channel);
 
  unwind:
 	/* Work backwards until we hit the original insert pointer value */
@@ -400,32 +351,21 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
-	struct netdev_queue *queue;
 
 	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 	efx_dequeue_buffers(tx_queue, index);
 
 	/* See if we need to restart the netif queue.  This barrier
-	 * separates the update of read_count from the test of
-	 * stopped. */
+	 * separates the update of read_count from the test of the
+	 * queue state. */
 	smp_mb();
-	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
+	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+	    likely(efx->port_enabled)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
 		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
-
-			/* Do this under netif_tx_lock(), to avoid racing
-			 * with efx_xmit(). */
-			queue = netdev_get_tx_queue(
-				efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
-			__netif_tx_lock(queue, smp_processor_id());
-			if (tx_queue->stopped) {
-				tx_queue->stopped = 0;
-				efx_wake_queue(tx_queue->channel);
-			}
-			__netif_tx_unlock(queue);
+			netif_tx_wake_queue(tx_queue->core_txq);
 		}
 	}
@@ -487,7 +427,6 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 	tx_queue->read_count = 0;
 	tx_queue->old_read_count = 0;
 	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
-	BUG_ON(tx_queue->stopped);
 
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
@@ -523,12 +462,6 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 	/* Free up TSO header cache */
 	efx_fini_tso(tx_queue);
-
-	/* Release queue's stop on port, if any */
-	if (tx_queue->stopped) {
-		tx_queue->stopped = 0;
-		efx_wake_queue(tx_queue->channel);
-	}
 }
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
@@ -770,9 +703,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			 * since the xmit path last checked.  Update
 			 * the xmit path's copy of read_count.
 			 */
-			++tx_queue->stopped;
+			netif_tx_stop_queue(tx_queue->core_txq);
 			/* This memory barrier protects the change of
-			 * stopped from the access of read_count. */
+			 * queue state from the access of read_count. */
 			smp_mb();
 			tx_queue->old_read_count =
 				ACCESS_ONCE(tx_queue->read_count);
@@ -784,7 +717,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			return 1;
 		}
 		smp_mb();
-		--tx_queue->stopped;
+		netif_tx_start_queue(tx_queue->core_txq);
 	}
 
 	insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -1124,8 +1057,10 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	while (1) {
 		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
-		if (unlikely(rc))
-			goto stop;
+		if (unlikely(rc)) {
+			rc2 = NETDEV_TX_BUSY;
+			goto unwind;
+		}
 
 		/* Move onto the next fragment? */
 		if (state.in_len == 0) {
@@ -1154,14 +1089,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	netif_err(efx, tx_err, efx->net_dev,
 		  "Out of memory for TSO headers, or PCI mapping error\n");
 	dev_kfree_skb_any(skb);
-	goto unwind;
-
- stop:
-	rc2 = NETDEV_TX_BUSY;
-
-	/* Stop the queue if it wasn't stopped before. */
-	if (tx_queue->stopped == 1)
-		efx_stop_queue(tx_queue->channel);
 
  unwind:
 	/* Free the DMA mapping we were in the process of writing out */
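Note (illustrative, not part of this commit): the tx.c hunks above drop the driver-private stopped counter, tx_stop_lock and tx_stop_count in favour of the core queue state, so stopping and waking become lock-free and rely on the paired smp_mb() calls that the diff keeps. The sketch below restates that handshake under invented names (sketch_*, SKETCH_RING_SIZE); netif_tx_stop_queue(), netif_tx_start_queue(), netif_tx_wake_queue(), netif_tx_queue_stopped() and ACCESS_ONCE() are the real kernel APIs of this era.

#include <linux/netdevice.h>

#define SKETCH_RING_SIZE 1024	/* illustrative ring size */

struct sketch_txq {
	struct netdev_queue *core_txq;
	unsigned int insert_count;   /* written by the xmit path */
	unsigned int read_count;     /* written by the completion path */
	unsigned int old_read_count; /* xmit path's cached copy */
};

/* Xmit path, on finding the ring apparently full: stop first, then
 * re-check, so a completion that races with us cannot be missed. */
static void sketch_on_ring_full(struct sketch_txq *txq)
{
	netif_tx_stop_queue(txq->core_txq);
	/* Pairs with the barrier in sketch_on_completion(): make the
	 * stopped state visible before re-reading read_count. */
	smp_mb();
	txq->old_read_count = ACCESS_ONCE(txq->read_count);
	if (txq->insert_count - txq->old_read_count < SKETCH_RING_SIZE - 1)
		netif_tx_start_queue(txq->core_txq); /* space after all */
}

/* Completion path, after advancing read_count: the barrier orders the
 * read_count update before the queue-state test, so at least one side
 * always observes the other's update and no wake-up is lost. */
static void sketch_on_completion(struct sketch_txq *txq)
{
	smp_mb();
	if (netif_tx_queue_stopped(txq->core_txq))
		netif_tx_wake_queue(txq->core_txq);
}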