Commit 0e33d870 authored May 17, 2012 by Ben Hutchings
sfc: Use generic DMA API, not PCI-DMA API
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>

parent 62f8dc52
Showing 5 changed files with 62 additions and 63 deletions
drivers/net/ethernet/sfc/efx.c         +5  -5
drivers/net/ethernet/sfc/net_driver.h  +1  -1
drivers/net/ethernet/sfc/nic.c         +4  -4
drivers/net/ethernet/sfc/rx.c          +11 -11
drivers/net/ethernet/sfc/tx.c          +41 -42
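For orientation, the conversion is almost entirely mechanical: each PCI-DMA wrapper has a generic DMA API equivalent that takes a struct device * instead of a struct pci_dev *. A rough correspondence, summarized from the hunks below (this table is not part of the commit):

    PCI-DMA API                               Generic DMA API
    pci_set_dma_mask(pdev, mask)              dma_set_mask(&pdev->dev, mask)
    pci_set_consistent_dma_mask(pdev, mask)   dma_set_coherent_mask(&pdev->dev, mask)
    pci_alloc_consistent(pdev, len, &handle)  dma_alloc_coherent(&pdev->dev, len, &handle, GFP_ATOMIC)
    pci_free_consistent(...)                  dma_free_coherent(...)
    pci_map_single(pdev, ptr, len, dir)       dma_map_single(&pdev->dev, ptr, len, dir)
    pci_map_page / pci_unmap_page             dma_map_page / dma_unmap_page
    pci_dma_mapping_error(pdev, addr)         dma_mapping_error(&pdev->dev, addr)
    PCI_DMA_TODEVICE / PCI_DMA_FROMDEVICE     DMA_TO_DEVICE / DMA_FROM_DEVICE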
drivers/net/ethernet/sfc/efx.c

@@ -1103,8 +1103,8 @@ static int efx_init_io(struct efx_nic *efx)
 	 * masks event though they reject 46 bit masks.
 	 */
 	while (dma_mask > 0x7fffffffUL) {
-		if (pci_dma_supported(pci_dev, dma_mask)) {
-			rc = pci_set_dma_mask(pci_dev, dma_mask);
+		if (dma_supported(&pci_dev->dev, dma_mask)) {
+			rc = dma_set_mask(&pci_dev->dev, dma_mask);
 			if (rc == 0)
 				break;
 		}
@@ -1117,10 +1117,10 @@ static int efx_init_io(struct efx_nic *efx)
 	}
 	netif_dbg(efx, probe, efx->net_dev,
 		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
-	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
+	rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
 	if (rc) {
-		/* pci_set_consistent_dma_mask() is not *allowed* to
-		 * fail with a mask that pci_set_dma_mask() accepted,
+		/* dma_set_coherent_mask() is not *allowed* to
+		 * fail with a mask that dma_set_mask() accepted,
 		 * but just in case...
 		 */
 		netif_err(efx, probe, efx->net_dev,
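As a standalone illustration of the mask-negotiation pattern in the hunk above, here is a minimal sketch of probing DMA masks with the generic API. The function name is hypothetical and the mask-shrinking step (dma_mask >>= 1) falls outside the shown hunk context, so treat this as an assumption-laden sketch, not the commit's code:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Sketch: walk the DMA mask down from the device's widest supported
 * width until the platform accepts it, then set the matching coherent
 * mask. Hypothetical helper; mirrors the efx_init_io() hunk. */
static int example_set_dma_masks(struct pci_dev *pci_dev, u64 dma_mask)
{
	int rc = -EIO;

	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(&pci_dev->dev, dma_mask)) {
			rc = dma_set_mask(&pci_dev->dev, dma_mask);
			if (rc == 0)
				break;
		}
		/* Assumed: try the next narrower mask on failure. */
		dma_mask >>= 1;
	}
	if (rc)
		return rc;

	/* dma_set_coherent_mask() is not allowed to fail with a mask
	 * that dma_set_mask() accepted, but check anyway. */
	return dma_set_coherent_mask(&pci_dev->dev, dma_mask);
}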
drivers/net/ethernet/sfc/net_driver.h

@@ -100,7 +100,7 @@ struct efx_special_buffer {
  * @len: Length of this fragment.
  *	This field is zero when the queue slot is empty.
  * @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if pci_unmap_single should be used.
+ * @unmap_single: True if dma_unmap_single should be used.
  * @unmap_len: Length of this fragment to unmap
  */
 struct efx_tx_buffer {
drivers/net/ethernet/sfc/nic.c

@@ -308,8 +308,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
 			 unsigned int len)
 {
-	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
-					    &buffer->dma_addr);
+	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+					  &buffer->dma_addr, GFP_ATOMIC);
 	if (!buffer->addr)
 		return -ENOMEM;
 	buffer->len = len;
@@ -320,7 +320,7 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
 void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
 {
 	if (buffer->addr) {
-		pci_free_consistent(efx->pci_dev, buffer->len,
-				    buffer->addr, buffer->dma_addr);
+		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
+				  buffer->addr, buffer->dma_addr);
 		buffer->addr = NULL;
 	}
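The nic.c hunks show the one signature difference that is not purely mechanical: dma_alloc_coherent() takes an explicit gfp_t where pci_alloc_consistent() hard-coded GFP_ATOMIC, so the conversion passes GFP_ATOMIC to preserve behavior. A minimal sketch of the converted alloc/free pair; the struct and function names here are hypothetical stand-ins for the driver's efx_buffer helpers:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical buffer type mirroring struct efx_buffer. */
struct example_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
};

static int example_alloc(struct pci_dev *pci_dev, struct example_buffer *b,
			 unsigned int len)
{
	/* Explicit gfp_t: pci_alloc_consistent() always used GFP_ATOMIC. */
	b->addr = dma_alloc_coherent(&pci_dev->dev, len, &b->dma_addr,
				     GFP_ATOMIC);
	if (!b->addr)
		return -ENOMEM;
	b->len = len;
	return 0;
}

static void example_free(struct pci_dev *pci_dev, struct example_buffer *b)
{
	if (b->addr) {
		dma_free_coherent(&pci_dev->dev, b->len, b->addr, b->dma_addr);
		b->addr = NULL;
	}
}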
drivers/net/ethernet/sfc/rx.c

@@ -155,10 +155,10 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 		rx_buf->len = skb_len - NET_IP_ALIGN;
 		rx_buf->flags = 0;
-		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+		rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
 						  skb->data, rx_buf->len,
-						  PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
+						  DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
 						   rx_buf->dma_addr))) {
 			dev_kfree_skb_any(skb);
 			rx_buf->u.skb = NULL;
@@ -200,10 +200,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 				   efx->rx_buffer_order);
 		if (unlikely(page == NULL))
 			return -ENOMEM;
-		dma_addr = pci_map_page(efx->pci_dev, page, 0,
+		dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
 					efx_rx_buf_size(efx),
-					PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
+					DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
 			__free_pages(page, efx->rx_buffer_order);
 			return -EIO;
 		}
@@ -247,14 +247,14 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 		state = page_address(rx_buf->u.page);
 		if (--state->refcnt == 0) {
-			pci_unmap_page(efx->pci_dev,
+			dma_unmap_page(&efx->pci_dev->dev,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 		}
 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
-		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
-				 rx_buf->len, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
+				 rx_buf->len, DMA_FROM_DEVICE);
 	}
 }
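The rx.c changes all follow the same streaming-mapping pattern: map with DMA_FROM_DEVICE, then check the result with dma_mapping_error() before handing the buffer to hardware. A minimal sketch of that pattern with a hypothetical helper name (not from the commit):

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Sketch: map an skb's data for device-to-memory DMA and verify the
 * mapping, as the rx.c hunks above do. Hypothetical helper. */
static int example_map_rx(struct pci_dev *pci_dev, struct sk_buff *skb,
			  unsigned int len, dma_addr_t *dma_addr)
{
	*dma_addr = dma_map_single(&pci_dev->dev, skb->data, len,
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&pci_dev->dev, *dma_addr))) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}
	return 0;
}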
drivers/net/ethernet/sfc/tx.c

@@ -36,15 +36,15 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       unsigned int *bytes_compl)
 {
 	if (buffer->unmap_len) {
-		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
 		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
 					 buffer->unmap_len);
 		if (buffer->unmap_single)
-			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+					 DMA_TO_DEVICE);
 		else
-			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+				       DMA_TO_DEVICE);
 		buffer->unmap_len = 0;
 		buffer->unmap_single = false;
 	}
@@ -138,7 +138,7 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	struct pci_dev *pci_dev = efx->pci_dev;
+	struct device *dma_dev = &efx->pci_dev->dev;
 	struct efx_tx_buffer *buffer;
 	skb_frag_t *fragment;
 	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
@@ -167,17 +167,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
 	q_space = efx->txq_entries - 1 - fill_level;

-	/* Map for DMA.  Use pci_map_single rather than pci_map_page
+	/* Map for DMA.  Use dma_map_single rather than dma_map_page
 	 * since this is more efficient on machines with sparse
 	 * memory.
 	 */
 	unmap_single = true;
-	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+	dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);

 	/* Process all fragments */
 	while (1) {
-		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
-			goto pci_err;
+		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+			goto dma_err;

 		/* Store fields for marking in the per-fragment final
 		 * descriptor */
@@ -246,7 +246,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 		i++;
 		/* Map for DMA */
 		unmap_single = false;
-		dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
+		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
 					    DMA_TO_DEVICE);
 	}
@@ -261,7 +261,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	return NETDEV_TX_OK;

- pci_err:
+ dma_err:
 	netif_err(efx, tx_err, efx->net_dev,
 		  " TX queue %d could not map skb with %d bytes %d "
 		  "fragments for DMA\n", tx_queue->queue, skb->len,
@@ -284,11 +284,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	/* Free the fragment we were mid-way through pushing */
 	if (unmap_len) {
 		if (unmap_single)
-			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
+					 DMA_TO_DEVICE);
 		else
-			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
+				       DMA_TO_DEVICE);
 	}

 	return rc;
@@ -684,20 +684,19 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
  */
 static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
 {
-	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+	struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
 	struct efx_tso_header *tsoh;
 	dma_addr_t dma_addr;
 	u8 *base_kva, *kva;

-	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
+	base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
 	if (base_kva == NULL) {
 		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
 			  "Unable to allocate page for TSO headers\n");
 		return -ENOMEM;
 	}

-	/* pci_alloc_consistent() allocates pages. */
+	/* dma_alloc_coherent() allocates pages. */
 	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

 	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
@@ -714,7 +713,7 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
 /* Free up a TSO header, and all others in the same page. */
 static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
 				struct efx_tso_header *tsoh,
-				struct pci_dev *pci_dev)
+				struct device *dma_dev)
 {
 	struct efx_tso_header **p;
 	unsigned long base_kva;
@@ -731,7 +730,7 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
 		p = &(*p)->next;
 	}

-	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
+	dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
 }

 static struct efx_tso_header *
@@ -743,10 +742,10 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
 	if (unlikely(!tsoh))
 		return NULL;

-	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
+	tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
 					TSOH_BUFFER(tsoh), header_len,
-					PCI_DMA_TODEVICE);
-	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
+					DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
 				       tsoh->dma_addr))) {
 		kfree(tsoh);
 		return NULL;
@@ -759,9 +758,9 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
 static void
 efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
 {
-	pci_unmap_single(tx_queue->efx->pci_dev,
-			 tsoh->dma_addr, tsoh->unmap_len,
-			 PCI_DMA_TODEVICE);
+	dma_unmap_single(&tx_queue->efx->pci_dev->dev,
+			 tsoh->dma_addr, tsoh->unmap_len,
+			 DMA_TO_DEVICE);
 	kfree(tsoh);
 }
@@ -892,13 +891,13 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 			unmap_addr = (buffer->dma_addr + buffer->len -
 				      buffer->unmap_len);
 			if (buffer->unmap_single)
-				pci_unmap_single(tx_queue->efx->pci_dev,
+				dma_unmap_single(&tx_queue->efx->pci_dev->dev,
 						 unmap_addr, buffer->unmap_len,
-						 PCI_DMA_TODEVICE);
+						 DMA_TO_DEVICE);
 			else
-				pci_unmap_page(tx_queue->efx->pci_dev,
+				dma_unmap_page(&tx_queue->efx->pci_dev->dev,
 					       unmap_addr, buffer->unmap_len,
-					       PCI_DMA_TODEVICE);
+					       DMA_TO_DEVICE);
 			buffer->unmap_len = 0;
 		}
 		buffer->len = 0;
@@ -954,9 +953,9 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
 	int hl = st->header_len;
 	int len = skb_headlen(skb) - hl;

-	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
-					len, PCI_DMA_TODEVICE);
-	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+	st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
+					len, DMA_TO_DEVICE);
+	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
 		st->unmap_single = true;
 		st->unmap_len = len;
 		st->in_len = len;
@@ -1008,7 +1007,7 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
 	buffer->continuation = !end_of_packet;

 	if (st->in_len == 0) {
-		/* Transfer ownership of the pci mapping */
+		/* Transfer ownership of the DMA mapping */
 		buffer->unmap_len = st->unmap_len;
 		buffer->unmap_single = st->unmap_single;
 		st->unmap_len = 0;
@@ -1181,18 +1180,18 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 mem_err:
 	netif_err(efx, tx_err, efx->net_dev,
-		  "Out of memory for TSO headers, or PCI mapping error\n");
+		  "Out of memory for TSO headers, or DMA mapping error\n");
 	dev_kfree_skb_any(skb);

 unwind:
 	/* Free the DMA mapping we were in the process of writing out */
 	if (state.unmap_len) {
 		if (state.unmap_single)
-			pci_unmap_single(efx->pci_dev, state.unmap_addr,
-					 state.unmap_len, PCI_DMA_TODEVICE);
+			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
+					 state.unmap_len, DMA_TO_DEVICE);
 		else
-			pci_unmap_page(efx->pci_dev, state.unmap_addr,
-				       state.unmap_len, PCI_DMA_TODEVICE);
+			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
+				       state.unmap_len, DMA_TO_DEVICE);
 	}

 	efx_enqueue_unwind(tx_queue);
@@ -1216,5 +1215,5 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
 	while (tx_queue->tso_headers_free != NULL)
 		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
-				    tx_queue->efx->pci_dev);
+				    &tx_queue->efx->pci_dev->dev);
 }
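On the TX side the commit threads a struct device *dma_dev through in place of the struct pci_dev *, so map, error-check, and unmap all take the same handle, and the pairing rule is preserved: a buffer mapped with dma_map_single() must be released with dma_unmap_single(), and one mapped as a page with dma_unmap_page(). A minimal sketch of that pairing with hypothetical helper names (not from the commit):

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Sketch: map a TX buffer for memory-to-device DMA. Hypothetical helper;
 * the caller must check the result with dma_mapping_error(). */
static dma_addr_t example_map_tx(struct device *dma_dev, void *data,
				 size_t len)
{
	return dma_map_single(dma_dev, data, len, DMA_TO_DEVICE);
}

/* Sketch: release a TX mapping with the call matching how it was made,
 * as efx_dequeue_buffer() and efx_enqueue_unwind() do above. */
static void example_unmap_tx(struct device *dma_dev, dma_addr_t addr,
			     size_t len, bool single)
{
	if (single)
		dma_unmap_single(dma_dev, addr, len, DMA_TO_DEVICE);
	else
		dma_unmap_page(dma_dev, addr, len, DMA_TO_DEVICE);
}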