nexedi / linux · commit 0e33d870
Authored May 17, 2012 by Ben Hutchings
Parent: 62f8dc52

    sfc: Use generic DMA API, not PCI-DMA API

    Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
5 changed files with 62 additions and 63 deletions:

    drivers/net/ethernet/sfc/efx.c          +5  -5
    drivers/net/ethernet/sfc/net_driver.h   +1  -1
    drivers/net/ethernet/sfc/nic.c          +4  -4
    drivers/net/ethernet/sfc/rx.c           +11 -11
    drivers/net/ethernet/sfc/tx.c           +41 -42
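The conversion is mechanical. Each pci_* DMA wrapper was a thin inline (in include/asm-generic/pci-dma-compat.h) around the corresponding dma_* call on the PCI function's embedded struct device, so every call site replaces pci_dev with &pci_dev->dev and renames the call:

    PCI-DMA API                              generic DMA API
    ---------------------------------------  ---------------------------------------
    pci_dma_supported()                      dma_supported()
    pci_set_dma_mask()                       dma_set_mask()
    pci_set_consistent_dma_mask()            dma_set_coherent_mask()
    pci_alloc_consistent()                   dma_alloc_coherent(..., GFP_ATOMIC)
    pci_free_consistent()                    dma_free_coherent()
    pci_map_single() / pci_unmap_single()    dma_map_single() / dma_unmap_single()
    pci_map_page() / pci_unmap_page()        dma_map_page() / dma_unmap_page()
    pci_dma_mapping_error()                  dma_mapping_error()
    PCI_DMA_TODEVICE / PCI_DMA_FROMDEVICE    DMA_TO_DEVICE / DMA_FROM_DEVICE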
drivers/net/ethernet/sfc/efx.c

@@ -1103,8 +1103,8 @@ static int efx_init_io(struct efx_nic *efx)
 	 * masks event though they reject 46 bit masks.
 	 */
 	while (dma_mask > 0x7fffffffUL) {
-		if (pci_dma_supported(pci_dev, dma_mask)) {
-			rc = pci_set_dma_mask(pci_dev, dma_mask);
+		if (dma_supported(&pci_dev->dev, dma_mask)) {
+			rc = dma_set_mask(&pci_dev->dev, dma_mask);
 			if (rc == 0)
 				break;
 		}

@@ -1117,10 +1117,10 @@ static int efx_init_io(struct efx_nic *efx)
 	}
 	netif_dbg(efx, probe, efx->net_dev,
 		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
-	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
+	rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
 	if (rc) {
-		/* pci_set_consistent_dma_mask() is not *allowed* to
-		 * fail with a mask that pci_set_dma_mask() accepted,
+		/* dma_set_coherent_mask() is not *allowed* to
+		 * fail with a mask that dma_set_mask() accepted,
 		 * but just in case... */
 		netif_err(efx, probe, efx->net_dev,
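Note: probing masks from the device's native width down toward 32 bits works around platforms that accept a narrower mask while rejecting a wider one, as the comment above notes. A minimal self-contained sketch of the same negotiation with the generic API; my_set_dma_masks() is an illustrative name, not part of the sfc driver:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch only: walk the streaming DMA mask down from the device's
 * native width until the platform accepts it, then set the coherent
 * mask to the value that succeeded. */
static int my_set_dma_masks(struct pci_dev *pci_dev, u64 dma_mask)
{
	int rc = -EIO;

	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(&pci_dev->dev, dma_mask)) {
			rc = dma_set_mask(&pci_dev->dev, dma_mask);
			if (rc == 0)
				break;
		}
		dma_mask >>= 1;
	}
	if (rc)
		return rc;

	/* dma_set_coherent_mask() may not fail with a mask that
	 * dma_set_mask() accepted, but check anyway. */
	return dma_set_coherent_mask(&pci_dev->dev, dma_mask);
}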
drivers/net/ethernet/sfc/net_driver.h

@@ -100,7 +100,7 @@ struct efx_special_buffer {
  * @len: Length of this fragment.
  *	This field is zero when the queue slot is empty.
  * @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if pci_unmap_single should be used.
+ * @unmap_single: True if dma_unmap_single should be used.
  * @unmap_len: Length of this fragment to unmap
  */
 struct efx_tx_buffer {
drivers/net/ethernet/sfc/nic.c

@@ -308,8 +308,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
 {
-	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
-					    &buffer->dma_addr);
+	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+					  &buffer->dma_addr, GFP_ATOMIC);
 	if (!buffer->addr)
 		return -ENOMEM;
 	buffer->len = len;

@@ -320,8 +320,8 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
 void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
 {
 	if (buffer->addr) {
-		pci_free_consistent(efx->pci_dev, buffer->len,
-				    buffer->addr, buffer->dma_addr);
+		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
+				  buffer->addr, buffer->dma_addr);
 		buffer->addr = NULL;
 	}
 }
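Note: pci_alloc_consistent() always allocated with GFP_ATOMIC, which is why the converted code passes GFP_ATOMIC explicitly; dma_alloc_coherent() makes the allocation context a visible choice. A sketch of the alloc/free pairing under illustrative names (struct my_ring and the helpers are not part of the driver):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Sketch only: dma_alloc_coherent() returns a CPU pointer and fills
 * in the bus address; dma_free_coherent() must be called with the
 * same device, size and both addresses. */
struct my_ring {
	void		*addr;
	dma_addr_t	dma_addr;
	unsigned int	len;
};

static int my_ring_alloc(struct device *dev, struct my_ring *ring,
			 unsigned int len)
{
	/* GFP_ATOMIC preserves pci_alloc_consistent()'s behaviour;
	 * callers that can sleep would pass GFP_KERNEL instead. */
	ring->addr = dma_alloc_coherent(dev, len, &ring->dma_addr,
					GFP_ATOMIC);
	if (!ring->addr)
		return -ENOMEM;
	ring->len = len;
	return 0;
}

static void my_ring_free(struct device *dev, struct my_ring *ring)
{
	if (ring->addr) {
		dma_free_coherent(dev, ring->len, ring->addr,
				  ring->dma_addr);
		ring->addr = NULL;
	}
}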
drivers/net/ethernet/sfc/rx.c

@@ -155,11 +155,11 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 		rx_buf->len = skb_len - NET_IP_ALIGN;
 		rx_buf->flags = 0;
-		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+		rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
 						  skb->data, rx_buf->len,
-						  PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
-						   rx_buf->dma_addr))) {
+						  DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+					       rx_buf->dma_addr))) {
 			dev_kfree_skb_any(skb);
 			rx_buf->u.skb = NULL;
 			return -EIO;

@@ -200,10 +200,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 				   efx->rx_buffer_order);
 		if (unlikely(page == NULL))
 			return -ENOMEM;
-		dma_addr = pci_map_page(efx->pci_dev, page, 0,
+		dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
 					efx_rx_buf_size(efx),
-					PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
+					DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
 			__free_pages(page, efx->rx_buffer_order);
 			return -EIO;
 		}

@@ -247,14 +247,14 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 		state = page_address(rx_buf->u.page);
 		if (--state->refcnt == 0) {
-			pci_unmap_page(efx->pci_dev,
+			dma_unmap_page(&efx->pci_dev->dev,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 		}
 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
-		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
-				 rx_buf->len, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
+				 rx_buf->len, DMA_FROM_DEVICE);
 	}
 }
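Note: the RX path uses streaming mappings: a buffer is handed to the device with dma_map_single()/dma_map_page(), and every mapping must be checked with dma_mapping_error() before the address reaches hardware. A sketch of the pattern with illustrative names (my_map_rx_buf()/my_unmap_rx_buf() are not part of the driver):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Sketch only: map a receive buffer for device-to-memory DMA and
 * validate the mapping before use.  The unmap must use the same
 * device, address, length and direction. */
static int my_map_rx_buf(struct device *dev, void *data, size_t len,
			 dma_addr_t *dma_addr)
{
	*dma_addr = dma_map_single(dev, data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr))
		return -EIO;	/* never hand a failed mapping to the NIC */
	return 0;
}

static void my_unmap_rx_buf(struct device *dev, dma_addr_t dma_addr,
			    size_t len)
{
	dma_unmap_single(dev, dma_addr, len, DMA_FROM_DEVICE);
}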
drivers/net/ethernet/sfc/tx.c

@@ -36,15 +36,15 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       unsigned int *bytes_compl)
 {
 	if (buffer->unmap_len) {
-		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
 		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
 		if (buffer->unmap_single)
-			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+					 DMA_TO_DEVICE);
 		else
-			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+				       DMA_TO_DEVICE);
 		buffer->unmap_len = 0;
 		buffer->unmap_single = false;
 	}
@@ -138,7 +138,7 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	struct pci_dev *pci_dev = efx->pci_dev;
+	struct device *dma_dev = &efx->pci_dev->dev;
 	struct efx_tx_buffer *buffer;
 	skb_frag_t *fragment;
 	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
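Note: the generic API addresses the device by its struct device; for a PCI function this is the dev member embedded in struct pci_dev, so local pci_dev pointers become pointers to &pci_dev->dev. A one-line sketch (my_dma_dev() is illustrative):

#include <linux/pci.h>

/* Sketch only: &pci_dev->dev is the struct device the generic DMA
 * API expects; nothing else about the device changes. */
static struct device *my_dma_dev(struct pci_dev *pci_dev)
{
	return &pci_dev->dev;
}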
@@ -167,17 +167,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
 	q_space = efx->txq_entries - 1 - fill_level;

-	/* Map for DMA.  Use pci_map_single rather than pci_map_page
+	/* Map for DMA.  Use dma_map_single rather than dma_map_page
 	 * since this is more efficient on machines with sparse
 	 * memory.
 	 */
 	unmap_single = true;
-	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+	dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);

 	/* Process all fragments */
 	while (1) {
-		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
-			goto pci_err;
+		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+			goto dma_err;

 		/* Store fields for marking in the per-fragment final
 		 * descriptor */
@@ -246,7 +246,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 		i++;
 		/* Map for DMA */
 		unmap_single = false;
-		dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
+		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
 					    DMA_TO_DEVICE);
 	}
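Note: skb_frag_dma_map() already takes a struct device *, so this call site only swaps the argument. A sketch of mapping an skb's paged fragments for transmit, with my_map_frags() as an illustrative name:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Sketch only: map each paged fragment of an skb for transmit.
 * skb_frag_dma_map() wraps dma_map_page() with the fragment's page,
 * offset and size. */
static int my_map_frags(struct device *dev, struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t dma_addr;

		dma_addr = skb_frag_dma_map(dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr))
			return -EIO;	/* caller unwinds earlier mappings */
		/* ... write dma_addr into the hardware descriptor ... */
	}
	return 0;
}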
@@ -261,7 +261,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	return NETDEV_TX_OK;

-pci_err:
+dma_err:
 	netif_err(efx, tx_err, efx->net_dev,
 		  " TX queue %d could not map skb with %d bytes %d "
 		  "fragments for DMA\n", tx_queue->queue, skb->len,
@@ -284,11 +284,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	/* Free the fragment we were mid-way through pushing */
 	if (unmap_len) {
 		if (unmap_single)
-			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
+					 DMA_TO_DEVICE);
 		else
-			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
+				       DMA_TO_DEVICE);
 	}

 	return rc;
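Note: the DMA API requires the unmap call to match the original map variant, which is what the driver's unmap_single flag tracks through both the fast path and this error path. A sketch (my_unmap_tx() is illustrative):

#include <linux/dma-mapping.h>

/* Sketch only: an address mapped with dma_map_single() must be
 * released with dma_unmap_single(), and one mapped with
 * dma_map_page()/skb_frag_dma_map() with dma_unmap_page(). */
static void my_unmap_tx(struct device *dev, dma_addr_t dma_addr,
			size_t len, bool mapped_single)
{
	if (mapped_single)
		dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
	else
		dma_unmap_page(dev, dma_addr, len, DMA_TO_DEVICE);
}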
@@ -684,20 +684,19 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
  */
 static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
 {
-	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+	struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
 	struct efx_tso_header *tsoh;
 	dma_addr_t dma_addr;
 	u8 *base_kva, *kva;

-	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
+	base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
 	if (base_kva == NULL) {
 		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
 			  "Unable to allocate page for TSO headers\n");
 		return -ENOMEM;
 	}

-	/* pci_alloc_consistent() allocates pages. */
+	/* dma_alloc_coherent() allocates pages. */
 	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

 	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
@@ -714,7 +713,7 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
 /* Free up a TSO header, and all others in the same page. */
 static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
 				struct efx_tso_header *tsoh,
-				struct pci_dev *pci_dev)
+				struct device *dma_dev)
 {
 	struct efx_tso_header **p;
 	unsigned long base_kva;
@@ -731,7 +730,7 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
 			p = &(*p)->next;
 	}

-	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
+	dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
 }

 static struct efx_tso_header *
@@ -743,11 +742,11 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
 	if (unlikely(!tsoh))
 		return NULL;

-	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
+	tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
 					TSOH_BUFFER(tsoh), header_len,
-					PCI_DMA_TODEVICE);
-	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
-					   tsoh->dma_addr))) {
+					DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
+				       tsoh->dma_addr))) {
 		kfree(tsoh);
 		return NULL;
 	}
@@ -759,9 +758,9 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
 static void
 efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
 {
-	pci_unmap_single(tx_queue->efx->pci_dev,
+	dma_unmap_single(&tx_queue->efx->pci_dev->dev,
 			 tsoh->dma_addr, tsoh->unmap_len,
-			 PCI_DMA_TODEVICE);
+			 DMA_TO_DEVICE);
 	kfree(tsoh);
 }
@@ -892,13 +891,13 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 		unmap_addr = (buffer->dma_addr + buffer->len -
 			      buffer->unmap_len);
 		if (buffer->unmap_single)
-			pci_unmap_single(tx_queue->efx->pci_dev,
+			dma_unmap_single(&tx_queue->efx->pci_dev->dev,
 					 unmap_addr, buffer->unmap_len,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		else
-			pci_unmap_page(tx_queue->efx->pci_dev,
+			dma_unmap_page(&tx_queue->efx->pci_dev->dev,
 				       unmap_addr, buffer->unmap_len,
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
 		buffer->unmap_len = 0;
 	}
 	buffer->len = 0;
@@ -954,9 +953,9 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
 	int hl = st->header_len;
 	int len = skb_headlen(skb) - hl;
-	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
-					len, PCI_DMA_TODEVICE);
-	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+	st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
+					len, DMA_TO_DEVICE);
+	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
 		st->unmap_single = true;
 		st->unmap_len = len;
 		st->in_len = len;
@@ -1008,7 +1007,7 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
 	buffer->continuation = !end_of_packet;

 	if (st->in_len == 0) {
-		/* Transfer ownership of the pci mapping */
+		/* Transfer ownership of the DMA mapping */
 		buffer->unmap_len = st->unmap_len;
 		buffer->unmap_single = st->unmap_single;
 		st->unmap_len = 0;
@@ -1181,18 +1180,18 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 mem_err:
 	netif_err(efx, tx_err, efx->net_dev,
-		  "Out of memory for TSO headers, or PCI mapping error\n");
+		  "Out of memory for TSO headers, or DMA mapping error\n");
 	dev_kfree_skb_any(skb);

 unwind:
 	/* Free the DMA mapping we were in the process of writing out */
 	if (state.unmap_len) {
 		if (state.unmap_single)
-			pci_unmap_single(efx->pci_dev, state.unmap_addr,
-					 state.unmap_len, PCI_DMA_TODEVICE);
+			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
+					 state.unmap_len, DMA_TO_DEVICE);
 		else
-			pci_unmap_page(efx->pci_dev, state.unmap_addr,
-				       state.unmap_len, PCI_DMA_TODEVICE);
+			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
+				       state.unmap_len, DMA_TO_DEVICE);
 	}

 	efx_enqueue_unwind(tx_queue);
@@ -1216,5 +1215,5 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
 	while (tx_queue->tso_headers_free != NULL)
 		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
-				    tx_queue->efx->pci_dev);
+				    &tx_queue->efx->pci_dev->dev);
 }