Commit 1b507730 authored by Nick Nunley, committed by David S. Miller

ixgbe: use DMA API instead of PCI DMA functions

Signed-off-by: Nicholas Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 47631f85
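Note: the change below is mechanical throughout — every PCI DMA wrapper (pci_map_single, pci_unmap_page, pci_alloc_consistent, pci_dma_mapping_error, pci_set_dma_mask, ...) is replaced by the corresponding generic DMA API call on &pdev->dev, with GFP_KERNEL passed explicitly for coherent allocations. A minimal sketch of the streaming-mapping half of that pattern is shown here; example_map(), buf and len are illustrative placeholder names, not code from this driver.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_map(struct pci_dev *pdev, void *buf, size_t len,
		       dma_addr_t *dma)
{
	/* old: *dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE); */
	*dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

	/* old: if (pci_dma_mapping_error(pdev, *dma)) */
	if (dma_mapping_error(&pdev->dev, *dma))
		return -ENOMEM;

	/* old: pci_unmap_single(pdev, *dma, len, PCI_DMA_TODEVICE); */
	dma_unmap_single(&pdev->dev, *dma, len, DMA_TO_DEVICE);
	return 0;
}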
@@ -1458,8 +1458,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 			struct ixgbe_tx_buffer *buf =
 					&(tx_ring->tx_buffer_info[i]);
 			if (buf->dma)
-				pci_unmap_single(pdev, buf->dma, buf->length,
-						 PCI_DMA_TODEVICE);
+				dma_unmap_single(&pdev->dev, buf->dma,
+						 buf->length, DMA_TO_DEVICE);
 			if (buf->skb)
 				dev_kfree_skb(buf->skb);
 		}
@@ -1470,22 +1470,22 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 			struct ixgbe_rx_buffer *buf =
 					&(rx_ring->rx_buffer_info[i]);
 			if (buf->dma)
-				pci_unmap_single(pdev, buf->dma,
+				dma_unmap_single(&pdev->dev, buf->dma,
 						 IXGBE_RXBUFFER_2048,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			if (buf->skb)
 				dev_kfree_skb(buf->skb);
 		}
 	}
 	if (tx_ring->desc) {
-		pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
+		dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
 				  tx_ring->dma);
 		tx_ring->desc = NULL;
 	}
 	if (rx_ring->desc) {
-		pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
+		dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
 				  rx_ring->dma);
 		rx_ring->desc = NULL;
 	}
@@ -1520,8 +1520,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
-	if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-						   &tx_ring->dma))) {
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
+	if (!(tx_ring->desc)) {
 		ret_val = 2;
 		goto err_nomem;
 	}
@@ -1563,8 +1564,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 		tx_ring->tx_buffer_info[i].skb = skb;
 		tx_ring->tx_buffer_info[i].length = skb->len;
 		tx_ring->tx_buffer_info[i].dma =
-			pci_map_single(pdev, skb->data, skb->len,
-				       PCI_DMA_TODEVICE);
+			dma_map_single(&pdev->dev, skb->data, skb->len,
+				       DMA_TO_DEVICE);
 		desc->read.buffer_addr =
 				cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
 		desc->read.cmd_type_len = cpu_to_le32(skb->len);
@@ -1593,8 +1594,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
-	if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-						   &rx_ring->dma))) {
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
+	if (!(rx_ring->desc)) {
 		ret_val = 5;
 		goto err_nomem;
 	}
@@ -1661,8 +1663,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 		skb_reserve(skb, NET_IP_ALIGN);
 		rx_ring->rx_buffer_info[i].skb = skb;
 		rx_ring->rx_buffer_info[i].dma =
-			pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
-				       PCI_DMA_FROMDEVICE);
+			dma_map_single(&pdev->dev, skb->data,
+				       IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
 		rx_desc->read.pkt_addr =
 				cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
 		memset(skb->data, 0x00, skb->len);
@@ -1775,10 +1777,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
 			ixgbe_create_lbtest_frame(
 					tx_ring->tx_buffer_info[k].skb,
 					1024);
-			pci_dma_sync_single_for_device(pdev,
+			dma_sync_single_for_device(&pdev->dev,
 				tx_ring->tx_buffer_info[k].dma,
 				tx_ring->tx_buffer_info[k].length,
-				PCI_DMA_TODEVICE);
+				DMA_TO_DEVICE);
 			if (unlikely(++k == tx_ring->count))
 				k = 0;
 		}
@@ -1789,10 +1791,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
 		good_cnt = 0;
 		do {
 			/* receive the sent packets */
-			pci_dma_sync_single_for_cpu(pdev,
+			dma_sync_single_for_cpu(&pdev->dev,
 					rx_ring->rx_buffer_info[l].dma,
 					IXGBE_RXBUFFER_2048,
-					PCI_DMA_FROMDEVICE);
+					DMA_FROM_DEVICE);
 			ret_val = ixgbe_check_lbtest_frame(
 					rx_ring->rx_buffer_info[l].skb, 1024);
 			if (!ret_val)
...
@@ -266,15 +266,15 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 {
 	if (tx_buffer_info->dma) {
 		if (tx_buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev,
+			dma_unmap_page(&adapter->pdev->dev,
 				       tx_buffer_info->dma,
 				       tx_buffer_info->length,
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev,
+			dma_unmap_single(&adapter->pdev->dev,
 					 tx_buffer_info->dma,
 					 tx_buffer_info->length,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		tx_buffer_info->dma = 0;
 	}
 	if (tx_buffer_info->skb) {
@@ -721,10 +721,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 				bi->page_offset ^= (PAGE_SIZE / 2);
 			}
-			bi->page_dma = pci_map_page(pdev, bi->page,
+			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
 						    bi->page_offset,
 						    (PAGE_SIZE / 2),
-						    PCI_DMA_FROMDEVICE);
+						    DMA_FROM_DEVICE);
 		}
 		if (!bi->skb) {
@@ -743,9 +743,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 						 - skb->data));
 			bi->skb = skb;
-			bi->dma = pci_map_single(pdev, skb->data,
+			bi->dma = dma_map_single(&pdev->dev, skb->data,
 						 rx_ring->rx_buf_len,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
@@ -886,16 +886,17 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 				 */
 				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
 			else
-				pci_unmap_single(pdev, rx_buffer_info->dma,
+				dma_unmap_single(&pdev->dev,
+						 rx_buffer_info->dma,
 						 rx_ring->rx_buf_len,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
 			skb_put(skb, len);
 		}
 		if (upper_len) {
-			pci_unmap_page(pdev, rx_buffer_info->page_dma,
-				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 			rx_buffer_info->page_dma = 0;
 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 					   rx_buffer_info->page,
@@ -937,9 +938,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 			if (IXGBE_RSC_CB(skb)->dma) {
-				pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
+				dma_unmap_single(&pdev->dev,
+						 IXGBE_RSC_CB(skb)->dma,
 						 rx_ring->rx_buf_len,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 				IXGBE_RSC_CB(skb)->dma = 0;
 			}
 			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
@@ -3154,9 +3156,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 		rx_buffer_info = &rx_ring->rx_buffer_info[i];
 		if (rx_buffer_info->dma) {
-			pci_unmap_single(pdev, rx_buffer_info->dma,
+			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
 		}
 		if (rx_buffer_info->skb) {
@@ -3165,9 +3167,10 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 			do {
 				struct sk_buff *this = skb;
 				if (IXGBE_RSC_CB(this)->dma) {
-					pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
+					dma_unmap_single(&pdev->dev,
+							 IXGBE_RSC_CB(this)->dma,
 							 rx_ring->rx_buf_len,
-							 PCI_DMA_FROMDEVICE);
+							 DMA_FROM_DEVICE);
 					IXGBE_RSC_CB(this)->dma = 0;
 				}
 				skb = skb->prev;
@@ -3177,8 +3180,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 		if (!rx_buffer_info->page)
 			continue;
 		if (rx_buffer_info->page_dma) {
-			pci_unmap_page(pdev, rx_buffer_info->page_dma,
-				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 			rx_buffer_info->page_dma = 0;
 		}
 		put_page(rx_buffer_info->page);
@@ -4403,8 +4406,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-					     &tx_ring->dma);
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
 	if (!tx_ring->desc)
 		goto err;
@@ -4474,7 +4477,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
 	if (!rx_ring->desc) {
 		DPRINTK(PROBE, ERR,
@@ -4535,7 +4539,8 @@ void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
 	tx_ring->desc = NULL;
 }
@@ -4572,7 +4577,8 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
-	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
 	rx_ring->desc = NULL;
 }
@@ -5442,10 +5448,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 		tx_buffer_info->length = size;
 		tx_buffer_info->mapped_as_page = false;
-		tx_buffer_info->dma = pci_map_single(pdev,
+		tx_buffer_info->dma = dma_map_single(&pdev->dev,
 						     skb->data + offset,
-						     size, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+						     size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 			goto dma_error;
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
@@ -5478,12 +5484,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 			tx_buffer_info->length = size;
-			tx_buffer_info->dma = pci_map_page(adapter->pdev,
+			tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
 							   frag->page,
 							   offset, size,
-							   PCI_DMA_TODEVICE);
+							   DMA_TO_DEVICE);
 			tx_buffer_info->mapped_as_page = true;
-			if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 				goto dma_error;
 			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
@@ -6061,13 +6067,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	if (err)
 		return err;
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
 					"configuration, aborting\n");
...