Commit 16714d98 authored by Benjamin Poirier, committed by Greg Kroah-Hartman

staging: qlge: Remove rx_ring.sbq_buf_size

Tx completion rings have sbq_buf_size = 0, but there is no code path that
actually tests that value. We can remove sbq_buf_size and use the
SMALL_BUF_MAP_SIZE constant instead.
Signed-off-by: Benjamin Poirier <bpoirier@suse.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://lore.kernel.org/r/20190927101210.23856-7-bpoirier@suse.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent a68a5b2f
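
In essence, the patch substitutes the SMALL_BUF_MAP_SIZE constant for every
read of rx_ring->sbq_buf_size and deletes the writes. A minimal sketch of the
pattern follows, with hypothetical names and an illustrative value rather
than the driver's actual definitions:

#include <linux/types.h>

#define SMALL_BUF_MAP_SIZE 512		/* illustrative value only */

struct rx_ring_example {
	u32 sbq_len;			/* entry count */
	/* u32 sbq_buf_size; -- dropped: it only ever held the constant or 0 */
};

static u32 sbq_bytes_example(const struct rx_ring_example *ring)
{
	/* Read sites use the compile-time constant directly. */
	return ring->sbq_len * SMALL_BUF_MAP_SIZE;
}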
@@ -1447,7 +1447,6 @@ struct rx_ring {
 	/* Small buffer queue elements. */
 	u32 sbq_len;		/* entry count */
 	u32 sbq_size;		/* size in bytes of queue */
-	u32 sbq_buf_size;
 	void *sbq_base;
 	dma_addr_t sbq_base_dma;
 	void *sbq_base_indirect;
@@ -1791,7 +1791,6 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
 	pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
 	pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
 	pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
-	pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
 	pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
 	pr_err("rx_ring->irq = %d\n", rx_ring->irq);
 	pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
@@ -1164,7 +1164,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
 				map = pci_map_single(qdev->pdev,
 						     sbq_desc->p.skb->data,
-						     rx_ring->sbq_buf_size,
+						     SMALL_BUF_MAP_SIZE,
 						     PCI_DMA_FROMDEVICE);
 				if (pci_dma_mapping_error(qdev->pdev, map)) {
 					netif_err(qdev, ifup, qdev->ndev,
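
For a given buffer, the DMA API requires that the size passed to
pci_map_single() match the size later passed to pci_dma_sync_single_for_*()
and pci_unmap_single(), which is why every call site switches to the constant
in the same patch. A minimal sketch of that pairing, with hypothetical names
(my_map_unmap_example, MY_BUF_SIZE):

#include <linux/pci.h>

#define MY_BUF_SIZE 512		/* hypothetical stand-in for SMALL_BUF_MAP_SIZE */

static int my_map_unmap_example(struct pci_dev *pdev, void *buf)
{
	dma_addr_t map;

	/* Map the buffer for device-to-CPU DMA using the constant size... */
	map = pci_map_single(pdev, buf, MY_BUF_SIZE, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, map))
		return -ENOMEM;

	/* ...and unmap it with the same size; a mismatch trips DMA API debugging. */
	pci_unmap_single(pdev, map, MY_BUF_SIZE, PCI_DMA_FROMDEVICE);
	return 0;
}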
@@ -1594,14 +1594,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 		pci_dma_sync_single_for_cpu(qdev->pdev,
 					    dma_unmap_addr(sbq_desc, mapaddr),
-					    rx_ring->sbq_buf_size,
-					    PCI_DMA_FROMDEVICE);
+					    SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
 		skb_put_data(new_skb, skb->data, length);
 		pci_dma_sync_single_for_device(qdev->pdev,
 					       dma_unmap_addr(sbq_desc, mapaddr),
-					       rx_ring->sbq_buf_size,
+					       SMALL_BUF_MAP_SIZE,
 					       PCI_DMA_FROMDEVICE);
 		skb = new_skb;
@@ -1723,7 +1722,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		sbq_desc = ql_get_curr_sbuf(rx_ring);
 		pci_unmap_single(qdev->pdev,
 				 dma_unmap_addr(sbq_desc, mapaddr),
-				 rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE);
+				 SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
 		skb = sbq_desc->p.skb;
 		ql_realign_skb(skb, hdr_len);
 		skb_put(skb, hdr_len);
@@ -1755,13 +1754,13 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			pci_dma_sync_single_for_cpu(qdev->pdev,
 						    dma_unmap_addr(sbq_desc,
 								   mapaddr),
-						    rx_ring->sbq_buf_size,
+						    SMALL_BUF_MAP_SIZE,
 						    PCI_DMA_FROMDEVICE);
 			skb_put_data(skb, sbq_desc->p.skb->data, length);
 			pci_dma_sync_single_for_device(qdev->pdev,
 						       dma_unmap_addr(sbq_desc,
 								      mapaddr),
-						       rx_ring->sbq_buf_size,
+						       SMALL_BUF_MAP_SIZE,
 						       PCI_DMA_FROMDEVICE);
 		} else {
 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1773,7 +1772,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			skb_put(skb, length);
 			pci_unmap_single(qdev->pdev,
 					 dma_unmap_addr(sbq_desc, mapaddr),
-					 rx_ring->sbq_buf_size,
+					 SMALL_BUF_MAP_SIZE,
 					 PCI_DMA_FROMDEVICE);
 			sbq_desc->p.skb = NULL;
 		}
@@ -1846,7 +1845,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			sbq_desc = ql_get_curr_sbuf(rx_ring);
 			pci_unmap_single(qdev->pdev,
 					 dma_unmap_addr(sbq_desc, mapaddr),
-					 rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE);
+					 SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
 			if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
 				/*
 				 * This is an non TCP/UDP IP frame, so
@@ -2807,7 +2806,7 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 		if (sbq_desc->p.skb) {
 			pci_unmap_single(qdev->pdev,
 					 dma_unmap_addr(sbq_desc, mapaddr),
-					 rx_ring->sbq_buf_size,
+					 SMALL_BUF_MAP_SIZE,
 					 PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(sbq_desc->p.skb);
 			sbq_desc->p.skb = NULL;
@@ -3158,8 +3157,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
 		cqicb->sbq_addr =
 			cpu_to_le64(rx_ring->sbq_base_indirect_dma);
-		cqicb->sbq_buf_size =
-			cpu_to_le16((u16)(rx_ring->sbq_buf_size));
+		cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUF_MAP_SIZE);
 		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
 			(u16) rx_ring->sbq_len;
 		cqicb->sbq_len = cpu_to_le16(bq_len);
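
Note the simplification in this hunk: with a compile-time constant, the (u16)
cast is unnecessary and cpu_to_le16() takes the value directly, since the
hardware reads the control block field as little-endian. A minimal sketch,
with a hypothetical my_cqicb structure:

#include <linux/types.h>
#include <asm/byteorder.h>

#define MY_BUF_SIZE 512		/* hypothetical constant */

struct my_cqicb {
	__le16 sbq_buf_size;	/* device expects little-endian */
};

static void my_fill_cqicb(struct my_cqicb *cqicb)
{
	/* Convert from CPU endianness before the device reads the field. */
	cqicb->sbq_buf_size = cpu_to_le16(MY_BUF_SIZE);
}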
@@ -4109,7 +4107,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
 			rx_ring->sbq_size =
 				rx_ring->sbq_len * sizeof(__le64);
-			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
 			rx_ring->type = RX_Q;
 		} else {
 			/*
@@ -4123,7 +4120,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 			rx_ring->lbq_size = 0;
 			rx_ring->sbq_len = 0;
 			rx_ring->sbq_size = 0;
-			rx_ring->sbq_buf_size = 0;
 			rx_ring->type = TX_Q;
 		}
 	}