Commit 52e55f3c authored by Ron Mercer, committed by David S. Miller

qlge: Remove inline math for small rx buf mapping.

rx_ring->sbq_buf_size now holds the length of the mapped portion of the
buffer rather than the overall length.
Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 88051b4e
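The change is easiest to see side by side. Both before and after this patch the driver allocates a 512-byte skb for each small buffer and DMA-maps only the first 256 bytes of it; the difference is that sbq_buf_size now records the mapped length directly instead of the full allocation, so the "/ 2" disappears from the call sites. A schematic fragment of the two versions (ndev, pdev, skb and map are shorthand for the surrounding driver context, not identifiers introduced by the patch):

    /* Before: sbq_buf_size held the full allocation (256 * 2 = 512),
     * so every consumer halved it inline.
     */
    rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;          /* 512 */
    skb = netdev_alloc_skb(ndev, rx_ring->sbq_buf_size);    /* 512-byte skb */
    map = pci_map_single(pdev, skb->data,
                         rx_ring->sbq_buf_size / 2,         /* map 256 bytes */
                         PCI_DMA_FROMDEVICE);

    /* After: sbq_buf_size holds only the mapped length (512 / 2 = 256),
     * so no inline math is needed at the call sites.
     */
    rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;             /* 256 */
    skb = netdev_alloc_skb(ndev, SMALL_BUFFER_SIZE);        /* 512-byte skb */
    map = pci_map_single(pdev, skb->data,
                         rx_ring->sbq_buf_size,             /* map 256 bytes */
                         PCI_DMA_FROMDEVICE);

The allocation and mapping sizes are therefore identical before and after; only the arithmetic moves out of the hot path and into the SMALL_BUFFER_SIZE/SMALL_BUF_MAP_SIZE macros.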
@@ -54,7 +54,8 @@
 #define RX_RING_SHADOW_SPACE (sizeof(u64) + \
         MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
         MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
-#define SMALL_BUFFER_SIZE 256
+#define SMALL_BUFFER_SIZE 512
+#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
 #define LARGE_BUFFER_SIZE PAGE_SIZE
 #define MAX_SPLIT_SIZE 1023
 #define QLGE_SB_PAD 32
@@ -1147,7 +1147,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
                                          sbq_desc->index);
             sbq_desc->p.skb =
                 netdev_alloc_skb(qdev->ndev,
-                                 rx_ring->sbq_buf_size);
+                                 SMALL_BUFFER_SIZE);
             if (sbq_desc->p.skb == NULL) {
                 QPRINTK(qdev, PROBE, ERR,
                     "Couldn't get an skb.\n");
@@ -1157,8 +1157,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
             skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
             map = pci_map_single(qdev->pdev,
                          sbq_desc->p.skb->data,
-                         rx_ring->sbq_buf_size /
-                         2, PCI_DMA_FROMDEVICE);
+                         rx_ring->sbq_buf_size,
+                         PCI_DMA_FROMDEVICE);
             if (pci_dma_mapping_error(qdev->pdev, map)) {
                 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
                 rx_ring->sbq_clean_idx = clean_idx;
@@ -1168,7 +1168,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
             }
             pci_unmap_addr_set(sbq_desc, mapaddr, map);
             pci_unmap_len_set(sbq_desc, maplen,
-                      rx_ring->sbq_buf_size / 2);
+                      rx_ring->sbq_buf_size);
             *sbq_desc->addr = cpu_to_le64(map);
         }
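Because maplen is now set to the mapped length itself, the completion path can hand the stored value straight back to the unmap call with no halving either. A minimal sketch of that pattern, assuming the pci_unmap_addr()/pci_unmap_len() accessors used elsewhere in this driver (the helper name below is hypothetical, for illustration only):

    static void ql_unmap_small_buf(struct ql_adapter *qdev,
                                   struct bq_desc *sbq_desc)
    {
        /* Unmap exactly the length recorded at map time; no "/ 2" needed. */
        pci_unmap_single(qdev->pdev,
                         pci_unmap_addr(sbq_desc, mapaddr),
                         pci_unmap_len(sbq_desc, maplen),
                         PCI_DMA_FROMDEVICE);
    }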
@@ -2693,7 +2693,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
         cqicb->sbq_addr =
             cpu_to_le64(rx_ring->sbq_base_indirect_dma);
         cqicb->sbq_buf_size =
-            cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
+            cpu_to_le16((u16)(rx_ring->sbq_buf_size));
         bq_len = (rx_ring->sbq_len == 65536) ? 0 :
             (u16) rx_ring->sbq_len;
         cqicb->sbq_len = cpu_to_le16(bq_len);
@@ -3269,7 +3269,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
     ql_write32(qdev, FSC, mask | value);
     ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
-        min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
+        min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
     /* Set RX packet routing to use port/pci function on which the
      * packet arrived on in addition to usual frame routing.
@@ -3549,7 +3549,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
             rx_ring->sbq_len = NUM_SMALL_BUFFERS;
             rx_ring->sbq_size =
                 rx_ring->sbq_len * sizeof(__le64);
-            rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
+            rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
             rx_ring->type = RX_Q;
         } else {
             /*