Commit b8facca0 authored by Ron Mercer, committed by David S. Miller

qlge: Allow RX buf rings to be > 4096 bytes.

RX buffer rings can be composed of non-contiguous, fixed-size
chunks of memory. The ring is handed to the hardware as a pointer
to a location that stores the address of the queue. If the queue
is larger than 4096 bytes, the hardware instead gets a list of
such page pointers.
This patch adds the logic needed to generate that list when the
queue size exceeds 4096 bytes.
Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 88c55e3c
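For context, each buffer-queue entry is a 64-bit DMA address, so the default 512-entry queues occupy exactly one 4096-byte page; only longer queues need the new indirect page list. A minimal userspace sketch of that arithmetic (the pages_per_bq() helper name is illustrative, not part of the driver):

#include <stdio.h>
#include <stdint.h>

#define DB_PAGE_SIZE 4096	/* same 4 KB doorbell page size the hardware uses */

/* Round a queue's byte footprint up to whole DB pages. */
static unsigned int pages_per_bq(unsigned int entries)
{
	size_t bytes = entries * sizeof(uint64_t);	/* one u64 per entry */

	return (bytes / DB_PAGE_SIZE) + ((bytes % DB_PAGE_SIZE) ? 1 : 0);
}

int main(void)
{
	printf("512 entries  -> %u page(s)\n", pages_per_bq(512));	/* prints 1 */
	printf("1024 entries -> %u page(s)\n", pages_per_bq(1024));	/* prints 2 */
	return 0;
}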
@@ -41,7 +41,18 @@
 #define NUM_SMALL_BUFFERS 512
 #define NUM_LARGE_BUFFERS 512
+#define DB_PAGE_SIZE 4096
+
+/* Calculate the number of (4k) pages required to
+ * contain a buffer queue of the given length.
+ */
+#define MAX_DB_PAGES_PER_BQ(x) \
+		(((x * sizeof(u64)) / DB_PAGE_SIZE) + \
+		(((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
+#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
+		MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
+		MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
 #define SMALL_BUFFER_SIZE 256
 #define LARGE_BUFFER_SIZE	PAGE_SIZE
 #define MAX_SPLIT_SIZE 1023
@@ -65,8 +76,6 @@
 #define TX_DESC_PER_OAL 0
 #endif
-#define DB_PAGE_SIZE 4096
-
 /* MPI test register definitions. This register
  * is used for determining alternate NIC function's
  * PCI->func number.
......
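With the definitions above, the per-ring shadow area grows from the old fixed sizeof(u64) * 4 to one u64 for the producer-index shadow plus one u64 page pointer per DB page of each buffer queue. A hedged userspace check of the layout implied by RX_RING_SHADOW_SPACE (u64 is stood in for by uint64_t, and the asserted value assumes the default 512-entry queues):

#include <assert.h>
#include <stdint.h>

#define DB_PAGE_SIZE		4096
#define NUM_SMALL_BUFFERS	512
#define NUM_LARGE_BUFFERS	512

/* Same page math as the driver macro, with u64 spelled uint64_t. */
#define MAX_DB_PAGES_PER_BQ(x) \
	(((x * sizeof(uint64_t)) / DB_PAGE_SIZE) + \
	(((x * sizeof(uint64_t)) % DB_PAGE_SIZE) ? 1 : 0))

#define RX_RING_SHADOW_SPACE (sizeof(uint64_t) + \
	MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(uint64_t) + \
	MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(uint64_t))

int main(void)
{
	/* 8 (prod idx shadow) + 8 (one lbq page ptr) + 8 (one sbq page ptr) */
	assert(RX_RING_SHADOW_SPACE == 24);
	return 0;
}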
@@ -2552,14 +2552,16 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
 	struct cqicb *cqicb = &rx_ring->cqicb;
 	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
-	    (rx_ring->cq_id * sizeof(u64) * 4);
+	    (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
 	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
-	    (rx_ring->cq_id * sizeof(u64) * 4);
+	    (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
 	void __iomem *doorbell_area =
 	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
 	int err = 0;
 	u16 bq_len;
 	u64 tmp;
+	__le64 *base_indirect_ptr;
+	int page_entries;

 	/* Set up the shadow registers for this ring. */
 	rx_ring->prod_idx_sh_reg = shadow_reg;
@@ -2568,8 +2570,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	shadow_reg_dma += sizeof(u64);
 	rx_ring->lbq_base_indirect = shadow_reg;
 	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
-	shadow_reg += sizeof(u64);
-	shadow_reg_dma += sizeof(u64);
+	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
 	rx_ring->sbq_base_indirect = shadow_reg;
 	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
@@ -2606,7 +2608,14 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	if (rx_ring->lbq_len) {
 		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
 		tmp = (u64)rx_ring->lbq_base_dma;;
-		*((__le64 *) rx_ring->lbq_base_indirect) = cpu_to_le64(tmp);
+		base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
+		page_entries = 0;
+		do {
+			*base_indirect_ptr = cpu_to_le64(tmp);
+			tmp += DB_PAGE_SIZE;
+			base_indirect_ptr++;
+			page_entries++;
+		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
 		cqicb->lbq_addr =
 		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
 		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
@@ -2623,7 +2632,14 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	if (rx_ring->sbq_len) {
 		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
 		tmp = (u64)rx_ring->sbq_base_dma;;
-		*((__le64 *) rx_ring->sbq_base_indirect) = cpu_to_le64(tmp);
+		base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
+		page_entries = 0;
+		do {
+			*base_indirect_ptr = cpu_to_le64(tmp);
+			tmp += DB_PAGE_SIZE;
+			base_indirect_ptr++;
+			page_entries++;
+		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
 		cqicb->sbq_addr =
 		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
 		cqicb->sbq_buf_size =
......
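The lbq and sbq population loops are identical in shape. A sketch of the same logic as a single kernel-style helper follows; the fill_bq_page_list() name and signature are hypothetical (the driver open-codes both loops in ql_start_rx_ring()), and the kernel's definitions of __le64, u64, and cpu_to_le64() from <linux/types.h> and <asm/byteorder.h> are assumed:

/* Write one little-endian DMA page pointer per DB page of the queue,
 * starting at base_dma and stepping in DB_PAGE_SIZE increments.
 * base_indirect is the CPU-visible shadow array the hardware will read.
 */
static void fill_bq_page_list(__le64 *base_indirect, u64 base_dma,
			      unsigned int q_len)
{
	unsigned int page_entries = 0;

	do {
		*base_indirect++ = cpu_to_le64(base_dma);
		base_dma += DB_PAGE_SIZE;
		page_entries++;
	} while (page_entries < MAX_DB_PAGES_PER_BQ(q_len));
}

A do/while fits here because MAX_DB_PAGES_PER_BQ() is always at least 1, so a queue that fits in a single page still gets its one pointer written, matching the pre-patch behavior.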