Commit 03a0e14b, authored by Benjamin Poirier, committed by Greg Kroah-Hartman

staging: qlge: Deduplicate rx buffer queue management

The qlge driver (and device) uses two kinds of buffers for reception,
so-called "small buffers" and "large buffers". The two are arranged in
rings, the sbq and lbq. These two share similar data structures and code.

Factor out data structures into a common struct qlge_bq, make required
adjustments to code and dedup the most obvious cases of copy/paste.

This patch should not introduce any functional change other than to some of
the printk format strings.
Signed-off-by: Benjamin Poirier <bpoirier@suse.com>
Link: https://lore.kernel.org/r/20190927101210.23856-9-bpoirier@suse.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent cf1c2987
...@@ -1358,23 +1358,6 @@ struct tx_ring_desc { ...@@ -1358,23 +1358,6 @@ struct tx_ring_desc {
struct tx_ring_desc *next; struct tx_ring_desc *next;
}; };
/* Pre-patch descriptor for a chunk carved out of a larger "master" page
 * used to fill large-buffer (lbq) entries.  Replaced by struct
 * qlge_page_chunk in the new code.
 */
struct page_chunk {
struct page *page; /* master page the chunk was carved from */
char *va; /* virt addr for this chunk */
u64 map; /* DMA mapping of the master page */
unsigned int offset; /* byte offset of this chunk within the master page */
};
/* Pre-patch software-side control block for one rx buffer-queue entry.
 * The union reflects the two buffer kinds: page chunks back large
 * buffers, skbs back small buffers.  Replaced by struct qlge_bq_desc.
 */
struct bq_desc {
union {
struct page_chunk pg_chunk; /* large-buffer backing */
struct sk_buff *skb; /* small-buffer backing */
} p;
__le64 *addr; /* slot in the hw ring where the buffer address is written */
u32 index; /* position of this descriptor in its queue */
DEFINE_DMA_UNMAP_ADDR(mapaddr); /* saved dma addr for later unmap */
};
#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) #define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
struct tx_ring { struct tx_ring {
...@@ -1413,6 +1396,56 @@ enum { ...@@ -1413,6 +1396,56 @@ enum {
RX_Q = 4, /* Handles inbound completions. */ RX_Q = 4, /* Handles inbound completions. */
}; };
/* A chunk carved out of a larger master page, used to back one
 * large-buffer (lbq) entry.
 */
struct qlge_page_chunk {
struct page *page; /* master page this chunk belongs to */
void *va; /* virt addr including offset */
unsigned int offset; /* byte offset of this chunk within the page */
};
/* Software-side control block for one entry of a buffer queue (struct
 * qlge_bq), common to both the small- and large-buffer rings.
 */
struct qlge_bq_desc {
union {
/* for large buffers */
struct qlge_page_chunk pg_chunk;
/* for small buffers */
struct sk_buff *skb;
} p;
dma_addr_t dma_addr; /* bus address of the buffer handed to the device */
/* address in ring where the buffer address (dma_addr) is written for
 * the device
 */
__le64 *buf_ptr;
u32 index; /* position of this descriptor in its queue */
DEFINE_DMA_UNMAP_ADDR(mapaddr); /* saved dma addr for dma_unmap_*() */
};
/* buffer queue: common state shared by the small-buffer (sbq) and
 * large-buffer (lbq) rx rings, factored out of struct rx_ring.  The
 * embedded type tag lets QLGE_BQ_CONTAINER() recover the parent
 * rx_ring from a qlge_bq pointer.
 */
struct qlge_bq {
__le64 *base; /* cpu address of the hw ring of buffer addresses */
dma_addr_t base_dma; /* bus address of that ring */
__le64 *base_indirect; /* page-indirection table for the ring */
dma_addr_t base_indirect_dma; /* bus address of the indirection table */
struct qlge_bq_desc *queue; /* array of sw control blocks, one per entry */
void __iomem *prod_idx_db_reg; /* producer-index doorbell register */
u32 len; /* entry count */
u32 size; /* size in bytes of hw ring */
u32 prod_idx; /* current sw prod idx */
u32 curr_idx; /* next entry we expect */
u32 clean_idx; /* beginning of new descs */
u32 free_cnt; /* free buffer desc cnt */
enum {
QLGE_SB, /* small buffer */
QLGE_LB, /* large buffer */
} type;
};
/* QLGE_BQ_CONTAINER() - map a struct qlge_bq pointer back to its parent
 * struct rx_ring, container_of()-style.  The bq's type tag selects which
 * member offset (sbq or lbq) to subtract.  The argument is evaluated
 * exactly once, into the single-underscore local _bq.
 */
#define QLGE_BQ_CONTAINER(bq) \
({ \
	typeof(bq) _bq = (bq); \
	(struct rx_ring *)((char *)_bq - (_bq->type == QLGE_SB ? \
					  offsetof(struct rx_ring, sbq) : \
					  offsetof(struct rx_ring, lbq))); \
})
struct rx_ring { struct rx_ring {
struct cqicb cqicb; /* The chip's completion queue init control block. */ struct cqicb cqicb; /* The chip's completion queue init control block. */
...@@ -1430,33 +1463,12 @@ struct rx_ring { ...@@ -1430,33 +1463,12 @@ struct rx_ring {
void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */ void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
/* Large buffer queue elements. */ /* Large buffer queue elements. */
u32 lbq_len; /* entry count */ struct qlge_bq lbq;
u32 lbq_size; /* size in bytes of queue */ struct qlge_page_chunk master_chunk;
void *lbq_base; dma_addr_t chunk_dma_addr;
dma_addr_t lbq_base_dma;
void *lbq_base_indirect;
dma_addr_t lbq_base_indirect_dma;
struct page_chunk pg_chunk; /* current page for chunks */
struct bq_desc *lbq; /* array of control blocks */
void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
u32 lbq_prod_idx; /* current sw prod idx */
u32 lbq_curr_idx; /* next entry we expect */
u32 lbq_clean_idx; /* beginning of new descs */
u32 lbq_free_cnt; /* free buffer desc cnt */
/* Small buffer queue elements. */ /* Small buffer queue elements. */
u32 sbq_len; /* entry count */ struct qlge_bq sbq;
u32 sbq_size; /* size in bytes of queue */
void *sbq_base;
dma_addr_t sbq_base_dma;
void *sbq_base_indirect;
dma_addr_t sbq_base_indirect_dma;
struct bq_desc *sbq; /* array of control blocks */
void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
u32 sbq_prod_idx; /* current sw prod idx */
u32 sbq_curr_idx; /* next entry we expect */
u32 sbq_clean_idx; /* beginning of new descs */
u32 sbq_free_cnt; /* free buffer desc cnt */
/* Misc. handler elements. */ /* Misc. handler elements. */
u32 type; /* Type of queue, tx, rx. */ u32 type; /* Type of queue, tx, rx. */
......
...@@ -1758,39 +1758,39 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring) ...@@ -1758,39 +1758,39 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry); pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg); pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base); pr_err("rx_ring->lbq.base = %p\n", rx_ring->lbq.base);
pr_err("rx_ring->lbq_base_dma = %llx\n", pr_err("rx_ring->lbq.base_dma = %llx\n",
(unsigned long long) rx_ring->lbq_base_dma); (unsigned long long)rx_ring->lbq.base_dma);
pr_err("rx_ring->lbq_base_indirect = %p\n", pr_err("rx_ring->lbq.base_indirect = %p\n",
rx_ring->lbq_base_indirect); rx_ring->lbq.base_indirect);
pr_err("rx_ring->lbq_base_indirect_dma = %llx\n", pr_err("rx_ring->lbq.base_indirect_dma = %llx\n",
(unsigned long long) rx_ring->lbq_base_indirect_dma); (unsigned long long)rx_ring->lbq.base_indirect_dma);
pr_err("rx_ring->lbq = %p\n", rx_ring->lbq); pr_err("rx_ring->lbq = %p\n", rx_ring->lbq.queue);
pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len); pr_err("rx_ring->lbq.len = %d\n", rx_ring->lbq.len);
pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size); pr_err("rx_ring->lbq.size = %d\n", rx_ring->lbq.size);
pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n", pr_err("rx_ring->lbq.prod_idx_db_reg = %p\n",
rx_ring->lbq_prod_idx_db_reg); rx_ring->lbq.prod_idx_db_reg);
pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx); pr_err("rx_ring->lbq.prod_idx = %d\n", rx_ring->lbq.prod_idx);
pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx); pr_err("rx_ring->lbq.curr_idx = %d\n", rx_ring->lbq.curr_idx);
pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx); pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt); pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base); pr_err("rx_ring->sbq.base = %p\n", rx_ring->sbq.base);
pr_err("rx_ring->sbq_base_dma = %llx\n", pr_err("rx_ring->sbq.base_dma = %llx\n",
(unsigned long long) rx_ring->sbq_base_dma); (unsigned long long)rx_ring->sbq.base_dma);
pr_err("rx_ring->sbq_base_indirect = %p\n", pr_err("rx_ring->sbq.base_indirect = %p\n",
rx_ring->sbq_base_indirect); rx_ring->sbq.base_indirect);
pr_err("rx_ring->sbq_base_indirect_dma = %llx\n", pr_err("rx_ring->sbq.base_indirect_dma = %llx\n",
(unsigned long long) rx_ring->sbq_base_indirect_dma); (unsigned long long)rx_ring->sbq.base_indirect_dma);
pr_err("rx_ring->sbq = %p\n", rx_ring->sbq); pr_err("rx_ring->sbq = %p\n", rx_ring->sbq.queue);
pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len); pr_err("rx_ring->sbq.len = %d\n", rx_ring->sbq.len);
pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size); pr_err("rx_ring->sbq.size = %d\n", rx_ring->sbq.size);
pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n", pr_err("rx_ring->sbq.prod_idx_db_reg addr = %p\n",
rx_ring->sbq_prod_idx_db_reg); rx_ring->sbq.prod_idx_db_reg);
pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx); pr_err("rx_ring->sbq.prod_idx = %d\n", rx_ring->sbq.prod_idx);
pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx); pr_err("rx_ring->sbq.curr_idx = %d\n", rx_ring->sbq.curr_idx);
pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx); pr_err("rx_ring->sbq.clean_idx = %d\n", rx_ring->sbq.clean_idx);
pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt); pr_err("rx_ring->sbq.free_cnt = %d\n", rx_ring->sbq.free_cnt);
pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id); pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
pr_err("rx_ring->irq = %d\n", rx_ring->irq); pr_err("rx_ring->irq = %d\n", rx_ring->irq);
pr_err("rx_ring->cpu = %d\n", rx_ring->cpu); pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment