Commit 9eb22357 authored by Mintz, Yuval; committed by David S. Miller

qede: Better utilize the qede_[rt]x_queue

Improve the cacheline usage of both queues by reordering their fields -
this reduces the cachelines required for egress datapath processing
from 3 to 2, and those required for ingress datapath processing by 2.

It also changes a couple of datapath-related functions that currently
require either the fastpath or the qede_dev, basing them on the
tx/rx queue instead.
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8a472530
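
The reordering follows a hot/cold split: fields touched for every packet are packed into the leading cachelines, slowpath-only fields and statistics are pushed toward the end, and ____cacheline_aligned starts a fresh cacheline where a new access group begins. A minimal sketch of the pattern (illustrative field names only, not the driver's actual structures):

	#include <linux/types.h>
	#include <linux/cache.h>	/* ____cacheline_aligned */
	#include <linux/compiler.h>	/* __iomem */

	/* Illustrative only: hot fields first, then a new cacheline for the
	 * next access group, cold/slowpath fields last.
	 */
	struct example_queue {
		u16 cons;			/* hot: advanced per packet */
		u16 prod;			/* hot: advanced per packet */
		void __iomem *db_addr;		/* hot: doorbell */

		u64 pkts ____cacheline_aligned;	/* next group on a fresh cacheline */

		u64 alloc_errors;		/* cold: statistics */
		void *handle;			/* cold: slowpath handle */
	};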
@@ -243,27 +243,33 @@ struct qede_agg_info {
 };
 
 struct qede_rx_queue {
 	__le16 *hw_cons_ptr;
-	struct sw_rx_data *sw_rx_ring;
-	u16 sw_rx_cons;
-	u16 sw_rx_prod;
-	struct qed_chain rx_bd_ring;
-	struct qed_chain rx_comp_ring;
-	void __iomem *hw_rxq_prod_addr;
-
-	/* GRO */
-	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
-
-	int rx_buf_size;
-	unsigned int rx_buf_seg_size;
-
-	u16 num_rx_buffers;
-	u16 rxq_id;
-
-	u64 rcv_pkts;
-	u64 rx_hw_errors;
-	u64 rx_alloc_errors;
-	u64 rx_ip_frags;
+	void __iomem *hw_rxq_prod_addr;
+
+	/* Required for the allocation of replacement buffers */
+	struct device *dev;
+
+	u16 sw_rx_cons;
+	u16 sw_rx_prod;
+
+	u16 num_rx_buffers; /* Slowpath */
+	u8 rxq_id;
+
+	u32 rx_buf_size;
+	u32 rx_buf_seg_size;
+
+	u64 rcv_pkts;
+
+	struct sw_rx_data *sw_rx_ring;
+	struct qed_chain rx_bd_ring;
+	struct qed_chain rx_comp_ring ____cacheline_aligned;
+
+	/* GRO */
+	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
+
+	u64 rx_hw_errors;
+	u64 rx_alloc_errors;
+	u64 rx_ip_frags;
 
 	void *handle;
 };
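
The new struct device *dev member ("Required for the allocation of replacement buffers") is what lets RX buffer replenishment be driven from the rxq alone, without reaching back to the qede_dev. The .c side of that change is not part of this hunk; below is only a sketch of an rxq-only replacement-buffer allocation, assuming the driver's existing sw_rx_data and eth_rx_bd types and the NUM_RX_BDS_MAX mask:

	/* Sketch only -- assumes the qede.h definitions above are in scope */
	static int example_alloc_replacement_buffer(struct qede_rx_queue *rxq)
	{
		struct sw_rx_data *sw_rx_data;
		struct eth_rx_bd *rx_bd;
		dma_addr_t mapping;
		struct page *data;

		/* Fast-path allocation; cannot sleep */
		data = alloc_pages(GFP_ATOMIC, 0);
		if (unlikely(!data))
			return -ENOMEM;

		/* rxq->dev replaces &edev->pdev->dev for the DMA mapping */
		mapping = dma_map_page(rxq->dev, data, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
			__free_page(data);
			return -ENOMEM;
		}

		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
		sw_rx_data->page_offset = 0;
		sw_rx_data->data = data;
		sw_rx_data->mapping = mapping;

		/* Advertise the buffer on the hardware BD ring */
		rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
		rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
		rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));

		rxq->sw_rx_prod++;

		return 0;
	}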
@@ -281,22 +287,28 @@ struct sw_tx_bd {
 };
 
 struct qede_tx_queue {
-	int index; /* Queue index */
-	__le16 *hw_cons_ptr;
-	struct sw_tx_bd *sw_tx_ring;
-	u16 sw_tx_cons;
-	u16 sw_tx_prod;
-	struct qed_chain tx_pbl;
-	void __iomem *doorbell_addr;
-	union db_prod tx_db;
-
-	u16 num_tx_buffers;
-	u64 xmit_pkts;
-	u64 stopped_cnt;
-
-	bool is_legacy;
-	void *handle;
+	bool is_legacy;
+	u16 sw_tx_cons;
+	u16 sw_tx_prod;
+	u16 num_tx_buffers; /* Slowpath only */
+
+	u64 xmit_pkts;
+	u64 stopped_cnt;
+
+	__le16 *hw_cons_ptr;
+
+	/* Needed for the mapping of packets */
+	struct device *dev;
+
+	void __iomem *doorbell_addr;
+	union db_prod tx_db;
+	int index; /* Slowpath only */
+
+	struct sw_tx_bd *sw_tx_ring;
+	struct qed_chain tx_pbl;
+
+	/* Slowpath; Should be kept in end [unless missing padding] */
+	void *handle;
 };
 
 #define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
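
On the Tx side, struct device *dev ("Needed for the mapping of packets") plays the same role: the transmit path can DMA-map skb data through txq->dev instead of edev->pdev->dev. Again only a sketch - the .c side is not in this hunk, and it assumes the driver's eth_tx_1st_bd type and BD_SET_UNMAP_ADDR_LEN() helper:

	/* Sketch only -- maps the linear part of an skb via the queue itself */
	static int example_map_skb_header(struct qede_tx_queue *txq,
					  struct sk_buff *skb,
					  struct eth_tx_1st_bd *first_bd)
	{
		dma_addr_t mapping;

		/* txq->dev replaces &edev->pdev->dev */
		mapping = dma_map_single(txq->dev, skb->data,
					 skb_headlen(skb), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(txq->dev, mapping)))
			return -ENOMEM;

		/* Record address/length in the first BD for a later unmap */
		BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

		return 0;
	}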
@@ -363,8 +375,7 @@ void __qede_lock(struct qede_dev *edev);
 void __qede_unlock(struct qede_dev *edev);
 bool qede_has_rx_work(struct qede_rx_queue *rxq);
 int qede_txq_has_work(struct qede_tx_queue *txq);
-void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
-			     u8 count);
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
 void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
 
 #define RX_RING_SIZE_POW 13
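
Dropping the qede_dev argument from qede_recycle_rx_bd_ring() is possible because the rxq now carries everything needed to re-post a buffer. The function body lives in the driver's .c file and is not shown here; a sketch of the edev-less version, assuming the existing qede_reuse_page() and qede_rx_bd_ring_consume() helpers are likewise converted to take only the rxq:

	/* Sketch only -- re-post 'count' buffers from the consumer index */
	void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
	{
		struct sw_rx_data *curr_cons;

		for (; count > 0; count--) {
			curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];

			/* Re-post the current consumer buffer to the BD ring and
			 * advance the software consumer; neither helper needs the
			 * qede_dev any longer.
			 */
			qede_reuse_page(rxq, curr_cons);
			qede_rx_bd_ring_consume(rxq);
		}
	}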
@@ -1337,13 +1337,13 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
 				break;
 			}
 
-			qede_recycle_rx_bd_ring(rxq, edev, 1);
+			qede_recycle_rx_bd_ring(rxq, 1);
 			qed_chain_recycle_consumed(&rxq->rx_comp_ring);
 			break;
 		}
 
 		DP_INFO(edev, "Not the transmitted packet\n");
-		qede_recycle_rx_bd_ring(rxq, edev, 1);
+		qede_recycle_rx_bd_ring(rxq, 1);
 		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
 	}