Commit cb6aeb07 authored by Yuval Mintz, committed by David S. Miller

qede: Add support for XDP_TX

Add support for forwarding via XDP. Once an eBPF program is attached,
the driver would allocate & configure a designated transmission queue
meant solely for forwarding packets. Said queue would share the
receive-queue's interrupt line, and would have its own Tx statistics.

Infrastructure changes required for this [spread out through the code]:
 - Determine the DMA direction of the receive buffers based on the
   presence of the eBPF program [a rough sketch follows the commit header].
 - Turn the sw Tx ring into a union, as regular/XDP queues have different
   needs for releasing resources after completion [regular requires the SKB,
   XDP requires the transmitted page].
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 496e0517
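
The DMA-direction point in the first bullet is easiest to see in code. The
following is a minimal sketch, not the driver's actual implementation; the
helper name qede_rx_dma_dir() is hypothetical, but the rule it encodes is the
one the commit describes: once an XDP program may turn a received page around
and transmit it (XDP_TX), receive buffers must be mapped bidirectionally.

/* Hypothetical helper, assuming the usual kernel DMA/BPF headers are in
 * scope; qede_rx_queue::data_direction would be derived from a check like
 * this when the Rx queue is configured.
 */
static enum dma_data_direction qede_rx_dma_dir(struct bpf_prog *xdp_prog)
{
        /* No program attached: the device only ever writes the buffer. */
        if (!xdp_prog)
                return DMA_FROM_DEVICE;

        /* XDP_TX may hand the same page back to the device. */
        return DMA_BIDIRECTIONAL;
}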
@@ -258,6 +258,7 @@ struct qede_rx_queue {
 	u16 sw_rx_prod;
 	u16 num_rx_buffers; /* Slowpath */
+	u8 data_direction;
 	u8 rxq_id;
 	u32 rx_buf_size;
@@ -294,6 +295,7 @@ struct sw_tx_bd {
 };
 
 struct qede_tx_queue {
+	u8 is_xdp;
 	bool is_legacy;
 	u16 sw_tx_cons;
 	u16 sw_tx_prod;
@@ -310,8 +312,18 @@ struct qede_tx_queue {
 	void __iomem *doorbell_addr;
 	union db_prod tx_db;
 	int index; /* Slowpath only */
+#define QEDE_TXQ_XDP_TO_IDX(edev, txq)	((txq)->index - \
+					 QEDE_MAX_TSS_CNT(edev))
+#define QEDE_TXQ_IDX_TO_XDP(edev, idx)	((idx) + QEDE_MAX_TSS_CNT(edev))
+
+	/* Regular Tx requires skb + metadata for release purpose,
+	 * while XDP requires only the pages themselves.
+	 */
+	union {
+		struct sw_tx_bd *skbs;
+		struct page **pages;
+	} sw_tx_ring;
-	struct sw_tx_bd *sw_tx_ring;
 	struct qed_chain tx_pbl;
 
 	/* Slowpath; Should be kept in end [unless missing padding] */
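
To make the union's purpose concrete, here is an illustrative-only completion
step (the function below is hypothetical and not part of the patch); it shows
the two release paths the comment above refers to: a regular queue has to free
the skb tracked in sw_tx_ring.skbs, while an XDP forwarding queue only has to
release the page tracked in sw_tx_ring.pages.

/* Illustrative sketch, assuming the qede driver headers; DMA unmapping and
 * error handling are omitted for brevity.
 */
static void example_release_completed_bd(struct qede_tx_queue *txq, u16 idx)
{
        if (txq->is_xdp) {
                /* XDP_TX queue: only the forwarded page was pinned. */
                __free_page(txq->sw_tx_ring.pages[idx]);
                txq->sw_tx_ring.pages[idx] = NULL;
        } else {
                /* Regular queue: the skb and its metadata must be released. */
                dev_kfree_skb_any(txq->sw_tx_ring.skbs[idx].skb);
                txq->sw_tx_ring.skbs[idx].skb = NULL;
        }
}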
@@ -336,10 +348,12 @@ struct qede_fastpath {
 #define QEDE_FASTPATH_COMBINED	(QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
 	u8 type;
 	u8 id;
+	u8 xdp_xmit;
 	struct napi_struct napi;
 	struct qed_sb_info *sb_info;
 	struct qede_rx_queue *rxq;
 	struct qede_tx_queue *txq;
+	struct qede_tx_queue *xdp_tx;
 
 #define VEC_NAME_SIZE	(sizeof(((struct net_device *)0)->name) + 8)
 	char name[VEC_NAME_SIZE];
...
@@ -165,8 +165,13 @@ static void qede_get_strings_stats_txq(struct qede_dev *edev,
 	int i;
 
 	for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
-		sprintf(*buf, "%d: %s", txq->index,
-			qede_tqstats_arr[i].string);
+		if (txq->is_xdp)
+			sprintf(*buf, "%d [XDP]: %s",
+				QEDE_TXQ_XDP_TO_IDX(edev, txq),
+				qede_tqstats_arr[i].string);
+		else
+			sprintf(*buf, "%d: %s", txq->index,
+				qede_tqstats_arr[i].string);
 		*buf += ETH_GSTRING_LEN;
 	}
 }
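
As a worked example (queue counts assumed for illustration, not taken from the
commit): if QEDE_MAX_TSS_CNT(edev) were 8, the XDP queue paired with Rx queue 0
would carry txq->index 8, QEDE_TXQ_XDP_TO_IDX() would map it back to 0, and its
ethtool strings would read "0 [XDP]: <stat>" alongside the regular Tx queue's
"0: <stat>" entries.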
@@ -195,6 +200,9 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
 		if (fp->type & QEDE_FASTPATH_RX)
 			qede_get_strings_stats_rxq(edev, fp->rxq, &buf);
 
+		if (fp->type & QEDE_FASTPATH_XDP)
+			qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);
+
 		if (fp->type & QEDE_FASTPATH_TX)
 			qede_get_strings_stats_txq(edev, fp->txq, &buf);
 	}
@@ -268,6 +276,9 @@ static void qede_get_ethtool_stats(struct net_device *dev,
 		if (fp->type & QEDE_FASTPATH_RX)
 			qede_get_ethtool_stats_rxq(fp->rxq, &buf);
 
+		if (fp->type & QEDE_FASTPATH_XDP)
+			qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);
+
 		if (fp->type & QEDE_FASTPATH_TX)
 			qede_get_ethtool_stats_txq(fp->txq, &buf);
 	}
@@ -305,6 +316,9 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
 		/* Account for the Regular Rx statistics */
 		num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;
 
+		/* Account for XDP statistics [if needed] */
+		if (edev->xdp_prog)
+			num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_TQSTATS;
 		return num_stats;
 	case ETH_SS_PRIV_FLAGS:
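
For a rough sense of scale (numbers assumed for illustration): with 8 RSS
queues, attaching an XDP program grows the ETH_SS_STATS count by another
8 * QEDE_NUM_TQSTATS entries, one block of Tx-queue counters per forwarding
queue, which is what the edev->xdp_prog check above accounts for.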
@@ -1216,7 +1230,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	/* Fill the entry in the SW ring and the BDs in the FW ring */
 	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
-	txq->sw_tx_ring[idx].skb = skb;
+	txq->sw_tx_ring.skbs[idx].skb = skb;
 	first_bd = qed_chain_produce(&txq->tx_pbl);
 	memset(first_bd, 0, sizeof(*first_bd));
 	val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
@@ -1270,7 +1284,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
 			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
 	txq->sw_tx_cons++;
-	txq->sw_tx_ring[idx].skb = NULL;
+	txq->sw_tx_ring.skbs[idx].skb = NULL;
 	return 0;
 }
...