Commit 1feeab80 authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: XDP, Add array for WQE info descriptors

Each xdp_wqe_info instance describes the number of data segments
and WQEBBs of its WQE.
This is useful for a downstream patch that adds support for
the Multi-Packet TX WQE feature.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent fea28dd6
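
The change is easiest to read as a small bookkeeping model. The sketch below is a simplified, self-contained illustration (hypothetical names and types, not the driver's actual code): every posted WQE records how many WQEBBs it occupies and how many data segments, and therefore xdpi FIFO entries, it owns, so the completion path can retire whole WQEs instead of assuming one FIFO entry per WQEBB.

/*
 * Minimal sketch of the bookkeeping this patch introduces.
 * Simplified, hypothetical types -- not the driver's structures.
 */
#include <stdint.h>

struct wqe_info {
        uint8_t num_wqebbs;  /* WQEBBs this WQE occupies in the cyclic WQ */
        uint8_t num_ds;      /* data segments == xdpi FIFO entries it owns */
};

struct xdpsq_model {
        struct wqe_info *wqe_info;  /* one slot per WQ entry */
        uint16_t cc;                /* consumer counter */
        uint16_t pc;                /* producer counter */
        uint16_t wq_mask;           /* WQ size - 1, power of two */
};

/* Completion path: retire one whole WQE, releasing num_ds FIFO entries. */
static void complete_one_wqe(struct xdpsq_model *sq)
{
        struct wqe_info *wi = &sq->wqe_info[sq->cc & sq->wq_mask];
        int i;

        sq->cc += wi->num_wqebbs;  /* skip every WQEBB of this WQE */
        for (i = 0; i < wi->num_ds; i++)
                ;  /* pop one xdpi FIFO entry and unmap or recycle it here */
}

In this patch both counters are pre-initialized to 1 for every WQE, so behavior is unchanged; the loops in the diff below simply become ready for WQEs that span several WQEBBs and carry several data segments.
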
@@ -411,6 +411,11 @@ struct mlx5e_xdp_info_fifo {
         u32 mask;
 };
 
+struct mlx5e_xdp_wqe_info {
+        u8 num_wqebbs;
+        u8 num_ds;
+};
+
 struct mlx5e_xdpsq {
         /* data path */
 
@@ -430,6 +435,7 @@ struct mlx5e_xdpsq {
         struct mlx5_wq_cyc         wq;
         struct mlx5e_xdpsq_stats  *stats;
         struct {
+                struct mlx5e_xdp_wqe_info *wqe_info;
                 struct mlx5e_xdp_info_fifo xdpi_fifo;
         } db;
         void __iomem              *uar_map;
@@ -199,19 +199,27 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
                           get_cqe_opcode(cqe));
 
                 do {
-                        struct mlx5e_xdp_info xdpi =
-                                mlx5e_xdpi_fifo_pop(xdpi_fifo);
+                        struct mlx5e_xdp_wqe_info *wi;
+                        u16 ci, j;
 
                         last_wqe = (sqcc == wqe_counter);
-                        sqcc++;
-
-                        if (is_redirect) {
-                                xdp_return_frame(xdpi.xdpf);
-                                dma_unmap_single(sq->pdev, xdpi.dma_addr,
-                                                 xdpi.xdpf->len, DMA_TO_DEVICE);
-                        } else {
-                                /* Recycle RX page */
-                                mlx5e_page_release(rq, &xdpi.di, true);
+                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+                        wi = &sq->db.wqe_info[ci];
+
+                        sqcc += wi->num_wqebbs;
+
+                        for (j = 0; j < wi->num_ds; j++) {
+                                struct mlx5e_xdp_info xdpi =
+                                        mlx5e_xdpi_fifo_pop(xdpi_fifo);
+
+                                if (is_redirect) {
+                                        xdp_return_frame(xdpi.xdpf);
+                                        dma_unmap_single(sq->pdev, xdpi.dma_addr,
+                                                         xdpi.xdpf->len, DMA_TO_DEVICE);
+                                } else {
+                                        /* Recycle RX page */
+                                        mlx5e_page_release(rq, &xdpi.di, true);
+                                }
                         }
                 } while (!last_wqe);
         } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
@@ -233,18 +241,26 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
         bool is_redirect = !rq;
 
         while (sq->cc != sq->pc) {
-                struct mlx5e_xdp_info xdpi =
-                        mlx5e_xdpi_fifo_pop(xdpi_fifo);
+                struct mlx5e_xdp_wqe_info *wi;
+                u16 ci, i;
 
-                sq->cc++;
+                ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+                wi = &sq->db.wqe_info[ci];
 
-                if (is_redirect) {
-                        xdp_return_frame(xdpi.xdpf);
-                        dma_unmap_single(sq->pdev, xdpi.dma_addr,
-                                         xdpi.xdpf->len, DMA_TO_DEVICE);
-                } else {
-                        /* Recycle RX page */
-                        mlx5e_page_release(rq, &xdpi.di, false);
+                sq->cc += wi->num_wqebbs;
+
+                for (i = 0; i < wi->num_ds; i++) {
+                        struct mlx5e_xdp_info xdpi =
+                                mlx5e_xdpi_fifo_pop(xdpi_fifo);
+
+                        if (is_redirect) {
+                                xdp_return_frame(xdpi.xdpf);
+                                dma_unmap_single(sq->pdev, xdpi.dma_addr,
+                                                 xdpi.xdpf->len, DMA_TO_DEVICE);
+                        } else {
+                                /* Recycle RX page */
+                                mlx5e_page_release(rq, &xdpi.di, false);
+                        }
                 }
         }
 }
@@ -993,6 +993,7 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
 {
         kvfree(sq->db.xdpi_fifo.xi);
+        kvfree(sq->db.wqe_info);
 }
 
 static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
@@ -1015,8 +1016,14 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
 
 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
 {
+        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
         int err;
 
+        sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
+                                        GFP_KERNEL, numa);
+        if (!sq->db.wqe_info)
+                return -ENOMEM;
+
         err = mlx5e_alloc_xdpsq_fifo(sq, numa);
         if (err) {
                 mlx5e_free_xdpsq_db(sq);
@@ -1606,6 +1613,7 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
 
         /* Pre initialize fixed WQE fields */
         for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
+                struct mlx5e_xdp_wqe_info *wi  = &sq->db.wqe_info[i];
                 struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
                 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
                 struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
@@ -1616,6 +1624,9 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
 
                 dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
                 dseg->lkey = sq->mkey_be;
+
+                wi->num_wqebbs = 1;
+                wi->num_ds     = 1;
         }
 
         return 0;
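
For context on the downstream use, here is a hedged, hypothetical illustration (the helper and names are invented for this note, not taken from the patch series) of how a Multi-Packet TX WQE producer could fill the new counters: one WQE carries several packets, each packet contributes one data segment and one xdpi FIFO entry, and the WQEBB count is the total DS count rounded up to whole 64-byte building blocks, assuming mlx5's 16-byte segments (4 DS per WQEBB).

/*
 * Hypothetical example, not part of this patch: filling the counters
 * for a multi-packet WQE. Assumes 64-byte WQEBBs and 16-byte data
 * segments (4 DS per WQEBB), as in mlx5.
 */
#define DS_PER_WQEBB 4

struct xdp_wqe_info_example {
        unsigned char num_wqebbs;
        unsigned char num_ds;
};

static void fill_mpwqe_info(struct xdp_wqe_info_example *wi,
                            unsigned int ctrl_ds,  /* ctrl + eth segments */
                            unsigned int pkts)     /* one data segment per packet */
{
        unsigned int ds_cnt = ctrl_ds + pkts;

        wi->num_ds     = pkts;  /* xdpi FIFO entries to pop on completion */
        wi->num_wqebbs = (ds_cnt + DS_PER_WQEBB - 1) / DS_PER_WQEBB;
}

With counters like these, the completion loops above pop exactly as many FIFO entries as the WQE produced and advance the consumer counter past all of its WQEBBs.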