Commit c1783e74 authored by Tariq Toukan, committed by David S. Miller

net/mlx5e: XDP, Add support for multi-buffer XDP redirect-in

Handle multi-buffer XDP redirect-in requests coming through
mlx5e_xdp_xmit.

Extend struct mlx5e_xmit_data_frags with an additional dma_arr field that
points to the fragments' DMA mappings, as they cannot be retrieved via the
page_pool_get_dma_addr() function.
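
For illustration, the extended descriptor as it appears in the hunk below;
the inline comments are explanatory notes added here, not part of the patch:

struct mlx5e_xmit_data_frags {
	struct mlx5e_xmit_data xd;     /* linear part: data, len, dma_addr, has_frags */
	struct skb_shared_info *sinfo; /* fragments of the multi-buffer frame */
	dma_addr_t *dma_arr;           /* per-frag DMA addresses for redirect-in frames;
					* NULL when the frags are page_pool pages */
};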

Push a dma_addr xdpi instance for each fragment, and use these entries in
the completion flow to dma_unmap the frags.
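
Condensed from the hunks below: the per-fragment push in mlx5e_xdp_xmit and
the matching pop/unmap in the completion path (mlx5e_free_xdpsq_desc).
Error handling is omitted in this sketch:

/* transmit side: one xdpi entry per fragment DMA mapping */
for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
			     (union mlx5e_xdp_info) { .frame.dma_addr = dma_arr[j] });

/* completion side: pop in the same order and unmap each fragment */
for (j = 0; j < sinfo->nr_frags; j++) {
	xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
	dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
			 skb_frag_size(&sinfo->frags[j]), DMA_TO_DEVICE);
}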

Finally, remove the multi-buffer restriction in mlx5e_open_xdpsq, and
advertise NETDEV_XDP_ACT_NDO_XMIT_SG in xdp_features.
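
Condensed from the last two hunks of the diff below: multi-buffer is now
enabled on redirect SQs as well, and SG support on the ndo_xdp_xmit path is
advertised to the XDP core:

/* mlx5e_open_xdpsq(): no longer gated on !is_redirect */
if (param->is_xdp_mb)
	set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);

/* mlx5e_set_xdp_feature(): advertise SG for redirected frames */
val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
      NETDEV_XDP_ACT_XSK_ZEROCOPY |
      NETDEV_XDP_ACT_NDO_XMIT |
      NETDEV_XDP_ACT_NDO_XMIT_SG;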
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3f734b8c
@@ -87,6 +87,7 @@ struct mlx5e_xmit_data {
 struct mlx5e_xmit_data_frags {
 	struct mlx5e_xmit_data xd;
 	struct skb_shared_info *sinfo;
+	dma_addr_t *dma_arr;
 };
 
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -126,6 +126,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 	if (xdptxd->has_frags) {
 		xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
+		xdptxdf.dma_arr = NULL;
 		for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
 			skb_frag_t *frag = &xdptxdf.sinfo->frags[i];
@@ -548,7 +549,8 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 			skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
 			dma_addr_t addr;
 
-			addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+			addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+				page_pool_get_dma_addr(skb_frag_page(frag)) +
 				skb_frag_off(frag);
 
 			dseg++;
@@ -601,6 +603,21 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
 			dma_unmap_single(sq->pdev, dma_addr,
 					 xdpf->len, DMA_TO_DEVICE);
+			if (xdp_frame_has_frags(xdpf)) {
+				struct skb_shared_info *sinfo;
+				int j;
+
+				sinfo = xdp_get_shared_info_from_frame(xdpf);
+				for (j = 0; j < sinfo->nr_frags; j++) {
+					skb_frag_t *frag = &sinfo->frags[j];
+
+					xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+					dma_addr = xdpi.frame.dma_addr;
+
+					dma_unmap_single(sq->pdev, dma_addr,
+							 skb_frag_size(frag), DMA_TO_DEVICE);
+				}
+			}
 			xdp_return_frame_bulk(xdpf, bq);
 			break;
 		}
@@ -759,23 +776,57 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	sq = &priv->channels.c[sq_num]->xdpsq;
 
 	for (i = 0; i < n; i++) {
+		struct mlx5e_xmit_data_frags xdptxdf = {};
 		struct xdp_frame *xdpf = frames[i];
-		struct mlx5e_xmit_data xdptxd = {};
+		dma_addr_t dma_arr[MAX_SKB_FRAGS];
+		struct mlx5e_xmit_data *xdptxd;
 		bool ret;
 
-		xdptxd.data = xdpf->data;
-		xdptxd.len = xdpf->len;
+		xdptxd = &xdptxdf.xd;
+		xdptxd->data = xdpf->data;
+		xdptxd->len = xdpf->len;
+		xdptxd->has_frags = xdp_frame_has_frags(xdpf);
 
-		xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
-						 xdptxd.len, DMA_TO_DEVICE);
+		xdptxd->dma_addr = dma_map_single(sq->pdev, xdptxd->data,
+						  xdptxd->len, DMA_TO_DEVICE);
 
-		if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr)))
+		if (unlikely(dma_mapping_error(sq->pdev, xdptxd->dma_addr)))
 			break;
 
+		if (xdptxd->has_frags) {
+			int j;
+
+			xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
+			xdptxdf.dma_arr = dma_arr;
+
+			for (j = 0; j < xdptxdf.sinfo->nr_frags; j++) {
+				skb_frag_t *frag = &xdptxdf.sinfo->frags[j];
+
+				dma_arr[j] = dma_map_single(sq->pdev, skb_frag_address(frag),
+							    skb_frag_size(frag), DMA_TO_DEVICE);
+				if (!dma_mapping_error(sq->pdev, dma_arr[j]))
+					continue;
+
+				/* mapping error */
+				while (--j >= 0)
+					dma_unmap_single(sq->pdev, dma_arr[j],
							 skb_frag_size(&xdptxdf.sinfo->frags[j]),
+							 DMA_TO_DEVICE);
+				goto out;
+			}
+		}
+
 		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-				      mlx5e_xmit_xdp_frame, sq, &xdptxd, 0);
+				      mlx5e_xmit_xdp_frame, sq, xdptxd, 0);
 		if (unlikely(!ret)) {
-			dma_unmap_single(sq->pdev, xdptxd.dma_addr,
-					 xdptxd.len, DMA_TO_DEVICE);
+			int j;
+
+			dma_unmap_single(sq->pdev, xdptxd->dma_addr,
+					 xdptxd->len, DMA_TO_DEVICE);
+			if (!xdptxd->has_frags)
+				break;
+			for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
+				dma_unmap_single(sq->pdev, dma_arr[j],
+						 skb_frag_size(&xdptxdf.sinfo->frags[j]),
+						 DMA_TO_DEVICE);
 			break;
 		}
@@ -785,10 +836,19 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
 				     (union mlx5e_xdp_info) { .frame.xdpf = xdpf });
 		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
-				     (union mlx5e_xdp_info) { .frame.dma_addr = xdptxd.dma_addr });
+				     (union mlx5e_xdp_info) { .frame.dma_addr = xdptxd->dma_addr });
+		if (xdptxd->has_frags) {
+			int j;
+
+			for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
+				mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+						     (union mlx5e_xdp_info)
+						     { .frame.dma_addr = dma_arr[j] });
+		}
 		nxmit++;
 	}
 
+out:
 	if (flags & XDP_XMIT_FLUSH) {
 		if (sq->mpwqe.wqe)
 			mlx5e_xdp_mpwqe_complete(sq);
@@ -1862,11 +1862,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
 	csp.min_inline_mode = sq->min_inline_mode;
 	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 
-	/* Don't enable multi buffer on XDP_REDIRECT SQ, as it's not yet
-	 * supported by upstream, and there is no defined trigger to allow
-	 * transmitting redirected multi-buffer frames.
-	 */
-	if (param->is_xdp_mb && !is_redirect)
+	if (param->is_xdp_mb)
 		set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
 
 	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
@@ -4068,7 +4064,8 @@ void mlx5e_set_xdp_feature(struct net_device *netdev)
 
 	val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
 	      NETDEV_XDP_ACT_XSK_ZEROCOPY |
-	      NETDEV_XDP_ACT_NDO_XMIT;
+	      NETDEV_XDP_ACT_NDO_XMIT |
+	      NETDEV_XDP_ACT_NDO_XMIT_SG;
 	if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
 		val |= NETDEV_XDP_ACT_RX_SG;
 	xdp_set_features_flag(netdev, val);