Commit 44f4fd03 authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: Validate striding RQ before enabling XDP

Currently, the driver can silently fall back to legacy RQ after enabling
XDP, even if striding RQ was active before. This happens when PAGE_SIZE
is bigger than the maximum supported stride size. This commit makes the
behavior more straightforward: if an operation (enabling XDP) doesn't
support the current parameters (striding RQ mode), it fails.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 7e49abb1
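
For context, the failure mode described in the message can be shown with a small stand-alone C sketch. The numbers and the helper are illustrative assumptions only (a 64K-page system against an assumed 8K maximum stride, i.e. 1 << MLX5_MPWQE_LOG_STRIDE_SZ_MAX per the comment in the diff below); none of this code is part of the patch:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values: a 64K-page system (e.g. some arm64/ppc64 configs)
 * against an assumed 8K maximum HW stride size. */
#define EXAMPLE_PAGE_SIZE	65536u
#define EXAMPLE_MAX_STRIDE	8192u

/* XDP uses strides of PAGE_SIZE on regular RQs, so striding RQ can work
 * with XDP only if a full page fits into a single stride. */
static bool xdp_stride_fits(unsigned int page_size, unsigned int max_stride)
{
	return page_size <= max_stride;
}

int main(void)
{
	/* Before the patch this case silently fell back to legacy RQ;
	 * after it, enabling XDP fails with an error instead. */
	printf("striding RQ with XDP possible: %s\n",
	       xdp_stride_fits(EXAMPLE_PAGE_SIZE, EXAMPLE_MAX_STRIDE) ? "yes" : "no");
	return 0;
}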
@@ -320,22 +320,27 @@ bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
 	       link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
 }
 
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
-				struct mlx5e_params *params)
+int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
-		return false;
+		return -EOPNOTSUPP;
 
-	if (params->xdp_prog) {
-		/* XSK params are not considered here. If striding RQ is in use,
-		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
-		 * be called with the known XSK params.
-		 */
-		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
-			return false;
-	}
+	if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
+		return -EINVAL;
 
-	return true;
+	return 0;
+}
+
+int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
+			     struct mlx5e_xsk_param *xsk)
+{
+	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
+		return -EOPNOTSUPP;
+
+	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+		return -EINVAL;
+
+	return 0;
 }
 
 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
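
The bool-to-errno conversion above follows the usual kernel convention. A minimal stand-alone sketch of why the change matters, with illustrative stand-ins for the driver helpers (not the patch's code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Old style: a predicate; the reason for failure is lost. */
static bool striding_rq_possible(bool hw_cap, bool params_fit)
{
	return hw_cap && params_fit;
}

/* New style: a validator that picks the errno, so a missing HW capability
 * (-EOPNOTSUPP) stays distinct from unsupported parameters (-EINVAL). */
static int mpwrq_validate(bool hw_cap, bool params_fit)
{
	if (!hw_cap)
		return -EOPNOTSUPP;
	if (!params_fit)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("old: %d\n", striding_rq_possible(true, false));	/* 0: but why? */
	printf("new: %d\n", mpwrq_validate(true, false));	/* -EINVAL */
	return 0;
}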
@@ -356,8 +361,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
 
 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
-	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
-		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
+	params->rq_wq_type = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
 		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
 		MLX5_WQ_TYPE_CYCLIC;
 }
@@ -374,7 +378,7 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
 	 */
 	if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
 	     MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
-	    mlx5e_striding_rq_possible(mdev, params) &&
+	    !mlx5e_mpwrq_validate_regular(mdev, params) &&
 	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
 	     !mlx5e_rx_is_linear_skb(params, NULL)))
 		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
@@ -92,7 +92,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
 
 bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
+			     struct mlx5e_xsk_param *xsk);
 void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
@@ -30,7 +30,7 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
 	 */
 	switch (params->rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
+		return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
 	default: /* MLX5_WQ_TYPE_CYCLIC */
 		return mlx5e_rx_is_linear_skb(params, xsk);
 	}
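
Where a bool answer is still needed, as in the switch above and in mlx5e_build_rq_params earlier, the patch negates the int validator. A one-line sketch of the idiom, as used in the hunk above:

/* 0 (valid) -> true; -EOPNOTSUPP or -EINVAL -> false */
return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);

This keeps one source of truth for the checks while boolean call sites stay one-liners.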
@@ -1997,10 +1997,14 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
 	struct mlx5e_params new_params;
 
 	if (enable) {
-		if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
-			return -EOPNOTSUPP;
-		if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
-			return -EINVAL;
+		/* Checking the regular RQ here; mlx5e_validate_xsk_param called
+		 * from mlx5e_open_xsk will check for each XSK queue, and
+		 * mlx5e_safe_switch_params will be reverted if any check fails.
+		 */
+		int err = mlx5e_mpwrq_validate_regular(mdev, &priv->channels.params);
+
+		if (err)
+			return err;
 	} else if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
 		netdev_warn(netdev, "Can't set legacy RQ with HW-GRO/LRO, disable them first\n");
 		return -EINVAL;
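
The comment in this hunk leans on the transactional behavior of mlx5e_safe_switch_params. In caller terms (the revert semantics are taken from that comment, not re-derived from the implementation):

/* Sketch: if a later per-queue check (e.g. an XSK queue) fails during the
 * switch, the old parameters remain active and err is returned. */
err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
if (err)
	return err;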
@@ -4582,8 +4582,20 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 
 	new_params = priv->channels.params;
 	new_params.xdp_prog = prog;
-	if (reset)
-		mlx5e_set_rq_type(priv->mdev, &new_params);
+
+	/* XDP affects striding RQ parameters. Block XDP if striding RQ won't be
+	 * supported with the new parameters: if PAGE_SIZE is bigger than
+	 * MLX5_MPWQE_LOG_STRIDE_SZ_MAX, striding RQ can't be used, even though
+	 * the MTU is small enough for the linear mode, because XDP uses strides
+	 * of PAGE_SIZE on regular RQs.
+	 */
+	if (reset && MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+		/* Checking for regular RQs here; XSK RQs were checked on XSK bind. */
+		err = mlx5e_mpwrq_validate_regular(priv->mdev, &new_params);
+		if (err)
+			goto unlock;
+	}
+
 	old_prog = priv->channels.params.xdp_prog;
 
 	err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);