Commit 179a8b51 authored by David S. Miller

Merge tag 'mlx5-fixes-2023-12-04' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2023-12-04

This series provides bug fixes to the mlx5 driver.

V1->V2:
  - Drop commit #9 ("net/mlx5e: Forbid devlink reload if IPSec rules are
    offloaded"), we are working on a better fix

Please pull and let me know if there is any problem.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5e3f5b81 ca4ef28d
......@@ -826,6 +826,7 @@ enum {
MLX5E_STATE_DESTROYING,
MLX5E_STATE_XDP_TX_ENABLED,
MLX5E_STATE_XDP_ACTIVE,
MLX5E_STATE_CHANNELS_ACTIVE,
};
struct mlx5e_modify_sq_param {
......
......@@ -83,6 +83,9 @@ mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
struct mlx5_flow_spec *spec;
int err;
if (IS_ERR(post_act))
return PTR_ERR(post_act);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
......@@ -111,6 +114,9 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *po
struct mlx5e_post_act_handle *handle;
int err;
if (IS_ERR(post_act))
return ERR_CAST(post_act);
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return ERR_PTR(-ENOMEM);
......
......@@ -121,7 +121,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
sa_entry->esn_state.esn = esn;
if (sa_entry->esn_state.esn_msb)
sa_entry->esn_state.esn = esn;
else
/* According to RFC4303, section "3.3.3. Sequence Number Generation",
* the first packet sent using a given SA will contain a sequence
* number of 1.
*/
sa_entry->esn_state.esn = max_t(u32, esn, 1);
sa_entry->esn_state.esn_msb = esn_msb;
if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
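A hedged note on the hunk above: the floor of 1 is only applied while esn_msb is still zero, because before the first 32-bit wrap a low word of 0 can only mean "nothing sent yet" (RFC 4303, section 3.3.3, says the first packet on an SA carries sequence number 1), whereas after a wrap a low word of 0 is legitimate and must not be bumped. A standalone sketch of that clamp (helper name hypothetical, not part of the driver):

/* Sketch only: lower-bound the low 32 ESN bits before the first wrap,
 * i.e. while the high ESN word is still zero.
 */
static u32 esn_low_floor(u32 esn_low, u32 esn_msb)
{
	return esn_msb ? esn_low : max_t(u32, esn_low, 1);
}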
......@@ -335,6 +342,27 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
switch (x->replay_esn->replay_window) {
case 32:
attrs->replay_esn.replay_window =
MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
break;
case 64:
attrs->replay_esn.replay_window =
MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
break;
case 128:
attrs->replay_esn.replay_window =
MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
break;
case 256:
attrs->replay_esn.replay_window =
MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
break;
default:
WARN_ON(true);
return;
}
}
attrs->dir = x->xso.dir;
......@@ -907,9 +935,11 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
return;
mlx5e_accel_ipsec_fs_cleanup(ipsec);
if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
if (ipsec->netevent_nb.notifier_call) {
unregister_netevent_notifier(&ipsec->netevent_nb);
if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
ipsec->netevent_nb.notifier_call = NULL;
}
if (ipsec->aso)
mlx5e_ipsec_aso_cleanup(ipsec);
destroy_workqueue(ipsec->wq);
kfree(ipsec);
......@@ -1018,6 +1048,12 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
}
}
if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
return -EINVAL;
}
return 0;
}
......@@ -1113,14 +1149,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_free = mlx5e_xfrm_free_state,
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};
static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
.xdo_dev_state_free = mlx5e_xfrm_free_state,
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
......@@ -1138,11 +1166,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
else
netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
netdev->features |= NETIF_F_HW_ESP;
netdev->hw_enc_features |= NETIF_F_HW_ESP;
......
......@@ -189,11 +189,19 @@ struct mlx5e_ipsec_ft {
u32 refcnt;
};
struct mlx5e_ipsec_drop {
struct mlx5_flow_handle *rule;
struct mlx5_fc *fc;
};
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
struct mlx5e_ipsec_drop replay;
struct mlx5e_ipsec_drop auth;
struct mlx5e_ipsec_drop trailer;
};
struct mlx5e_ipsec_miss {
......@@ -201,19 +209,6 @@ struct mlx5e_ipsec_miss {
struct mlx5_flow_handle *rule;
};
struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
struct mlx5e_ipsec_miss sa;
struct mlx5e_ipsec_rule status;
struct mlx5e_ipsec_miss status_drop;
struct mlx5_fc *status_drop_cnt;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
u8 allow_tunnel_mode : 1;
struct xarray ipsec_obj_id_map;
};
struct mlx5e_ipsec_tx_create_attr {
int prio;
int pol_level;
......@@ -248,6 +243,7 @@ struct mlx5e_ipsec {
struct mlx5_ipsec_fs *roce;
u8 is_uplink_rep: 1;
struct mlx5e_ipsec_mpv_work mpv_work;
struct xarray ipsec_obj_id_map;
};
struct mlx5e_ipsec_esn_state {
......
......@@ -32,6 +32,22 @@ struct mlx5e_ipsec_tx {
u8 allow_tunnel_mode : 1;
};
struct mlx5e_ipsec_status_checks {
struct mlx5_flow_group *drop_all_group;
struct mlx5e_ipsec_drop all;
};
struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
struct mlx5e_ipsec_miss sa;
struct mlx5e_ipsec_rule status;
struct mlx5e_ipsec_status_checks status_drops;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
u8 allow_tunnel_mode : 1;
};
/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
......@@ -128,14 +144,37 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}
static int ipsec_status_rule(struct mlx5_core_dev *mdev,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
mlx5_del_flow_rules(rx->status_drops.all.rule);
mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
}
static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
mlx5_del_flow_rules(rx->status.rule);
if (rx != ipsec->rx_esw)
return;
#ifdef CONFIG_MLX5_ESWITCH
mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
#endif
}
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5e_ipsec_rx *rx)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5_flow_table *ft = rx->ft.status;
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_flow_handle *fte;
struct mlx5_flow_handle *rule;
struct mlx5_fc *flow_counter;
struct mlx5_flow_spec *spec;
int err;
......@@ -143,48 +182,273 @@ static int ipsec_status_rule(struct mlx5_core_dev *mdev,
if (!spec)
return -ENOMEM;
/* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
MLX5_SET(copy_action_in, action, src_offset, 0);
MLX5_SET(copy_action_in, action, length, 7);
MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
MLX5_SET(copy_action_in, action, dst_offset, 24);
flow_counter = mlx5_fc_create(mdev, true);
if (IS_ERR(flow_counter)) {
err = PTR_ERR(flow_counter);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule counter, err=%d\n", err);
goto err_cnt;
}
sa_entry->ipsec_rule.auth.fc = flow_counter;
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1, action);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest.counter_id = mlx5_fc_id(flow_counter);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.metadata_reg_c_2,
sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev,
"fail to alloc ipsec copy modify_header_id err=%d\n", err);
goto out_spec;
"Failed to add ipsec rx status drop rule, err=%d\n", err);
goto err_rule;
}
sa_entry->ipsec_rule.auth.rule = rule;
/* create fte */
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
flow_counter = mlx5_fc_create(mdev, true);
if (IS_ERR(flow_counter)) {
err = PTR_ERR(flow_counter);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule counter, err=%d\n", err);
goto err_cnt_2;
}
sa_entry->ipsec_rule.trailer.fc = flow_counter;
dest.counter_id = mlx5_fc_id(flow_counter);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule, err=%d\n", err);
goto err_rule_2;
}
sa_entry->ipsec_rule.trailer.rule = rule;
kvfree(spec);
return 0;
err_rule_2:
mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
err_cnt_2:
mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
err_rule:
mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
err_cnt:
kvfree(spec);
return err;
}
static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5_flow_table *ft = rx->ft.status;
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_fc *flow_counter;
struct mlx5_flow_spec *spec;
int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
flow_counter = mlx5_fc_create(mdev, true);
if (IS_ERR(flow_counter)) {
err = PTR_ERR(flow_counter);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule counter, err=%d\n", err);
goto err_cnt;
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest.counter_id = mlx5_fc_id(flow_counter);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule, err=%d\n", err);
goto err_rule;
}
sa_entry->ipsec_rule.replay.rule = rule;
sa_entry->ipsec_rule.replay.fc = flow_counter;
kvfree(spec);
return 0;
err_rule:
mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
kvfree(spec);
return err;
}
static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table *ft = rx->ft.status;
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_fc *flow_counter;
struct mlx5_flow_spec *spec;
struct mlx5_flow_group *g;
u32 *flow_group_in;
int err = 0;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!flow_group_in || !spec) {
err = -ENOMEM;
goto err_out;
}
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
g = mlx5_create_flow_group(ft, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop flow group, err=%d\n", err);
goto err_out;
}
flow_counter = mlx5_fc_create(mdev, false);
if (IS_ERR(flow_counter)) {
err = PTR_ERR(flow_counter);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule counter, err=%d\n", err);
goto err_cnt;
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest.counter_id = mlx5_fc_id(flow_counter);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule, err=%d\n", err);
goto err_rule;
}
rx->status_drops.drop_all_group = g;
rx->status_drops.all.rule = rule;
rx->status_drops.all.fc = flow_counter;
kvfree(flow_group_in);
kvfree(spec);
return 0;
err_rule:
mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
mlx5_destroy_flow_group(g);
err_out:
kvfree(flow_group_in);
kvfree(spec);
return err;
}
static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.ipsec_syndrome);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.ipsec_syndrome, 0);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.metadata_reg_c_4, 0);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
flow_act.flags = FLOW_ACT_NO_APPEND;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.modify_hdr = modify_hdr;
fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
if (IS_ERR(fte)) {
err = PTR_ERR(fte);
mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
goto out;
rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_warn(ipsec->mdev,
"Failed to add ipsec rx status pass rule, err=%d\n", err);
goto err_rule;
}
rx->status.rule = rule;
kvfree(spec);
rx->status.rule = fte;
rx->status.modify_hdr = modify_hdr;
return 0;
out:
mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
err_rule:
kvfree(spec);
return err;
}
static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
ipsec_rx_status_pass_destroy(ipsec, rx);
ipsec_rx_status_drop_destroy(ipsec, rx);
}
static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
int err;
err = ipsec_rx_status_drop_all_create(ipsec, rx);
if (err)
return err;
err = ipsec_rx_status_pass_create(ipsec, rx, dest);
if (err)
goto err_pass_create;
return 0;
err_pass_create:
ipsec_rx_status_drop_destroy(ipsec, rx);
return err;
}
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
struct mlx5_flow_table *ft,
struct mlx5e_ipsec_miss *miss,
......@@ -333,12 +597,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.sa);
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
if (rx == ipsec->rx_esw) {
mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
} else {
mlx5_del_flow_rules(rx->status.rule);
mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
}
mlx5_ipsec_rx_status_destroy(ipsec, rx);
mlx5_destroy_flow_table(rx->ft.status);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
......@@ -419,7 +678,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
return err;
ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0);
ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
......@@ -428,10 +687,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
if (rx == ipsec->rx_esw)
err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest);
else
err = ipsec_status_rule(mdev, rx, dest);
err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
goto err_add;
......@@ -956,13 +1212,22 @@ static void setup_fte_esp(struct mlx5_flow_spec *spec)
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}
static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
{
/* SPI number */
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
if (encap) {
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters.inner_esp_spi);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters.inner_esp_spi, spi);
} else {
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters.outer_esp_spi);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters.outer_esp_spi, spi);
}
}
static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
......@@ -1052,29 +1317,48 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
struct mlx5_flow_act *flow_act)
{
enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_modify_hdr *modify_hdr;
u8 num_of_actions = 1;
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
switch (dir) {
case XFRM_DEV_OFFLOAD_IN:
MLX5_SET(set_action_in, action, field,
MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_B);
num_of_actions++;
MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
MLX5_SET(set_action_in, action[1], data, val);
MLX5_SET(set_action_in, action[1], offset, 0);
MLX5_SET(set_action_in, action[1], length, 32);
if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
num_of_actions++;
MLX5_SET(set_action_in, action[2], action_type,
MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action[2], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
MLX5_SET(set_action_in, action[2], data, 0);
MLX5_SET(set_action_in, action[2], offset, 0);
MLX5_SET(set_action_in, action[2], length, 32);
}
break;
case XFRM_DEV_OFFLOAD_OUT:
MLX5_SET(set_action_in, action, field,
MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
break;
default:
return -EINVAL;
}
MLX5_SET(set_action_in, action, data, val);
MLX5_SET(set_action_in, action, offset, 0);
MLX5_SET(set_action_in, action, length, 32);
MLX5_SET(set_action_in, action[0], data, val);
MLX5_SET(set_action_in, action[0], offset, 0);
MLX5_SET(set_action_in, action[0], length, 32);
modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
if (IS_ERR(modify_hdr)) {
mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
PTR_ERR(modify_hdr));
......@@ -1321,8 +1605,9 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
else
setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_spi(spec, attrs->spi);
setup_fte_esp(spec);
setup_fte_spi(spec, attrs->spi, attrs->encap);
if (!attrs->encap)
setup_fte_esp(spec);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
......@@ -1372,6 +1657,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
goto err_add_flow;
}
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
err = rx_add_rule_drop_replay(sa_entry, rx);
if (err)
goto err_add_replay;
err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
if (err)
goto err_drop_reason;
kvfree(spec);
sa_entry->ipsec_rule.rule = rule;
......@@ -1380,6 +1674,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
return 0;
err_drop_reason:
if (sa_entry->ipsec_rule.replay.rule) {
mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
}
err_add_replay:
mlx5_del_flow_rules(rule);
err_add_flow:
mlx5_fc_destroy(mdev, counter);
err_add_cnt:
......@@ -1428,7 +1729,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
setup_fte_spi(spec, attrs->spi);
setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
break;
......@@ -1809,8 +2110,11 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int err = 0;
if (esw)
down_write(&esw->mode_lock);
if (esw) {
err = mlx5_esw_lock(esw);
if (err)
return err;
}
if (mdev->num_block_ipsec) {
err = -EBUSY;
......@@ -1821,7 +2125,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
unlock:
if (esw)
up_write(&esw->mode_lock);
mlx5_esw_unlock(esw);
return err;
}
......@@ -1887,6 +2191,17 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
if (ipsec_rule->modify_hdr)
mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
mlx5_del_flow_rules(ipsec_rule->trailer.rule);
mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);
mlx5_del_flow_rules(ipsec_rule->auth.rule);
mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
if (ipsec_rule->replay.rule) {
mlx5_del_flow_rules(ipsec_rule->replay.rule);
mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
}
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}
......@@ -1957,7 +2272,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
kfree(ipsec->rx_ipv6);
if (ipsec->is_uplink_rep) {
xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);
xa_destroy(&ipsec->ipsec_obj_id_map);
mutex_destroy(&ipsec->tx_esw->ft.mutex);
WARN_ON(ipsec->tx_esw->ft.refcnt);
......@@ -2020,7 +2335,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
mutex_init(&ipsec->tx_esw->ft.mutex);
mutex_init(&ipsec->rx_esw->ft.mutex);
ipsec->tx_esw->ns = ns_esw;
xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
} else {
......
......@@ -6,6 +6,8 @@
#include "ipsec.h"
#include "lib/crypto.h"
#include "lib/ipsec_fs_roce.h"
#include "fs_core.h"
#include "eswitch.h"
enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
......@@ -38,7 +40,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
caps |= MLX5_IPSEC_CAP_CRYPTO;
if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
(mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
(mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
is_mdev_legacy_mode(mdev)))) {
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
reformat_add_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
......@@ -95,7 +100,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
MLX5_SET(ipsec_aso, aso_ctx, window_sz,
attrs->replay_esn.replay_window / 64);
attrs->replay_esn.replay_window);
MLX5_SET(ipsec_aso, aso_ctx, mode,
MLX5_IPSEC_ASO_REPLAY_PROTECTION);
}
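One plausible reading of this one-line change (an assumption; the patch does not spell it out): attrs->replay_esn.replay_window now carries the MLX5_IPSEC_ASO_REPLAY_WIN_* encoding set by the switch earlier in this series, and window_sz expects that encoding rather than the window size in bits divided by 64. The two values happen to coincide for 32-, 64- and 128-bit windows but diverge for 256 bits, which is where the old computation went wrong:

/* Sketch only, assuming window_sz expects the 2-bit replay-window encoding. */
u32 old_256 = 256 / 64;                          /* 4, does not fit the encoding */
u32 new_256 = MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;  /* 0x3 */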
......@@ -559,6 +564,7 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
DMA_BIDIRECTIONAL);
kfree(aso);
ipsec->aso = NULL;
}
static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
......
......@@ -2731,6 +2731,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
{
int i;
ASSERT_RTNL();
if (chs->ptp) {
mlx5e_ptp_close(chs->ptp);
chs->ptp = NULL;
......@@ -3012,17 +3013,29 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
if (mlx5e_is_vport_rep(priv))
mlx5e_rep_activate_channels(priv);
set_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
if (priv->rx_res)
mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
}
static void mlx5e_cancel_tx_timeout_work(struct mlx5e_priv *priv)
{
WARN_ON_ONCE(test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state));
if (current_work() != &priv->tx_timeout_work)
cancel_work_sync(&priv->tx_timeout_work);
}
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
if (priv->rx_res)
mlx5e_rx_res_channels_deactivate(priv->rx_res);
clear_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
mlx5e_cancel_tx_timeout_work(priv);
if (mlx5e_is_vport_rep(priv))
mlx5e_rep_deactivate_channels(priv);
......@@ -4801,8 +4814,17 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
struct net_device *netdev = priv->netdev;
int i;
rtnl_lock();
mutex_lock(&priv->state_lock);
/* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
* through this flow. However, channel closing flows have to wait for
* this work to finish while holding rtnl lock too. So either get the
* lock or find that channels are being closed for other reason and
* this work is not relevant anymore.
*/
while (!rtnl_trylock()) {
if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
return;
msleep(20);
}
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
......@@ -4821,7 +4843,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
}
unlock:
mutex_unlock(&priv->state_lock);
rtnl_unlock();
}
......
......@@ -1497,7 +1497,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch,
rpriv->rep->vport);
if (dl_port) {
if (!IS_ERR(dl_port)) {
SET_NETDEV_DEVLINK_PORT(netdev, dl_port);
mlx5e_rep_vnic_reporter_create(priv, dl_port);
}
......
......@@ -444,6 +444,9 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
struct mlx5e_flow_meter_handle *meter;
enum mlx5e_post_meter_type type;
if (IS_ERR(post_act))
return PTR_ERR(post_act);
meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
if (IS_ERR(meter)) {
mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
......@@ -3738,6 +3741,20 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
return err;
}
static int
set_branch_dest_ft(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr)
{
struct mlx5e_post_act *post_act = get_post_action(priv);
if (IS_ERR(post_act))
return PTR_ERR(post_act);
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
return 0;
}
static int
alloc_branch_attr(struct mlx5e_tc_flow *flow,
struct mlx5e_tc_act_branch_ctrl *cond,
......@@ -3761,8 +3778,8 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
break;
case FLOW_ACTION_ACCEPT:
case FLOW_ACTION_PIPE:
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
if (set_branch_dest_ft(flow->priv, attr))
goto out_err;
break;
case FLOW_ACTION_JUMP:
if (*jump_count) {
......@@ -3771,8 +3788,8 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
goto out_err;
}
*jump_count = cond->extval;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
if (set_branch_dest_ft(flow->priv, attr))
goto out_err;
break;
default:
err = -EOPNOTSUPP;
......
......@@ -21,158 +21,6 @@ enum {
MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL,
};
static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
mlx5_del_flow_rules(rx->status_drop.rule);
mlx5_destroy_flow_group(rx->status_drop.group);
mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
}
static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
mlx5_del_flow_rules(rx->status.rule);
mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
}
static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table *ft = rx->ft.status;
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_fc *flow_counter;
struct mlx5_flow_spec *spec;
struct mlx5_flow_group *g;
u32 *flow_group_in;
int err = 0;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!flow_group_in || !spec) {
err = -ENOMEM;
goto err_out;
}
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
g = mlx5_create_flow_group(ft, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop flow group, err=%d\n", err);
goto err_out;
}
flow_counter = mlx5_fc_create(mdev, false);
if (IS_ERR(flow_counter)) {
err = PTR_ERR(flow_counter);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule counter, err=%d\n", err);
goto err_cnt;
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest.counter_id = mlx5_fc_id(flow_counter);
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule, err=%d\n", err);
goto err_rule;
}
rx->status_drop.group = g;
rx->status_drop.rule = rule;
rx->status_drop_cnt = flow_counter;
kvfree(flow_group_in);
kvfree(spec);
return 0;
err_rule:
mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
mlx5_destroy_flow_group(g);
err_out:
kvfree(flow_group_in);
kvfree(spec);
return err;
}
static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.ipsec_syndrome);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.ipsec_syndrome, 0);
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
flow_act.flags = FLOW_ACT_NO_APPEND;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_warn(ipsec->mdev,
"Failed to add ipsec rx status pass rule, err=%d\n", err);
goto err_rule;
}
rx->status.rule = rule;
kvfree(spec);
return 0;
err_rule:
kvfree(spec);
return err;
}
void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
esw_ipsec_rx_status_pass_destroy(ipsec, rx);
esw_ipsec_rx_status_drop_destroy(ipsec, rx);
}
int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
int err;
err = esw_ipsec_rx_status_drop_create(ipsec, rx);
if (err)
return err;
err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest);
if (err)
goto err_pass_create;
return 0;
err_pass_create:
esw_ipsec_rx_status_drop_destroy(ipsec, rx);
return err;
}
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr)
{
......@@ -202,7 +50,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
u32 mapped_id;
int err;
err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
err = xa_alloc_bh(&ipsec->ipsec_obj_id_map, &mapped_id,
xa_mk_value(sa_entry->ipsec_obj_id),
XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
if (err)
......@@ -233,7 +81,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
return 0;
err_header_alloc:
xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
xa_erase_bh(&ipsec->ipsec_obj_id_map, mapped_id);
return err;
}
......@@ -242,7 +90,7 @@ void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
if (sa_entry->rx_mapped_id)
xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
xa_erase_bh(&ipsec->ipsec_obj_id_map,
sa_entry->rx_mapped_id);
}
......@@ -252,7 +100,7 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
struct mlx5e_ipsec *ipsec = priv->ipsec;
void *val;
val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
val = xa_load(&ipsec->ipsec_obj_id_map, id);
if (!val)
return -ENOENT;
......@@ -304,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
xa_for_each(&esw->offloads.vport_reps, i, rep) {
rpriv = rep->rep_data[REP_ETH].priv;
if (!rpriv || !rpriv->netdev)
if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems))
continue;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
......
......@@ -8,11 +8,6 @@ struct mlx5e_ipsec;
struct mlx5e_ipsec_sa_entry;
#ifdef CONFIG_MLX5_ESWITCH
void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx);
int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest);
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr);
int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
......@@ -26,16 +21,6 @@ void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
#else
static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx) {}
static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
return -EINVAL;
}
static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr) {}
......
......@@ -1463,7 +1463,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
int err;
lockdep_assert_held(&esw->mode_lock);
devl_assert_locked(priv_to_devlink(esw->dev));
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
......@@ -1531,7 +1531,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
if (toggle_lag)
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
if (!mlx5_esw_is_fdb_created(esw)) {
ret = mlx5_eswitch_enable_locked(esw, num_vfs);
} else {
......@@ -1554,8 +1553,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
}
}
up_write(&esw->mode_lock);
if (toggle_lag)
mlx5_lag_enable_change(esw->dev);
......@@ -1569,12 +1566,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
return;
devl_assert_locked(priv_to_devlink(esw->dev));
down_write(&esw->mode_lock);
/* If driver is unloaded, this function is called twice by remove_one()
* and mlx5_unload(). Prevent the second call.
*/
if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf)
goto unlock;
return;
esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
......@@ -1603,9 +1599,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
esw->esw_funcs.num_vfs = 0;
else
esw->esw_funcs.num_ec_vfs = 0;
unlock:
up_write(&esw->mode_lock);
}
/* Free resources for corresponding eswitch mode. It is called by devlink
......@@ -1647,10 +1640,8 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
devl_assert_locked(priv_to_devlink(esw->dev));
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
mlx5_eswitch_disable_locked(esw);
esw->mode = MLX5_ESWITCH_LEGACY;
up_write(&esw->mode_lock);
mlx5_lag_enable_change(esw->dev);
}
......@@ -2254,8 +2245,13 @@ bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
if (!mlx5_esw_allowed(esw))
return true;
if (down_read_trylock(&esw->mode_lock) != 0)
if (down_read_trylock(&esw->mode_lock) != 0) {
if (esw->eswitch_operation_in_progress) {
up_read(&esw->mode_lock);
return false;
}
return true;
}
return false;
}
......@@ -2312,7 +2308,8 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
if (down_write_trylock(&esw->mode_lock) == 0)
return -EINVAL;
if (atomic64_read(&esw->user_count) > 0) {
if (esw->eswitch_operation_in_progress ||
atomic64_read(&esw->user_count) > 0) {
up_write(&esw->mode_lock);
return -EBUSY;
}
......@@ -2320,6 +2317,18 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
return esw->mode;
}
int mlx5_esw_lock(struct mlx5_eswitch *esw)
{
down_write(&esw->mode_lock);
if (esw->eswitch_operation_in_progress) {
up_write(&esw->mode_lock);
return -EBUSY;
}
return 0;
}
/**
* mlx5_esw_unlock() - Release write lock on esw mode lock
* @esw: eswitch device.
......
......@@ -383,6 +383,7 @@ struct mlx5_eswitch {
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
u16 enabled_ipsec_vf_count;
bool eswitch_operation_in_progress;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
......@@ -827,6 +828,7 @@ void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
void esw_vport_change_handle_locked(struct mlx5_vport *vport);
......
......@@ -3653,14 +3653,18 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct net *devl_net, *netdev_net;
struct mlx5_eswitch *esw;
esw = mlx5_devlink_eswitch_nocheck_get(devlink);
netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
devl_net = devlink_net(devlink);
bool ret = false;
return net_eq(devl_net, netdev_net);
mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
if (dev->mlx5e_res.uplink_netdev) {
netdev_net = dev_net(dev->mlx5e_res.uplink_netdev);
devl_net = devlink_net(devlink);
ret = net_eq(devl_net, netdev_net);
}
mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
return ret;
}
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
......@@ -3733,13 +3737,16 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
goto unlock;
}
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change mode while devlink traps are active");
err = -EOPNOTSUPP;
goto unlock;
goto skip;
}
err = esw_offloads_start(esw, extack);
} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
......@@ -3749,6 +3756,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = -EINVAL;
}
skip:
down_write(&esw->mode_lock);
esw->eswitch_operation_in_progress = false;
unlock:
mlx5_esw_unlock(esw);
enable_lag:
......@@ -3759,16 +3769,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct mlx5_eswitch *esw;
int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
down_read(&esw->mode_lock);
err = esw_mode_to_devlink(esw->mode, mode);
up_read(&esw->mode_lock);
return err;
return esw_mode_to_devlink(esw->mode, mode);
}
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
......@@ -3862,11 +3868,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (err)
goto out;
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
if (err)
goto out;
if (!err)
esw->offloads.inline_mode = mlx5_mode;
esw->offloads.inline_mode = mlx5_mode;
down_write(&esw->mode_lock);
esw->eswitch_operation_in_progress = false;
up_write(&esw->mode_lock);
return 0;
......@@ -3878,16 +3888,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
struct mlx5_eswitch *esw;
int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
down_read(&esw->mode_lock);
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
up_read(&esw->mode_lock);
return err;
return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
......@@ -3969,6 +3975,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
goto unlock;
}
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
esw_destroy_offloads_fdb_tables(esw);
esw->offloads.encap = encap;
......@@ -3982,6 +3991,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
(void)esw_create_offloads_fdb_tables(esw);
}
down_write(&esw->mode_lock);
esw->eswitch_operation_in_progress = false;
unlock:
up_write(&esw->mode_lock);
return err;
......@@ -3996,9 +4008,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
down_read(&esw->mode_lock);
*encap = esw->offloads.encap;
up_read(&esw->mode_lock);
return 0;
}
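A hedged summary of the locking rework spread across the eswitch hunks above (an interpretation, not stated in the diff): slow devlink operations now set eswitch_operation_in_progress under mode_lock, release the lock while the slow work runs, then retake it to clear the flag, while mlx5_esw_lock()/mlx5_esw_try_lock() return -EBUSY when the flag is set so concurrent mode changes back off instead of sleeping on the semaphore. A minimal sketch of that shape, with do_slow_reconfig() as a hypothetical stand-in:

/* Sketch only: the in-progress flag is read and written under mode_lock,
 * so lockers that find it set can bail out with -EBUSY instead of blocking.
 */
static int esw_slow_reconfig(struct mlx5_eswitch *esw)
{
	int err;

	down_write(&esw->mode_lock);
	if (esw->eswitch_operation_in_progress) {
		up_write(&esw->mode_lock);
		return -EBUSY;
	}
	esw->eswitch_operation_in_progress = true;
	up_write(&esw->mode_lock);

	err = do_slow_reconfig(esw);	/* hypothetical: may sleep, takes other locks */

	down_write(&esw->mode_lock);
	esw->eswitch_operation_in_progress = false;
	up_write(&esw->mode_lock);
	return err;
}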
......
......@@ -325,6 +325,29 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
mlx5_core_err(dev, "Failed to reload FW tracer\n");
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
{
struct pci_dev *bridge = dev->pdev->bus->self;
u16 reg16;
int err;
if (!bridge)
return -EOPNOTSUPP;
err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16);
if (err)
return err;
if ((reg16 & PCI_EXP_SLTCTL_HPIE) && (reg16 & PCI_EXP_SLTCTL_DLLSCE)) {
mlx5_core_warn(dev, "FW reset is not supported as HotPlug is enabled\n");
return -EOPNOTSUPP;
}
return 0;
}
#endif
static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
......@@ -357,6 +380,12 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
return false;
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
err = mlx5_check_hotplug_interrupt(dev);
if (err)
return false;
#endif
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
if (err)
return false;
......
......@@ -621,7 +621,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_140[0x8];
u8 bth_dst_qp[0x18];
u8 reserved_at_160[0x20];
u8 inner_esp_spi[0x20];
u8 outer_esp_spi[0x20];
u8 reserved_at_1a0[0x60];
};
......@@ -12001,6 +12001,13 @@ enum {
MLX5_IPSEC_ASO_INC_SN = 0x2,
};
enum {
MLX5_IPSEC_ASO_REPLAY_WIN_32BIT = 0x0,
MLX5_IPSEC_ASO_REPLAY_WIN_64BIT = 0x1,
MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2,
MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3,
};
struct mlx5_ifc_ipsec_aso_bits {
u8 valid[0x1];
u8 reserved_at_201[0x1];
......