Commit 6e79bd28 authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2023-04-20' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2023-04-19

This series provides bug fixes to the mlx5 driver.

* tag 'mlx5-fixes-2023-04-20' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  Revert "net/mlx5e: Don't use termination table when redundant"
  net/mlx5e: Nullify table pointer when failing to create
  net/mlx5: Use recovery timeout on sync reset flow
  Revert "net/mlx5: Remove "recovery" arg from mlx5_load_one() function"
  net/mlx5e: Fix error flow in representor failing to add vport rx rule
  net/mlx5: Release tunnel device after tc update skb
  net/mlx5: E-switch, Don't destroy indirect table in split rule
  net/mlx5: E-switch, Create per vport table based on devlink encap mode
  net/mlx5e: Release the label when replacing existing ct entry
  net/mlx5e: Don't clone flow post action attributes second time
====================

Link: https://lore.kernel.org/r/20230421015057.355468-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 7ecebee2 081abcac
@@ -202,7 +202,7 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
 			break;
 		/* On fw_activate action, also driver is reloaded and reinit performed */
 		*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
-		ret = mlx5_load_one_devl_locked(dev, false);
+		ret = mlx5_load_one_devl_locked(dev, true);
 		break;
 	default:
 		/* Unsupported action should not get to this function */
...
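The `true` passed above (and in the mlx5_fw_reset_complete_reload() and mlx5_fw_reset_wait_reset_done() hunks further down) is what "Use recovery timeout on sync reset flow" is about: after a firmware sync reset, the driver should wait for the device with the longer recovery budget rather than the normal load timeout. A minimal userspace sketch of the pattern, with made-up timeout values and names rather than the driver's actual constants:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical timeouts; the real driver reads its budgets from firmware caps. */
#define FW_INIT_TIMEOUT_MS	 2000
#define FW_RECOVERY_TIMEOUT_MS	120000

/* One load routine, two waiting budgets: recovery callers get the longer one. */
static int load_one(bool recovery)
{
	unsigned int budget_ms = recovery ? FW_RECOVERY_TIMEOUT_MS
					  : FW_INIT_TIMEOUT_MS;

	printf("waiting up to %u ms for firmware readiness\n", budget_ms);
	/* ... poll firmware init status until ready or the budget expires ... */
	return 0;
}

int main(void)
{
	load_one(false);	/* normal probe/resume path */
	return load_one(true);	/* fw_activate / sync reset path */
}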
@@ -715,5 +715,6 @@ void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
 	return;

 free_skb:
+	dev_put(tc_priv.fwd_dev);
 	dev_kfree_skb_any(skb);
 }
...
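For context, the tunnel forwarding device referenced via tc_priv.fwd_dev is held earlier in the receive path; before this fix the free_skb label freed the skb without dropping that reference, leaking a netdev refcount for every dropped packet. A self-contained sketch of the invariant being restored, using toy types rather than the kernel's:

#include <assert.h>

struct net_device { int refcnt; };

static void dev_hold(struct net_device *d) { d->refcnt++; }
static void dev_put(struct net_device *d)  { d->refcnt--; }

/* The error path must undo the hold taken earlier in the function. */
static void rx_handler(struct net_device *fwd_dev, int bad_skb)
{
	dev_hold(fwd_dev);
	if (bad_skb) {
		dev_put(fwd_dev);	/* the release the fix adds */
		/* free the skb and bail out */
		return;
	}
	/* ... forward the packet, then release as usual ... */
	dev_put(fwd_dev);
}

int main(void)
{
	struct net_device dev = { 0 };

	rx_handler(&dev, 1);
	assert(dev.refcnt == 0);	/* no leaked reference */
	return 0;
}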
@@ -106,22 +106,17 @@ mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
 }

 struct mlx5e_post_act_handle *
-mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr)
+mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *post_attr)
 {
-	u32 attr_sz = ns_to_attr_sz(post_act->ns_type);
 	struct mlx5e_post_act_handle *handle;
-	struct mlx5_flow_attr *post_attr;
 	int err;

 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-	post_attr = mlx5_alloc_flow_attr(post_act->ns_type);
-	if (!handle || !post_attr) {
-		kfree(post_attr);
+	if (!handle) {
 		kfree(handle);
 		return ERR_PTR(-ENOMEM);
 	}

-	memcpy(post_attr, attr, attr_sz);
 	post_attr->chain = 0;
 	post_attr->prio = 0;
 	post_attr->ft = post_act->ft;
@@ -145,7 +140,6 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
 	return handle;

 err_xarray:
-	kfree(post_attr);
 	kfree(handle);
 	return ERR_PTR(err);
 }
@@ -164,7 +158,6 @@ mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_han
 	if (!IS_ERR_OR_NULL(handle->rule))
 		mlx5e_tc_post_act_unoffload(post_act, handle);

 	xa_erase(&post_act->ids, handle->id);
-	kfree(handle->attr);
 	kfree(handle);
 }
...
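The net effect of "Don't clone flow post action attributes second time": mlx5e_tc_post_act_add() no longer allocates and memcpy()s a private copy of the flow attributes; the caller passes in a post_attr it owns, and the handle uses it directly, so the matching kfree()s disappear from the error and teardown paths. A simplified sketch of the new ownership model, with illustrative names:

#include <stdlib.h>

struct flow_attr { int chain, prio; };
struct handle { struct flow_attr *attr; };

/* After the fix: the handle borrows the caller's attr (caller keeps
 * ownership); no second allocation, no memcpy, no free on teardown. */
static struct handle *post_act_add(struct flow_attr *post_attr)
{
	struct handle *h = calloc(1, sizeof(*h));

	if (!h)
		return NULL;
	post_attr->chain = 0;	/* adjust the caller's attr in place */
	post_attr->prio = 0;
	h->attr = post_attr;
	return h;
}

int main(void)
{
	struct flow_attr attr = { .chain = 5, .prio = 2 };
	struct handle *h = post_act_add(&attr);

	free(h);		/* attr itself is not freed here */
	return 0;
}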
@@ -19,7 +19,7 @@ void
 mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act);

 struct mlx5e_post_act_handle *
-mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr);
+mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *post_attr);

 void
 mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle);
...
@@ -14,10 +14,10 @@
 #define MLX5_ESW_VPORT_TBL_SIZE_SAMPLE (64 * 1024)

-static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
+static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
 	.max_fte = MLX5_ESW_VPORT_TBL_SIZE_SAMPLE,
 	.max_num_groups = 0, /* default num of groups */
-	.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP,
+	.flags = 0,
 };

 struct mlx5e_tc_psample {
...
@@ -920,6 +920,7 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
 	zone_rule->rule = rule;
 	mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
 	zone_rule->mh = mh;
+	mlx5_put_label_mapping(ct_priv, old_attr->ct_attr.ct_labels_id);

 	kfree(old_attr);
 	kvfree(spec);
...
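This is the "Release the label when replacing existing ct entry" fix: replacing a zone rule swaps in a new attr, and the label mapping still held by old_attr must be put before old_attr is freed, or the mapping refcount leaks on every replace. A compilable sketch of the balance being restored, with toy types rather than the driver's:

#include <assert.h>
#include <stdlib.h>

/* Toy refcounted mapping; names are illustrative, not the driver's API. */
struct mapping { int refcnt; };

static void mapping_get(struct mapping *m) { m->refcnt++; }
static void mapping_put(struct mapping *m) { m->refcnt--; }

struct ct_attr { struct mapping *labels; };

/* Replacing a rule swaps in new_attr; the fix releases the label mapping
 * held by old_attr before old_attr is freed, keeping refcounts balanced. */
static void replace_rule(struct ct_attr **cur, struct ct_attr *new_attr)
{
	struct ct_attr *old_attr = *cur;

	*cur = new_attr;
	mapping_put(old_attr->labels);	/* the line the fix adds */
	free(old_attr);
}

int main(void)
{
	struct mapping labels = { 0 };
	struct ct_attr *a = calloc(1, sizeof(*a));
	struct ct_attr *b = calloc(1, sizeof(*b));
	struct ct_attr *cur = a;

	a->labels = b->labels = &labels;
	mapping_get(&labels);		/* ref held by a */
	mapping_get(&labels);		/* ref held by b */

	replace_rule(&cur, b);
	assert(labels.refcnt == 1);	/* only b's reference remains */
	free(b);
	return 0;
}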
@@ -783,6 +783,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
 	ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
+		ft->t = NULL;
 		fs_err(fs, "fail to create promisc table err=%d\n", err);
 		return err;
 	}
@@ -810,7 +811,7 @@ static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs)

 static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
 {
-	if (WARN(!fs->promisc.ft.t, "Trying to remove non-existing promiscuous table"))
+	if (!fs->promisc.ft.t)
 		return;
 	mlx5e_del_promisc_rule(fs);
 	mlx5_destroy_flow_table(fs->promisc.ft.t);
@@ -1490,6 +1491,8 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,

 void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
 {
+	if (!fs)
+		return;
 	debugfs_remove_recursive(fs->dfs_root);
 	mlx5e_fs_ethtool_free(fs);
 	mlx5e_fs_tc_free(fs);
...
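Two related hardening changes here, from "Nullify table pointer when failing to create": a failed table creation now leaves NULL rather than an ERR_PTR in ft->t, and mlx5e_fs_cleanup() tolerates both a NULL fs and a never-created table (the following en_main.c and en_rep.c hunks set priv->fs = NULL after cleanup for the same reason). Teardown after a failed init thus becomes safe and idempotent. A compilable sketch of the pattern, using a userspace stand-in for the kernel's ERR_PTR machinery:

#include <stdlib.h>

/* Userspace stand-ins for ERR_PTR()/IS_ERR(). */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct flow_table { int dummy; };
struct steering { struct flow_table *t; };

static struct flow_table *create_flow_table(void)
{
	return ERR_PTR(-12);	/* simulate -ENOMEM from the device */
}

static int create_promisc_table(struct steering *fs)
{
	fs->t = create_flow_table();
	if (IS_ERR(fs->t)) {
		fs->t = NULL;	/* the fix: leave no stale error pointer behind */
		return -1;
	}
	return 0;
}

static void destroy_promisc_table(struct steering *fs)
{
	if (!fs->t)		/* now a reliable "never created" check */
		return;
	free(fs->t);
	fs->t = NULL;
}

int main(void)
{
	struct steering fs = { 0 };

	create_promisc_table(&fs);	/* fails */
	destroy_promisc_table(&fs);	/* safe: no free(ERR_PTR) crash */
	return 0;
}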
@@ -5270,6 +5270,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
 	mlx5e_health_destroy_reporters(priv);
 	mlx5e_ktls_cleanup(priv);
 	mlx5e_fs_cleanup(priv->fs);
+	priv->fs = NULL;
 }

 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
...
@@ -828,6 +828,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
 static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
 {
 	mlx5e_fs_cleanup(priv->fs);
+	priv->fs = NULL;
 }

 static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
@@ -994,6 +995,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	priv->rx_res = NULL;
 err_free_fs:
 	mlx5e_fs_cleanup(priv->fs);
+	priv->fs = NULL;
 	return err;
 }
...
@@ -11,7 +11,7 @@ struct mlx5_vport_key {
 	u16 prio;
 	u16 vport;
 	u16 vhca_id;
-	const struct esw_vport_tbl_namespace *vport_ns;
+	struct esw_vport_tbl_namespace *vport_ns;
 } __packed;

 struct mlx5_vport_table {
@@ -21,6 +21,14 @@ struct mlx5_vport_table {
 	struct mlx5_vport_key key;
 };

+static void
+esw_vport_tbl_init(struct mlx5_eswitch *esw, struct esw_vport_tbl_namespace *ns)
+{
+	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
+		ns->flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+			      MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+}
+
 static struct mlx5_flow_table *
 esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns,
 		     const struct esw_vport_tbl_namespace *vport_ns)
@@ -80,6 +88,7 @@ mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr
 	u32 hkey;

 	mutex_lock(&esw->fdb_table.offloads.vports.lock);
+	esw_vport_tbl_init(esw, attr->vport_ns);
 	hkey = flow_attr_to_vport_key(esw, attr, &skey);
 	e = esw_vport_tbl_lookup(esw, &skey, hkey);
 	if (e) {
@@ -127,6 +136,7 @@ mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr
 	u32 hkey;

 	mutex_lock(&esw->fdb_table.offloads.vports.lock);
+	esw_vport_tbl_init(esw, attr->vport_ns);
 	hkey = flow_attr_to_vport_key(esw, attr, &key);
 	e = esw_vport_tbl_lookup(esw, &key, hkey);
 	if (!e || --e->num_rules)
...
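This is the core of "E-switch, Create per vport table based on devlink encap mode": instead of hard-coding tunnel reformat/decap flags in a const namespace definition (compare the sample and mirror namespace hunks, where the structs lose their const and the hard-coded flags), the flags are derived from the encap mode in force each time a per-vport table is taken or released. A minimal sketch of the idea, with toy enum and flag values:

#include <assert.h>

/* Stand-ins for the devlink encap mode and flow-table flag bits. */
enum encap_mode { ENCAP_MODE_NONE, ENCAP_MODE_BASIC };

#define TBL_TUNNEL_EN_REFORMAT	(1u << 0)
#define TBL_TUNNEL_EN_DECAP	(1u << 1)

struct vport_tbl_ns { unsigned int flags; };

/* Derive table flags from the mode in force right now, not at compile time. */
static void vport_tbl_init(enum encap_mode mode, struct vport_tbl_ns *ns)
{
	if (mode != ENCAP_MODE_NONE)
		ns->flags |= TBL_TUNNEL_EN_REFORMAT | TBL_TUNNEL_EN_DECAP;
}

int main(void)
{
	struct vport_tbl_ns ns = { .flags = 0 };

	vport_tbl_init(ENCAP_MODE_BASIC, &ns);
	assert(ns.flags & TBL_TUNNEL_EN_DECAP);
	return 0;
}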
@@ -674,7 +674,7 @@ struct mlx5_vport_tbl_attr {
 	u32 chain;
 	u16 prio;
 	u16 vport;
-	const struct esw_vport_tbl_namespace *vport_ns;
+	struct esw_vport_tbl_namespace *vport_ns;
 };

 struct mlx5_flow_table *
...
@@ -73,7 +73,7 @@
 #define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)

-static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
+static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
 	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
 	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
 	.flags = 0,
@@ -760,7 +760,6 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	kfree(dest);
 	return rule;
 err_chain_src_rewrite:
-	esw_put_dest_tables_loop(esw, attr, 0, i);
 	mlx5_esw_vporttbl_put(esw, &fwd_attr);
 err_get_fwd:
 	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
@@ -803,7 +802,6 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
 	if (fwd_rule) {
 		mlx5_esw_vporttbl_put(esw, &fwd_attr);
 		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
-		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
 	} else {
 		if (split)
 			mlx5_esw_vporttbl_put(esw, &fwd_attr);
...
@@ -210,18 +210,6 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
 	return (port_mask & port_value) == MLX5_VPORT_UPLINK;
 }

-static bool
-mlx5_eswitch_is_push_vlan_no_cap(struct mlx5_eswitch *esw,
-				 struct mlx5_flow_act *flow_act)
-{
-	if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
-	    !(mlx5_fs_get_capabilities(esw->dev, MLX5_FLOW_NAMESPACE_FDB) &
-	      MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX))
-		return true;
-
-	return false;
-}
-
 bool
 mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
 			      struct mlx5_flow_attr *attr,
@@ -237,7 +225,10 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
 	    (!mlx5_eswitch_offload_is_uplink_port(esw, spec) && !esw_attr->int_port))
 		return false;

-	if (mlx5_eswitch_is_push_vlan_no_cap(esw, flow_act))
+	/* push vlan on RX */
+	if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+	    !(mlx5_fs_get_capabilities(esw->dev, MLX5_FLOW_NAMESPACE_FDB) &
+	      MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX))
 		return true;

 	/* hairpin */
@@ -261,31 +252,19 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
 	struct mlx5_flow_act term_tbl_act = {};
 	struct mlx5_flow_handle *rule = NULL;
 	bool term_table_created = false;
-	bool is_push_vlan_on_rx;
 	int num_vport_dests = 0;
 	int i, curr_dest;

-	is_push_vlan_on_rx = mlx5_eswitch_is_push_vlan_no_cap(esw, flow_act);
 	mlx5_eswitch_termtbl_actions_move(flow_act, &term_tbl_act);
 	term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

 	for (i = 0; i < num_dest; i++) {
 		struct mlx5_termtbl_handle *tt;
-		bool hairpin = false;

 		/* only vport destinations can be terminated */
 		if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
 			continue;

-		if (attr->dests[num_vport_dests].rep &&
-		    attr->dests[num_vport_dests].rep->vport == MLX5_VPORT_UPLINK)
-			hairpin = true;
-
-		if (!is_push_vlan_on_rx && !hairpin) {
-			num_vport_dests++;
-			continue;
-		}
-
 		if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
 			term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
 			term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
@@ -333,9 +312,6 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
 	for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) {
 		struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl;

-		if (!tt)
-			continue;
-
 		attr->dests[curr_dest].termtbl = NULL;

 		/* search for the destination associated with the
...
@@ -167,7 +167,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
 	if (mlx5_health_wait_pci_up(dev))
 		mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
 	else
-		mlx5_load_one(dev);
+		mlx5_load_one(dev, true);
 	devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
 						BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
 						BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
@@ -499,7 +499,7 @@ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
 	err = fw_reset->ret;
 	if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags)) {
 		mlx5_unload_one_devl_locked(dev, false);
-		mlx5_load_one_devl_locked(dev, false);
+		mlx5_load_one_devl_locked(dev, true);
 	}
 out:
 	clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
...
@@ -1509,13 +1509,13 @@ int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery)
 	return err;
 }

-int mlx5_load_one(struct mlx5_core_dev *dev)
+int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
 {
 	struct devlink *devlink = priv_to_devlink(dev);
 	int ret;

 	devl_lock(devlink);
-	ret = mlx5_load_one_devl_locked(dev, false);
+	ret = mlx5_load_one_devl_locked(dev, recovery);
 	devl_unlock(devlink);
 	return ret;
 }
@@ -1912,7 +1912,8 @@ static void mlx5_pci_resume(struct pci_dev *pdev)

 	mlx5_pci_trace(dev, "Enter, loading driver..\n");

-	err = mlx5_load_one(dev);
+	err = mlx5_load_one(dev, false);
+
 	if (!err)
 		devlink_health_reporter_state_update(dev->priv.health.fw_fatal_reporter,
 						     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
@@ -2003,7 +2004,7 @@ static int mlx5_resume(struct pci_dev *pdev)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

-	return mlx5_load_one(dev);
+	return mlx5_load_one(dev, false);
 }

 static const struct pci_device_id mlx5_core_pci_table[] = {
...
@@ -321,7 +321,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev);
 void mlx5_uninit_one(struct mlx5_core_dev *dev);
 void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend);
 void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend);
-int mlx5_load_one(struct mlx5_core_dev *dev);
+int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
 int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
 int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 function_id,
...