Commit 8812c24d authored by Majd Dibbiny's avatar Majd Dibbiny Committed by Saeed Mahameed

net/mlx5: Add fast unload support in shutdown flow

Add support for flushing all HW resources with one FW command and
skipping all the heavy unload flows of the driver on kernel shutdown.
There's no need to free all the SW context since a new fresh kernel
will be loaded afterwards.

Regarding the FW resources, they should be closed, otherwise we will
have leakage in the FW. To accelerate this flow, we execute one command
in the beginning that tells the FW that the driver isn't going to close
any of the FW resources and asks the FW to clean up everything.
Once the command completes, it's safe to close the PCI resources and
finish the routine.
Signed-off-by: default avatarMajd Dibbiny <majd@mellanox.com>
Signed-off-by: default avatarMaor Gottlieb <maorg@mellanox.com>
Signed-off-by: default avatarSaeed Mahameed <saeedm@mellanox.com>
parent 4525abea
...@@ -195,3 +195,31 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) ...@@ -195,3 +195,31 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA); MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
} }
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
int force_state;
int ret;
if (!MLX5_CAP_GEN(dev, force_teardown)) {
mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
return -EOPNOTSUPP;
}
MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE);
ret = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));
if (ret)
return ret;
force_state = MLX5_GET(teardown_hca_out, out, force_state);
if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
mlx5_core_err(dev, "teardown with force mode failed\n");
return -EIO;
}
return 0;
}
...@@ -111,14 +111,14 @@ static int in_fatal(struct mlx5_core_dev *dev) ...@@ -111,14 +111,14 @@ static int in_fatal(struct mlx5_core_dev *dev)
return 0; return 0;
} }
void mlx5_enter_error_state(struct mlx5_core_dev *dev) void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{ {
mutex_lock(&dev->intf_state_mutex); mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto unlock; goto unlock;
mlx5_core_err(dev, "start\n"); mlx5_core_err(dev, "start\n");
if (pci_channel_offline(dev->pdev) || in_fatal(dev)) { if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
trigger_cmd_completions(dev); trigger_cmd_completions(dev);
} }
......
...@@ -1418,7 +1418,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, ...@@ -1418,7 +1418,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s was called\n", __func__); dev_info(&pdev->dev, "%s was called\n", __func__);
mlx5_enter_error_state(dev); mlx5_enter_error_state(dev, false);
mlx5_unload_one(dev, priv, false); mlx5_unload_one(dev, priv, false);
/* In case of kernel call drain the health wq */ /* In case of kernel call drain the health wq */
if (state) { if (state) {
...@@ -1505,14 +1505,42 @@ static const struct pci_error_handlers mlx5_err_handler = { ...@@ -1505,14 +1505,42 @@ static const struct pci_error_handlers mlx5_err_handler = {
.resume = mlx5_pci_resume .resume = mlx5_pci_resume
}; };
/* Try the fast-unload path on shutdown: a single FW command releases
 * all HCA resources instead of running the full driver unload flow.
 *
 * Return: 0 on success; a negative errno tells the caller to fall back
 * to the regular unload path (-EOPNOTSUPP when the FW lacks the
 * capability, -EAGAIN when the device is already in internal error).
 */
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	int err;

	if (!MLX5_CAP_GEN(dev, force_teardown)) {
		mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	err = mlx5_cmd_force_teardown_hca(dev);
	if (err) {
		mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err);
		return err;
	}

	/* Force the device into the internal-error state so that pending
	 * commands are completed and no further FW commands are issued.
	 */
	mlx5_enter_error_state(dev, true);
	return 0;
}
static void shutdown(struct pci_dev *pdev) static void shutdown(struct pci_dev *pdev)
{ {
struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_priv *priv = &dev->priv; struct mlx5_priv *priv = &dev->priv;
int err;
dev_info(&pdev->dev, "Shutdown was called\n"); dev_info(&pdev->dev, "Shutdown was called\n");
/* Notify mlx5 clients that the kernel is being shut down */ /* Notify mlx5 clients that the kernel is being shut down */
set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state); set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
err = mlx5_try_fast_unload(dev);
if (err)
mlx5_unload_one(dev, priv, false); mlx5_unload_one(dev, priv, false);
mlx5_pci_disable_device(dev); mlx5_pci_disable_device(dev);
} }
......
...@@ -83,12 +83,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev); ...@@ -83,12 +83,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param); unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev, void mlx5_core_page_fault(struct mlx5_core_dev *dev,
struct mlx5_pagefault *pfault); struct mlx5_pagefault *pfault);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev); void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5_recover_device(struct mlx5_core_dev *dev); void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev); int mlx5_sriov_init(struct mlx5_core_dev *dev);
......
...@@ -801,7 +801,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { ...@@ -801,7 +801,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_indirection[0x8]; u8 max_indirection[0x8];
u8 fixed_buffer_size[0x1]; u8 fixed_buffer_size[0x1];
u8 log_max_mrw_sz[0x7]; u8 log_max_mrw_sz[0x7];
u8 reserved_at_110[0x2]; u8 force_teardown[0x1];
u8 reserved_at_111[0x1];
u8 log_max_bsf_list_size[0x6]; u8 log_max_bsf_list_size[0x6];
u8 umr_extended_translation_offset[0x1]; u8 umr_extended_translation_offset[0x1];
u8 null_mkey[0x1]; u8 null_mkey[0x1];
...@@ -3094,18 +3095,25 @@ struct mlx5_ifc_tsar_element_bits { ...@@ -3094,18 +3095,25 @@ struct mlx5_ifc_tsar_element_bits {
u8 reserved_at_10[0x10]; u8 reserved_at_10[0x10];
}; };
enum {
MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0,
MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1,
};
struct mlx5_ifc_teardown_hca_out_bits { struct mlx5_ifc_teardown_hca_out_bits {
u8 status[0x8]; u8 status[0x8];
u8 reserved_at_8[0x18]; u8 reserved_at_8[0x18];
u8 syndrome[0x20]; u8 syndrome[0x20];
u8 reserved_at_40[0x40]; u8 reserved_at_40[0x3f];
u8 force_state[0x1];
}; };
enum { enum {
MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0, MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1,
}; };
struct mlx5_ifc_teardown_hca_in_bits { struct mlx5_ifc_teardown_hca_in_bits {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment