Commit 9e50727f authored by David S. Miller

Merge tag 'mlx5-updates-2018-10-03' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2018-10-03

mlx5 core driver and ethernet netdev updates. Please note there is a small
devlink related update to allow an extack argument to the eswitch operations.

From Eli Britstein,
1) devlink: Add extack argument to the eswitch related operations
2) net/mlx5e: E-Switch, return extack messages for failures in the e-switch devlink callbacks
3) net/mlx5e: Add extack messages for TC offload failures
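For illustration, a minimal sketch of what a driver-side devlink callback looks
like with the new argument; the foo_* names are hypothetical, while
NL_SET_ERR_MSG_MOD(), devlink_priv() and the DEVLINK_ESWITCH_MODE_* values are
the real kernel APIs this series uses:

    #include <net/devlink.h>
    #include <linux/netlink.h>

    struct foo_priv;  /* hypothetical driver private struct */
    int foo_apply_eswitch_mode(struct foo_priv *priv, u16 mode); /* hypothetical */

    static int foo_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
                                            struct netlink_ext_ack *extack)
    {
            struct foo_priv *priv = devlink_priv(devlink);

            if (mode != DEVLINK_ESWITCH_MODE_LEGACY &&
                mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) {
                    /* The message travels back to the netlink caller via
                     * extack instead of only landing in the kernel log.
                     */
                    NL_SET_ERR_MSG_MOD(extack, "Unsupported eswitch mode");
                    return -EOPNOTSUPP;
            }

            return foo_apply_eswitch_mode(priv, mode);
    }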

From Eran Ben Elisha,
4) mlx5e: Add counter for aRFS rule insertion failures
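(Usage note: going by the MLX5E_DECLARE_STAT/MLX5E_DECLARE_RX_STAT entries added
below, the new counter should be visible through the driver's existing ethtool
stats, aggregated as rx_arfs_err and per receive ring as rx<N>_arfs_err, e.g.
via `ethtool -S <netdev> | grep arfs_err`; the exact stat names are an
assumption derived from the diff, not stated in the changelog.)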

From Feras Daoud,
5) Fast teardown support for mlx5 device
This change introduces an enhanced version of the existing "force teardown"
that allows SW to perform teardown in a faster way, without the need to
reclaim all the FW pages.
Fast teardown provides the following advantages:
    1- Fix a FW race condition that could cause a command timeout
    2- Avoid moving to polling mode
    3- Close the vport to prevent a PCI ACK from being sent without being
    scattered to memory
A condensed sketch of the resulting unload flow is shown after the commit
metadata below.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f0e834e1 fcd29ad1
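Before the diff, a condensed sketch of the unload flow this series produces,
based on mlx5_try_fast_unload() as modified below (capability checks kept,
health-poll handling and logging omitted; a sketch, not the verbatim patch):

    /* Sketch: try the new fast teardown first, fall back to force teardown. */
    static int try_teardown_sketch(struct mlx5_core_dev *dev)
    {
            int ret;

            if (!MLX5_CAP_GEN(dev, fast_teardown) &&
                !MLX5_CAP_GEN(dev, force_teardown))
                    return -EOPNOTSUPP;  /* neither mode supported by the FW */

            ret = mlx5_cmd_fast_teardown_hca(dev);  /* new: no FW page reclaim */
            if (ret)
                    ret = mlx5_cmd_force_teardown_hca(dev); /* existing fallback */
            if (ret)
                    return ret;

            mlx5_enter_error_state(dev, true);
            return 0;
    }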
@@ -521,7 +521,8 @@ int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 	return 0;
 }
 
-int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
+			     struct netlink_ext_ack *extack)
 {
 	struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
 	int rc = 0;
......
@@ -30,7 +30,8 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
 bool bnxt_dev_is_vf_rep(struct net_device *dev);
 int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
-int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
+			     struct netlink_ext_ack *extack);
 
 #else
......
@@ -3144,7 +3144,8 @@ liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 }
 
 static int
-liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
+liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
+			  struct netlink_ext_ack *extack)
 {
 	struct lio_devlink_priv *priv;
 	struct octeon_device *oct;
......
@@ -543,8 +543,11 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
-		netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
-			   __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
+		priv->channel_stats[arfs_rule->rxq].rq.arfs_err++;
+		mlx5e_dbg(HW, priv,
+			  "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
+			  __func__, arfs_rule->filter_id, arfs_rule->rxq,
+			  tuple->ip_proto, err);
 	}
 
 out:
......
@@ -93,6 +93,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
@@ -170,6 +171,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_cache_busy += rq_stats->cache_busy;
 		s->rx_cache_waive += rq_stats->cache_waive;
 		s->rx_congst_umr += rq_stats->congst_umr;
+		s->rx_arfs_err += rq_stats->arfs_err;
 		s->ch_events += ch_stats->events;
 		s->ch_poll += ch_stats->poll;
 		s->ch_arm += ch_stats->arm;
@@ -1161,6 +1163,7 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
 };
 
 static const struct counter_desc sq_stats_desc[] = {
......
@@ -106,6 +106,7 @@ struct mlx5e_sw_stats {
 	u64 rx_cache_busy;
 	u64 rx_cache_waive;
 	u64 rx_congst_umr;
+	u64 rx_arfs_err;
 	u64 ch_events;
 	u64 ch_poll;
 	u64 ch_arm;
@@ -202,6 +203,7 @@ struct mlx5e_rq_stats {
 	u64 cache_busy;
 	u64 cache_waive;
 	u64 congst_umr;
+	u64 arfs_err;
 };
 
 struct mlx5e_sq_stats {
......
@@ -269,12 +269,15 @@ struct mlx5_esw_flow_attr {
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
 };
 
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+				  struct netlink_ext_ack *extack);
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+					 struct netlink_ext_ack *extack);
 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+					struct netlink_ext_ack *extack);
 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
......
@@ -810,29 +810,35 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
 	return flow_rule;
 }
 
-static int esw_offloads_start(struct mlx5_eswitch *esw)
+static int esw_offloads_start(struct mlx5_eswitch *esw,
+			      struct netlink_ext_ack *extack)
 {
 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
 	if (esw->mode != SRIOV_LEGACY) {
-		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can't set offloads mode, SRIOV legacy not enabled");
 		return -EINVAL;
 	}
 
 	mlx5_eswitch_disable_sriov(esw);
 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
 	if (err) {
-		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Failed setting eswitch to offloads");
 		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
-		if (err1)
-			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
+		if (err1) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Failed setting eswitch back to legacy");
+		}
 	}
 	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
 		if (mlx5_eswitch_inline_mode_get(esw,
 						 num_vfs,
 						 &esw->offloads.inline_mode)) {
 			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
-			esw_warn(esw->dev, "Inline mode is different between vports\n");
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Inline mode is different between vports");
 		}
 	}
 	return err;
@@ -973,17 +979,20 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	return err;
 }
 
-static int esw_offloads_stop(struct mlx5_eswitch *esw)
+static int esw_offloads_stop(struct mlx5_eswitch *esw,
+			     struct netlink_ext_ack *extack)
 {
 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
 	mlx5_eswitch_disable_sriov(esw);
 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 	if (err) {
-		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
 		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
-		if (err1)
-			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
+		if (err1) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Failed setting eswitch back to offloads");
+		}
 	}
 
 	/* enable back PF RoCE */
@@ -1092,7 +1101,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
 	return 0;
 }
 
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+				  struct netlink_ext_ack *extack)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	u16 cur_mlx5_mode, mlx5_mode = 0;
@@ -1111,9 +1121,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
 		return 0;
 
 	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
-		return esw_offloads_start(dev->priv.eswitch);
+		return esw_offloads_start(dev->priv.eswitch, extack);
 	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
-		return esw_offloads_stop(dev->priv.eswitch);
+		return esw_offloads_stop(dev->priv.eswitch, extack);
 	else
 		return -EINVAL;
 }
@@ -1130,7 +1140,8 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }
 
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+					 struct netlink_ext_ack *extack)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1147,14 +1158,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 			return 0;
 		/* fall through */
 	case MLX5_CAP_INLINE_MODE_L2:
-		esw_warn(dev, "Inline mode can't be set\n");
+		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
 		return -EOPNOTSUPP;
 	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
 		break;
 	}
 
 	if (esw->offloads.num_flows > 0) {
-		esw_warn(dev, "Can't set inline mode when flows are configured\n");
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can't set inline mode when flows are configured");
 		return -EOPNOTSUPP;
 	}
 
@@ -1165,8 +1177,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	for (vport = 1; vport < esw->enabled_vports; vport++) {
 		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
 		if (err) {
-			esw_warn(dev, "Failed to set min inline on vport %d\n",
-				 vport);
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Failed to set min inline on vport");
 			goto revert_inline_mode;
 		}
 	}
@@ -1232,7 +1244,8 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 	return 0;
 }
 
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+					struct netlink_ext_ack *extack)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1259,7 +1272,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
 		return 0;
 
 	if (esw->offloads.num_flows > 0) {
-		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can't set encapsulation when flows are configured");
 		return -EOPNOTSUPP;
 	}
 
@@ -1268,7 +1282,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
 	esw->offloads.encap = encap;
 	err = esw_create_offloads_fast_fdb_table(esw);
 	if (err) {
-		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Failed re-creating fast FDB table");
 		esw->offloads.encap = !encap;
 		(void)esw_create_offloads_fast_fdb_table(esw);
 	}
......
@@ -250,7 +250,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
 	if (ret)
 		return ret;
 
-	force_state = MLX5_GET(teardown_hca_out, out, force_state);
+	force_state = MLX5_GET(teardown_hca_out, out, state);
 	if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
 		mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n");
 		return -EIO;
@@ -259,6 +259,54 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
 	return 0;
 }
 
+#define MLX5_FAST_TEARDOWN_WAIT_MS 3000
+int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
+{
+	unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
+	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+	int state;
+	int ret;
+
+	if (!MLX5_CAP_GEN(dev, fast_teardown)) {
+		mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n");
+		return -EOPNOTSUPP;
+	}
+
+	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+	MLX5_SET(teardown_hca_in, in, profile,
+		 MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);
+
+	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (ret)
+		return ret;
+
+	state = MLX5_GET(teardown_hca_out, out, state);
+	if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
+		mlx5_core_warn(dev, "teardown with fast mode failed\n");
+		return -EIO;
+	}
+
+	mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);
+
+	/* Loop until device state turns to disable */
+	end = jiffies + msecs_to_jiffies(delay_ms);
+	do {
+		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+			break;
+
+		cond_resched();
+	} while (!time_after(jiffies, end));
+
+	if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+		dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
+			mlx5_get_nic_state(dev), delay_ms);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 enum mlxsw_reg_mcc_instruction {
 	MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
 	MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
......
@@ -58,23 +58,26 @@ enum {
 	MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10
 };
 
-enum {
-	MLX5_NIC_IFC_FULL = 0,
-	MLX5_NIC_IFC_DISABLED = 1,
-	MLX5_NIC_IFC_NO_DRAM_NIC = 2,
-	MLX5_NIC_IFC_INVALID = 3
-};
-
 enum {
 	MLX5_DROP_NEW_HEALTH_WORK,
 	MLX5_DROP_NEW_RECOVERY_WORK,
 };
 
-static u8 get_nic_state(struct mlx5_core_dev *dev)
+u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
 {
 	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
 }
 
+void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
+{
+	u32 cur_cmdq_addr_l_sz;
+
+	cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz);
+	iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) |
+		    state << MLX5_NIC_IFC_OFFSET,
+		    &dev->iseg->cmdq_addr_l_sz);
+}
+
 static void trigger_cmd_completions(struct mlx5_core_dev *dev)
 {
 	unsigned long flags;
@@ -103,7 +106,7 @@ static int in_fatal(struct mlx5_core_dev *dev)
 	struct mlx5_core_health *health = &dev->priv.health;
 	struct health_buffer __iomem *h = health->health;
 
-	if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+	if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
 		return 1;
 
 	if (ioread32be(&h->fw_ver) == 0xffffffff)
@@ -133,7 +136,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
 
 static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
 {
-	u8 nic_interface = get_nic_state(dev);
+	u8 nic_interface = mlx5_get_nic_state(dev);
 
 	switch (nic_interface) {
 	case MLX5_NIC_IFC_FULL:
@@ -168,7 +171,7 @@ static void health_recover(struct work_struct *work)
 	priv = container_of(health, struct mlx5_priv, health);
 	dev = container_of(priv, struct mlx5_core_dev, priv);
 
-	nic_state = get_nic_state(dev);
+	nic_state = mlx5_get_nic_state(dev);
 	if (nic_state == MLX5_NIC_IFC_INVALID) {
 		dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
 		return;
......
@@ -1594,12 +1594,17 @@ static const struct pci_error_handlers mlx5_err_handler = {
 
 static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
 {
-	int ret;
+	bool fast_teardown = false, force_teardown = false;
+	int ret = 1;
+
+	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
+	force_teardown = MLX5_CAP_GEN(dev, force_teardown);
+
+	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
+	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);
 
-	if (!MLX5_CAP_GEN(dev, force_teardown)) {
-		mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
+	if (!fast_teardown && !force_teardown)
 		return -EOPNOTSUPP;
-	}
 
 	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
 		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
@@ -1612,13 +1617,19 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
 	mlx5_drain_health_wq(dev);
 	mlx5_stop_health_poll(dev, false);
 
+	ret = mlx5_cmd_fast_teardown_hca(dev);
+	if (!ret)
+		goto succeed;
+
 	ret = mlx5_cmd_force_teardown_hca(dev);
-	if (ret) {
-		mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
-		mlx5_start_health_poll(dev);
-		return ret;
-	}
+	if (!ret)
+		goto succeed;
+
+	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+	mlx5_start_health_poll(dev);
+	return ret;
 
+succeed:
 	mlx5_enter_error_state(dev, true);
 
 	/* Some platforms requiring freeing the IRQ's in the shutdown
......
@@ -95,6 +95,8 @@ int mlx5_query_board_id(struct mlx5_core_dev *dev);
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
 int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
+
 void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 		     unsigned long param);
 void mlx5_core_page_fault(struct mlx5_core_dev *dev,
@@ -214,4 +216,14 @@ int mlx5_lag_allow(struct mlx5_core_dev *dev);
 int mlx5_lag_forbid(struct mlx5_core_dev *dev);
 void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
 
+enum {
+	MLX5_NIC_IFC_FULL = 0,
+	MLX5_NIC_IFC_DISABLED = 1,
+	MLX5_NIC_IFC_NO_DRAM_NIC = 2,
+	MLX5_NIC_IFC_INVALID = 3
+};
+
+u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
+void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
+
 #endif /* __MLX5_CORE_H__ */
@@ -177,7 +177,8 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 	return nfp_app_eswitch_mode_get(pf->app, mode);
 }
 
-static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+					struct netlink_ext_ack *extack)
 {
 	struct nfp_pf *pf = devlink_priv(devlink);
 	int ret;
......
@@ -504,6 +504,10 @@ struct health_buffer {
 	__be16 ext_synd;
 };
 
+enum mlx5_cmd_addr_l_sz_offset {
+	MLX5_NIC_IFC_OFFSET = 8,
+};
+
 struct mlx5_init_seg {
 	__be32 fw_rev;
 	__be32 cmdif_rev_fw_sub;
......
@@ -896,7 +896,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 log_max_mkey[0x6];
 	u8 reserved_at_f0[0x8];
 	u8 dump_fill_mkey[0x1];
-	u8 reserved_at_f9[0x3];
+	u8 reserved_at_f9[0x2];
+	u8 fast_teardown[0x1];
 	u8 log_max_eq[0x4];
 
 	u8 max_indirection[0x8];
@@ -3352,12 +3353,13 @@ struct mlx5_ifc_teardown_hca_out_bits {
 	u8 reserved_at_40[0x3f];
 
-	u8 force_state[0x1];
+	u8 state[0x1];
 };
 
 enum {
 	MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
 	MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1,
+	MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN = 0x2,
 };
 
 struct mlx5_ifc_teardown_hca_in_bits {
......
@@ -451,11 +451,14 @@ struct devlink_ops {
 			   u32 *p_cur, u32 *p_max);
 
 	int (*eswitch_mode_get)(struct devlink *devlink, u16 *p_mode);
-	int (*eswitch_mode_set)(struct devlink *devlink, u16 mode);
+	int (*eswitch_mode_set)(struct devlink *devlink, u16 mode,
+				struct netlink_ext_ack *extack);
 	int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode);
-	int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode);
+	int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode,
+				       struct netlink_ext_ack *extack);
 	int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode);
-	int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode);
+	int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode,
+				      struct netlink_ext_ack *extack);
 };
 
 static inline void *devlink_priv(struct devlink *devlink)
......
@@ -1626,7 +1626,7 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
 		if (!ops->eswitch_mode_set)
 			return -EOPNOTSUPP;
 		mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
-		err = ops->eswitch_mode_set(devlink, mode);
+		err = ops->eswitch_mode_set(devlink, mode, info->extack);
 		if (err)
 			return err;
 	}
@@ -1636,7 +1636,8 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
 			return -EOPNOTSUPP;
 		inline_mode = nla_get_u8(
 				info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]);
-		err = ops->eswitch_inline_mode_set(devlink, inline_mode);
+		err = ops->eswitch_inline_mode_set(devlink, inline_mode,
+						   info->extack);
 		if (err)
 			return err;
 	}
@@ -1645,7 +1646,8 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
 		if (!ops->eswitch_encap_mode_set)
 			return -EOPNOTSUPP;
 		encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]);
-		err = ops->eswitch_encap_mode_set(devlink, encap_mode);
+		err = ops->eswitch_encap_mode_set(devlink, encap_mode,
+						  info->extack);
 		if (err)
 			return err;
 	}
......
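(Usage note: with this plumbing in place, a failure in e.g.
`devlink dev eswitch set pci/0000:06:00.0 mode switchdev` should be reported
back through the netlink extack, so recent iproute2 can print the driver's
message, something like "Error: mlx5_core: Can't set offloads mode, SRIOV
legacy not enabled.", instead of only a generic errno; the PCI address is a
placeholder and the exact rendering depends on the devlink tool version.)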