Commit f2b77012 authored by Jakub Kicinski

Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Saeed Mahameed says:

====================
mlx5-next 2022-02-22

The following PR includes updates to the mlx5-next branch:

Headlines:
==========

1) Jakub cleans up unused static inline functions

2) I reworked the low-level firmware command interface return status handling to
give the caller full visibility into the error/status returned by the
firmware (a minimal sketch of the new convention follows this list).

3) Use the new command interface in RDMA DEVX use cases to avoid flooding
dmesg with "expected", user-triggered command failures.

4) Moshe also uses the new command interface to extract the specific error
code from the MFRL register command, providing the exact reason why the
SW reset could not be performed internally by the FW.

5) From Mark Bloch: Lag, drop packets in hardware when possible

In active-backup mode, packets arriving on the inactive interface are dropped
by the bond device. In switchdev mode, where TC rules are offloaded to the
FDB, such packets can hit rules in the hardware FDB that, without offload,
they would never have reached, because the bond would have dropped them
before the TC rules in the kernel.

Create a drop rule to make sure packets on inactive ports are dropped
before reaching the FDB.

Listen for NETDEV_CHANGEUPPER / NETDEV_CHANGEINFODATA events, record the
inactive state, and offload the drop rule accordingly (sketched below).
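
To make the convention from items 2-4 concrete, here is a minimal,
illustrative sketch of how a caller is expected to use the reworked API.
mlx5_cmd_do(), mlx5_cmd_check() and mlx5_cmd_exec() are the real entry
points added or kept by this series; the wrapper function itself is only
an example, not driver code:

/* Return-value contract of mlx5_cmd_do():
 *   0           - command succeeded, outbox is valid
 *   -EREMOTEIO  - command reached the FW and the FW returned a bad
 *                 status; the outbox holds the status/syndrome details
 *   other < 0   - driver/transport failure, the outbox is undefined
 */
static int example_exec_cmd(struct mlx5_core_dev *dev, void *in, int in_sz,
			    void *out, int out_sz)
{
	int err;

	err = mlx5_cmd_do(dev, in, in_sz, out, out_sz);
	if (err == -EREMOTEIO) {
		/* FW-level failure: parse the outbox here, or copy it
		 * back to user space as the DEVX handlers below do.
		 */
		return err;
	}
	if (err)
		return err; /* never reached FW; the outbox is not valid */

	return 0;
}

mlx5_cmd_exec() keeps its historical behavior and is now simply
mlx5_cmd_do() followed by mlx5_cmd_check(), as the create_cq hunk below
shows; the DEVX handlers call mlx5_cmd_do() directly so that "expected"
FW failures can be returned to user space (the err2 ?: err pattern)
instead of being logged.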
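
Similarly, a condensed sketch of the active-backup drop offload from
item 5, following the mlx5_lag_drop_rule_setup() hunk below (the helper
names are the ones added by this series; locking and some error handling
are omitted for brevity):

static void example_refresh_drop_rule(struct mlx5_lag *ldev,
				      struct lag_tracker *tracker)
{
	u8 v2p_port1, v2p_port2;
	int inactive_idx;

	/* Always remove the old drop rule first, so a port that just
	 * became active never keeps dropping traffic.
	 */
	mlx5_lag_drop_rule_cleanup(ldev);

	if (!tracker->has_inactive)
		return;

	/* The tx affinity mapping identifies the active port; the other
	 * port is inactive and gets a drop rule in its uplink ingress ACL.
	 */
	mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1, &v2p_port2);
	inactive_idx = v2p_port1 == MLX5_LAG_EGRESS_PORT_1 ? MLX5_LAG_P2
							   : MLX5_LAG_P1;

	if (!mlx5_esw_acl_ingress_vport_drop_rule_create(
			ldev->pf[inactive_idx].dev->priv.eswitch,
			MLX5_VPORT_UPLINK))
		ldev->pf[inactive_idx].has_drop = true;
}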

* 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
  net/mlx5: Add clarification on sync reset failure
  net/mlx5: Add reset_state field to MFRL register
  RDMA/mlx5: Use new command interface API
  net/mlx5: cmdif, Refactor error handling and reporting of async commands
  net/mlx5: Use mlx5_cmd_do() in core create_{cq,dct}
  net/mlx5: cmdif, Add new api for command execution
  net/mlx5: cmdif, cmd_check refactoring
  net/mlx5: cmdif, Return value improvements
  net/mlx5: Lag, offload active-backup drops to hardware
  net/mlx5: Lag, record inactive state of bond device
  net/mlx5: Lag, don't use magic numbers for ports
  net/mlx5: Lag, use local variable already defined to access E-Switch
  net/mlx5: E-switch, add drop rule support to ingress ACL
  net/mlx5: E-switch, remove special uplink ingress ACL handling
  net/mlx5: E-Switch, reserve and use same uplink metadata across ports
  net/mlx5: Add ability to insert to specific flow group
  mlx5: remove unused static inlines
====================

Link: https://lore.kernel.org/r/20220223233930.319301-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 0b9e69e1 45fee8ed
......@@ -1055,7 +1055,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
int cmd_out_len = uverbs_attr_get_len(attrs,
MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
void *cmd_out;
int err;
int err, err2;
int uid;
c = devx_ufile2uctx(attrs);
......@@ -1076,14 +1076,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
return PTR_ERR(cmd_out);
MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
err = mlx5_cmd_exec(dev->mdev, cmd_in,
err = mlx5_cmd_do(dev->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
cmd_out, cmd_out_len);
if (err)
if (err && err != -EREMOTEIO)
return err;
return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
cmd_out_len);
return err2 ?: err;
}
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
......@@ -1457,7 +1459,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct devx_obj *obj;
u16 obj_type = 0;
int err;
int err, err2 = 0;
int uid;
u32 obj_id;
u16 opcode;
......@@ -1497,15 +1499,18 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
!is_apu_cq(dev, cmd_in)) {
obj->flags |= DEVX_OBJ_FLAGS_CQ;
obj->core_cq.comp = devx_cq_comp;
err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
err = mlx5_create_cq(dev->mdev, &obj->core_cq,
cmd_in, cmd_in_len, cmd_out,
cmd_out_len);
} else {
err = mlx5_cmd_exec(dev->mdev, cmd_in,
cmd_in_len,
err = mlx5_cmd_do(dev->mdev, cmd_in, cmd_in_len,
cmd_out, cmd_out_len);
}
if (err == -EREMOTEIO)
err2 = uverbs_copy_to(attrs,
MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
cmd_out, cmd_out_len);
if (err)
goto obj_free;
......@@ -1548,7 +1553,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
sizeof(out));
obj_free:
kfree(obj);
return err;
return err2 ?: err;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
......@@ -1563,7 +1568,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
void *cmd_out;
int err;
int err, err2;
int uid;
if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
......@@ -1586,14 +1591,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
devx_set_umem_valid(cmd_in);
err = mlx5_cmd_exec(mdev->mdev, cmd_in,
err = mlx5_cmd_do(mdev->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
cmd_out, cmd_out_len);
if (err)
if (err && err != -EREMOTEIO)
return err;
return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
cmd_out, cmd_out_len);
return err2 ?: err;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
......@@ -1607,7 +1614,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
void *cmd_out;
int err;
int err, err2;
int uid;
struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
......@@ -1629,14 +1636,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
return PTR_ERR(cmd_out);
MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
err = mlx5_cmd_exec(mdev->mdev, cmd_in,
err = mlx5_cmd_do(mdev->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
cmd_out, cmd_out_len);
if (err)
if (err && err != -EREMOTEIO)
return err;
return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
cmd_out, cmd_out_len);
return err2 ?: err;
}
struct devx_async_event_queue {
......
......@@ -140,6 +140,19 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}
static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
{
if (status == -ENXIO) /* core driver is not available */
return;
mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
if (status != -EREMOTEIO) /* driver specific failure */
return;
/* Failed in FW, print cmd out failure details */
mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
}
static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
struct mlx5_ib_mr *mr =
......@@ -149,7 +162,7 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
unsigned long flags;
if (status) {
mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
create_mkey_warn(dev, status, mr->out);
kfree(mr);
spin_lock_irqsave(&ent->lock, flags);
ent->pending--;
......
......@@ -4465,6 +4465,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
MLX5_ST_SZ_BYTES(create_dct_in), out,
sizeof(out));
err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out);
if (err)
return err;
resp.dctn = qp->dct.mdct.mqp.qpn;
......
......@@ -220,7 +220,7 @@ int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
init_completion(&dct->drained);
MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
err = mlx5_cmd_do(dev->mdev, in, inlen, out, outlen);
if (err)
return err;
......
......@@ -86,7 +86,8 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
/* Callers must verify outbox status in case of err */
int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen)
{
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
......@@ -101,7 +102,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
memset(out, 0, outlen);
MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
err = mlx5_cmd_do(dev, in, inlen, out, outlen);
if (err)
return err;
......@@ -148,6 +149,16 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
mlx5_cmd_exec_in(dev, destroy_cq, din);
return err;
}
EXPORT_SYMBOL(mlx5_create_cq);
/* outbox is checked and err val is normalized */
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen)
{
int err = mlx5_create_cq(dev, cq, in, inlen, out, outlen);
return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
......
......@@ -100,15 +100,11 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
}
net_port_alive = !!(reset_type & MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE);
err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive);
err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive, extack);
if (err)
goto out;
err = mlx5_fw_reset_wait_reset_done(dev);
out:
if (err)
NL_SET_ERR_MSG_MOD(extack, "FW activate command failed");
return err;
return mlx5_fw_reset_wait_reset_done(dev);
}
static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,
......
......@@ -139,15 +139,6 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
return true;
}
static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_IPSEC
return mlx5e_ipsec_is_tx_flow(&state->ipsec);
#else
return false;
#endif
}
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
struct mlx5e_accel_tx_state *state)
{
......
......@@ -92,6 +92,7 @@ static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
flow_act.fg = vport->ingress.offloads.metadata_allmatch_grp;
vport->ingress.offloads.modify_metadata_rule =
mlx5_add_flow_rules(vport->ingress.acl,
NULL, &flow_act, NULL, 0);
......@@ -117,6 +118,36 @@ static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
vport->ingress.offloads.modify_metadata_rule = NULL;
}
static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *flow_rule;
int err = 0;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
flow_act.fg = vport->ingress.offloads.drop_grp;
flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
goto out;
}
vport->ingress.offloads.drop_rule = flow_rule;
out:
return err;
}
static void esw_acl_ingress_src_port_drop_destroy(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (!vport->ingress.offloads.drop_rule)
return;
mlx5_del_flow_rules(vport->ingress.offloads.drop_rule);
vport->ingress.offloads.drop_rule = NULL;
}
static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
......@@ -154,6 +185,7 @@ static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
{
esw_acl_ingress_allow_rule_destroy(vport);
esw_acl_ingress_mod_metadata_destroy(esw, vport);
esw_acl_ingress_src_port_drop_destroy(esw, vport);
}
static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
......@@ -170,10 +202,29 @@ static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
if (!flow_group_in)
return -ENOMEM;
if (vport->vport == MLX5_VPORT_UPLINK) {
/* This group can hold an FTE to drop all traffic.
* Needed when LAG is enabled.
*/
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
ret = PTR_ERR(g);
esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
vport->vport, ret);
goto drop_err;
}
vport->ingress.offloads.drop_grp = g;
flow_index++;
}
if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
/* This group is to hold FTE to match untagged packets when prio_tag
* is enabled.
*/
memset(flow_group_in, 0, inlen);
match_criteria = MLX5_ADDR_OF(create_flow_group_in,
flow_group_in, match_criteria);
MLX5_SET(create_flow_group_in, flow_group_in,
......@@ -221,6 +272,11 @@ static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
vport->ingress.offloads.metadata_prio_tag_grp = NULL;
}
prio_tag_err:
if (!IS_ERR_OR_NULL(vport->ingress.offloads.drop_grp)) {
mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
vport->ingress.offloads.drop_grp = NULL;
}
drop_err:
kvfree(flow_group_in);
return ret;
}
......@@ -236,6 +292,11 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
vport->ingress.offloads.metadata_prio_tag_grp = NULL;
}
if (vport->ingress.offloads.drop_grp) {
mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
vport->ingress.offloads.drop_grp = NULL;
}
}
int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
......@@ -252,6 +313,8 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vport_match_metadata_enabled(esw))
num_ftes++;
if (vport->vport == MLX5_VPORT_UPLINK)
num_ftes++;
if (esw_acl_ingress_prio_tag_enabled(esw, vport))
num_ftes++;
......@@ -320,3 +383,27 @@ int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_n
vport->metadata = vport->default_metadata;
return err;
}
int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport)) {
esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
return PTR_ERR(vport);
}
return esw_acl_ingress_src_port_drop_create(esw, vport);
}
void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
if (WARN_ON_ONCE(IS_ERR(vport))) {
esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
return;
}
esw_acl_ingress_src_port_drop_destroy(esw, vport);
}
......@@ -6,6 +6,7 @@
#include "eswitch.h"
#ifdef CONFIG_MLX5_ESWITCH
/* Eswitch acl egress external APIs */
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport);
......@@ -25,5 +26,19 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vpor
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
u32 metadata);
void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num);
#else /* CONFIG_MLX5_ESWITCH */
static inline void
mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw,
u16 vport_num)
{}
static inline int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw,
u16 vport_num)
{
return 0;
}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_ACL_OFLD_H__ */
......@@ -113,8 +113,11 @@ struct vport_ingress {
* packet with metadata.
*/
struct mlx5_flow_group *metadata_allmatch_grp;
/* Optional group to add a drop all rule */
struct mlx5_flow_group *drop_grp;
struct mlx5_modify_hdr *modify_metadata;
struct mlx5_flow_handle *modify_metadata_rule;
struct mlx5_flow_handle *drop_rule;
} offloads;
};
......
......@@ -2379,60 +2379,6 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
static int esw_set_uplink_slave_ingress_root(struct mlx5_core_dev *master,
struct mlx5_core_dev *slave)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
struct mlx5_eswitch *esw;
struct mlx5_flow_root_namespace *root;
struct mlx5_flow_namespace *ns;
struct mlx5_vport *vport;
int err;
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, FS_FT_ESW_INGRESS_ACL);
MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
MLX5_SET(set_flow_table_root_in, in, vport_number, MLX5_VPORT_UPLINK);
if (master) {
esw = master->priv.eswitch;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
MLX5_SET(set_flow_table_root_in, in, table_of_other_vport, 1);
MLX5_SET(set_flow_table_root_in, in, table_vport_number,
MLX5_VPORT_UPLINK);
ns = mlx5_get_flow_vport_acl_namespace(master,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
vport->index);
root = find_root(&ns->node);
mutex_lock(&root->chain_lock);
MLX5_SET(set_flow_table_root_in, in,
table_eswitch_owner_vhca_id_valid, 1);
MLX5_SET(set_flow_table_root_in, in,
table_eswitch_owner_vhca_id,
MLX5_CAP_GEN(master, vhca_id));
MLX5_SET(set_flow_table_root_in, in, table_id,
root->root_ft->id);
} else {
esw = slave->priv.eswitch;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
ns = mlx5_get_flow_vport_acl_namespace(slave,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
vport->index);
root = find_root(&ns->node);
mutex_lock(&root->chain_lock);
MLX5_SET(set_flow_table_root_in, in, table_id, root->root_ft->id);
}
err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
mutex_unlock(&root->chain_lock);
return err;
}
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
struct mlx5_core_dev *slave)
{
......@@ -2614,15 +2560,10 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
{
int err;
err = esw_set_uplink_slave_ingress_root(master_esw->dev,
slave_esw->dev);
if (err)
return -EINVAL;
err = esw_set_slave_root_fdb(master_esw->dev,
slave_esw->dev);
if (err)
goto err_fdb;
return err;
err = esw_set_master_egress_rule(master_esw->dev,
slave_esw->dev);
......@@ -2634,9 +2575,6 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
err_acl:
esw_set_slave_root_fdb(NULL, slave_esw->dev);
err_fdb:
esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
return err;
}
......@@ -2645,7 +2583,6 @@ void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
{
esw_unset_master_egress_rule(master_esw->dev);
esw_set_slave_root_fdb(NULL, slave_esw->dev);
esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
}
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
......@@ -2842,6 +2779,19 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
#define MLX5_ESW_METADATA_RSVD_UPLINK 1
/* Share the same metadata for uplinks. This is fine because:
* (a) In shared FDB mode (LAG) both uplinks are treated the
* same and tagged with the same metadata.
* (b) In non shared FDB mode, packets from physical port0
* cannot hit eswitch of PF1 and vice versa.
*/
static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
{
return MLX5_ESW_METADATA_RSVD_UPLINK;
}
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
......@@ -2856,8 +2806,10 @@ u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
return 0;
/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
/* Use only non-zero vport_id (1-4095) for all PF's */
id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
/* Use only non-zero vport_id (2-4095) for all PF's */
id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
MLX5_ESW_METADATA_RSVD_UPLINK + 1,
vport_end_ida, GFP_KERNEL);
if (id < 0)
return 0;
id = (pf_num << ESW_VPORT_BITS) | id;
......@@ -2875,7 +2827,11 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (vport->vport == MLX5_VPORT_UPLINK)
vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
else
vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
vport->metadata = vport->default_metadata;
return vport->metadata ? 0 : -ENOSPC;
}
......@@ -2886,6 +2842,9 @@ static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
if (!vport->default_metadata)
return;
if (vport->vport == MLX5_VPORT_UPLINK)
return;
WARN_ON(vport->metadata != vport->default_metadata);
mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}
......
......@@ -1696,6 +1696,7 @@ static void free_match_list(struct match_list *head, bool ft_locked)
static int build_match_list(struct match_list *match_head,
struct mlx5_flow_table *ft,
const struct mlx5_flow_spec *spec,
struct mlx5_flow_group *fg,
bool ft_locked)
{
struct rhlist_head *tmp, *list;
......@@ -1710,6 +1711,9 @@ static int build_match_list(struct match_list *match_head,
rhl_for_each_entry_rcu(g, tmp, list, hash) {
struct match_list *curr_match;
if (fg && fg != g)
continue;
if (unlikely(!tree_get_node(&g->node)))
continue;
......@@ -1889,6 +1893,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (!check_valid_spec(spec))
return ERR_PTR(-EINVAL);
if (flow_act->fg && ft->autogroup.active)
return ERR_PTR(-EINVAL);
for (i = 0; i < dest_num; i++) {
if (!dest_is_valid(&dest[i], flow_act, ft))
return ERR_PTR(-EINVAL);
......@@ -1898,7 +1905,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
version = atomic_read(&ft->node.version);
/* Collect all fgs which has a matching match_criteria */
err = build_match_list(&match_head, ft, spec, take_write);
err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
if (err) {
if (take_write)
up_write_ref_node(&ft->node, false);
......
......@@ -57,7 +57,8 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MFRL, 0, 1);
}
static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
u8 *reset_type, u8 *reset_state)
{
u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
......@@ -71,25 +72,67 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *r
*reset_level = MLX5_GET(mfrl_reg, out, reset_level);
if (reset_type)
*reset_type = MLX5_GET(mfrl_reg, out, reset_type);
if (reset_state)
*reset_state = MLX5_GET(mfrl_reg, out, reset_state);
return 0;
}
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
{
return mlx5_reg_mfrl_query(dev, reset_level, reset_type);
return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL);
}
int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel)
static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
struct netlink_ext_ack *extack)
{
u8 reset_state;
if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
goto out;
switch (reset_state) {
case MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION:
case MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS:
NL_SET_ERR_MSG_MOD(extack, "Sync reset was already triggered");
return -EBUSY;
case MLX5_MFRL_REG_RESET_STATE_TIMEOUT:
NL_SET_ERR_MSG_MOD(extack, "Sync reset got timeout");
return -ETIMEDOUT;
case MLX5_MFRL_REG_RESET_STATE_NACK:
NL_SET_ERR_MSG_MOD(extack, "One of the hosts disabled reset");
return -EPERM;
}
out:
NL_SET_ERR_MSG_MOD(extack, "Sync reset failed");
return -EIO;
}
int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
struct netlink_ext_ack *extack)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
int err;
set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
err = mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, reset_type_sel, 0, true);
if (err)
MLX5_SET(mfrl_reg, in, reset_level, MLX5_MFRL_REG_RESET_LEVEL3);
MLX5_SET(mfrl_reg, in, rst_type_sel, reset_type_sel);
MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_start, 1);
err = mlx5_access_reg(dev, in, sizeof(in), out, sizeof(out),
MLX5_REG_MFRL, 0, 1, false);
if (!err)
return 0;
clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
return err;
if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state))
return mlx5_fw_reset_get_reset_state_err(dev, extack);
NL_SET_ERR_MSG_MOD(extack, "Sync reset command failed");
return mlx5_cmd_check(dev, err, in, out);
}
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
......
......@@ -9,7 +9,8 @@
void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable);
bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev);
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type);
int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel);
int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
struct netlink_ext_ack *extack);
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
......
......@@ -31,15 +31,22 @@
*/
#include <linux/netdevice.h>
#include <net/bonding.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
#include "lag.h"
#include "mp.h"
enum {
MLX5_LAG_EGRESS_PORT_1 = 1,
MLX5_LAG_EGRESS_PORT_2,
};
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
* under it).
......@@ -193,15 +200,71 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
tracker->netdev_state[MLX5_LAG_P2].link_up;
*port1 = 1;
*port2 = 2;
*port1 = MLX5_LAG_EGRESS_PORT_1;
*port2 = MLX5_LAG_EGRESS_PORT_2;
if ((!p1en && !p2en) || (p1en && p2en))
return;
if (p1en)
*port2 = 1;
*port2 = MLX5_LAG_EGRESS_PORT_1;
else
*port1 = 2;
*port1 = MLX5_LAG_EGRESS_PORT_2;
}
static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
{
return ldev->pf[MLX5_LAG_P1].has_drop || ldev->pf[MLX5_LAG_P2].has_drop;
}
static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{
int i;
for (i = 0; i < MLX5_MAX_PORTS; i++) {
if (!ldev->pf[i].has_drop)
continue;
mlx5_esw_acl_ingress_vport_drop_rule_destroy(ldev->pf[i].dev->priv.eswitch,
MLX5_VPORT_UPLINK);
ldev->pf[i].has_drop = false;
}
}
static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
struct mlx5_core_dev *inactive;
u8 v2p_port1, v2p_port2;
int inactive_idx;
int err;
/* First delete the current drop rule so there won't be any dropped
* packets
*/
mlx5_lag_drop_rule_cleanup(ldev);
if (!ldev->tracker.has_inactive)
return;
mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1, &v2p_port2);
if (v2p_port1 == MLX5_LAG_EGRESS_PORT_1) {
inactive = dev1;
inactive_idx = MLX5_LAG_P2;
} else {
inactive = dev0;
inactive_idx = MLX5_LAG_P1;
}
err = mlx5_esw_acl_ingress_vport_drop_rule_create(inactive->priv.eswitch,
MLX5_VPORT_UPLINK);
if (!err)
ldev->pf[inactive_idx].has_drop = true;
else
mlx5_core_err(inactive,
"Failed to create lag drop rule, error: %d", err);
}
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2)
......@@ -238,6 +301,10 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
ldev->v2p_map[MLX5_LAG_P1],
ldev->v2p_map[MLX5_LAG_P2]);
}
if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
!(ldev->flags & MLX5_LAG_FLAG_ROCE))
mlx5_lag_drop_rule_setup(ldev, tracker);
}
static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
......@@ -339,6 +406,10 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
return err;
}
if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
!roce_lag)
mlx5_lag_drop_rule_setup(ldev, tracker);
ldev->flags |= flags;
ldev->shared_fdb = shared_fdb;
return 0;
......@@ -347,6 +418,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
u8 flags = ldev->flags;
......@@ -356,8 +428,8 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
mlx5_lag_mp_reset(ldev);
if (ldev->shared_fdb) {
mlx5_eswitch_offloads_destroy_single_fdb(ldev->pf[MLX5_LAG_P1].dev->priv.eswitch,
ldev->pf[MLX5_LAG_P2].dev->priv.eswitch);
mlx5_eswitch_offloads_destroy_single_fdb(dev0->priv.eswitch,
dev1->priv.eswitch);
ldev->shared_fdb = false;
}
......@@ -372,11 +444,15 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
"Failed to deactivate VF LAG; driver restart required\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
}
} else if (flags & MLX5_LAG_FLAG_HASH_BASED) {
mlx5_lag_port_sel_destroy(ldev);
return err;
}
return err;
if (flags & MLX5_LAG_FLAG_HASH_BASED)
mlx5_lag_port_sel_destroy(ldev);
if (mlx5_lag_has_drop_rule(ldev))
mlx5_lag_drop_rule_cleanup(ldev);
return 0;
}
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
......@@ -613,6 +689,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
struct net_device *upper = info->upper_dev, *ndev_tmp;
struct netdev_lag_upper_info *lag_upper_info = NULL;
bool is_bonded, is_in_lag, mode_supported;
bool has_inactive = false;
struct slave *slave;
int bond_status = 0;
int num_slaves = 0;
int changed = 0;
......@@ -632,8 +710,12 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
if (idx >= 0)
if (idx >= 0) {
slave = bond_slave_get_rcu(ndev_tmp);
if (slave)
has_inactive |= bond_is_slave_inactive(slave);
bond_status |= (1 << idx);
}
num_slaves++;
}
......@@ -648,6 +730,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
tracker->hash_type = lag_upper_info->hash_type;
}
tracker->has_inactive = has_inactive;
/* Determine bonding status:
* A device is considered bonded if both its physical ports are slaves
* of the same lag master, and only them.
......@@ -704,6 +787,38 @@ static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
return 1;
}
static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
struct net_device *ndev)
{
struct net_device *ndev_tmp;
struct slave *slave;
bool has_inactive = false;
int idx;
if (!netif_is_lag_master(ndev))
return 0;
rcu_read_lock();
for_each_netdev_in_bond_rcu(ndev, ndev_tmp) {
idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
if (idx < 0)
continue;
slave = bond_slave_get_rcu(ndev_tmp);
if (slave)
has_inactive |= bond_is_slave_inactive(slave);
}
rcu_read_unlock();
if (tracker->has_inactive == has_inactive)
return 0;
tracker->has_inactive = has_inactive;
return 1;
}
static int mlx5_lag_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
......@@ -712,7 +827,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
struct mlx5_lag *ldev;
int changed = 0;
if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
if (event != NETDEV_CHANGEUPPER &&
event != NETDEV_CHANGELOWERSTATE &&
event != NETDEV_CHANGEINFODATA)
return NOTIFY_DONE;
ldev = container_of(this, struct mlx5_lag, nb);
......@@ -728,6 +845,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
ndev, ptr);
break;
case NETDEV_CHANGEINFODATA:
changed = mlx5_handle_changeinfodata_event(ldev, &tracker, ndev);
break;
}
ldev->tracker = tracker;
......
......@@ -28,6 +28,7 @@ enum {
struct lag_func {
struct mlx5_core_dev *dev;
struct net_device *netdev;
bool has_drop;
};
/* Used for collection of netdev event info. */
......@@ -35,6 +36,7 @@ struct lag_tracker {
enum netdev_lag_tx_type tx_type;
struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
unsigned int is_bonded:1;
unsigned int has_inactive:1;
enum netdev_lag_hash hash_type;
};
......
......@@ -50,7 +50,7 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
enum mlx5_lag_port_affinity port)
{
struct lag_tracker tracker;
struct lag_tracker tracker = {};
if (!__mlx5_lag_is_multipath(ldev))
return;
......
......@@ -92,13 +92,6 @@ mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
static inline void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent)
{
}
static inline int
mlx5_hv_vhca_write_agent(struct mlx5_hv_vhca_agent *agent,
void *buf, int len)
{
return 0;
}
#endif
#endif /* __LIB_HV_VHCA_H__ */
......@@ -736,10 +736,9 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
if (err) {
u32 syndrome;
u8 status;
u32 syndrome = MLX5_GET(query_issi_out, query_out, syndrome);
u8 status = MLX5_GET(query_issi_out, query_out, status);
mlx5_cmd_mbox_status(query_out, &status, &syndrome);
if (!status || syndrome == MLX5_DRIVER_SYND) {
mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
err, status, syndrome);
......
......@@ -33,9 +33,10 @@
#include <linux/mlx5/port.h>
#include "mlx5_core.h"
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_id, int arg, int write)
/* Calling with verbose set to false will not print errors to the log */
int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
void *data_out, int size_out, u16 reg_id, int arg,
int write, bool verbose)
{
int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out;
int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in;
......@@ -57,7 +58,9 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
MLX5_SET(access_register_in, in, argument, arg);
MLX5_SET(access_register_in, in, register_id, reg_id);
err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
err = mlx5_cmd_do(dev, in, inlen, out, outlen);
if (verbose)
err = mlx5_cmd_check(dev, err, in, out);
if (err)
goto out;
......@@ -69,6 +72,15 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_access_reg);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_id, int arg, int write)
{
return mlx5_access_reg(dev, data_in, size_in, data_out, size_out,
reg_id, arg, write, true);
}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
......
......@@ -183,6 +183,8 @@ static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
complete(&cq->free);
}
int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
......
......@@ -863,20 +863,10 @@ struct mlx5_hca_vport_context {
bool grh_required;
};
static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
{
return buf->frags->buf + offset;
}
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
return pci_get_drvdata(pdev);
}
extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
......@@ -965,6 +955,7 @@ typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);
struct mlx5_async_work {
struct mlx5_async_ctx *ctx;
mlx5_async_cbk_t user_callback;
void *out; /* pointer to the cmd output buffer */
};
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
......@@ -973,7 +964,9 @@ void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
void *out, int out_size, mlx5_async_cbk_t callback,
struct mlx5_async_work *work);
void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out);
int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size);
int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
......@@ -991,7 +984,6 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
......@@ -1039,6 +1031,9 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
void *data_out, int size_out, u16 reg_id, int arg,
int write, bool verbose);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
......
......@@ -224,6 +224,7 @@ struct mlx5_flow_act {
u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
struct mlx5_flow_group *fg;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
......
......@@ -9687,7 +9687,8 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
u8 reserved_at_0[0x6b];
u8 reserved_at_0[0x6a];
u8 reset_state[0x1];
u8 ptpcyc2realtime_modify[0x1];
u8 reserved_at_6c[0x2];
u8 pci_status_and_power[0x1];
......@@ -10368,6 +10369,14 @@ struct mlx5_ifc_mcda_reg_bits {
u8 data[][0x20];
};
enum {
MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2,
MLX5_MFRL_REG_RESET_STATE_TIMEOUT = 3,
MLX5_MFRL_REG_RESET_STATE_NACK = 4,
};
enum {
MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP = BIT(0),
MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE = BIT(1),
......@@ -10386,7 +10395,8 @@ struct mlx5_ifc_mfrl_reg_bits {
u8 pci_sync_for_fw_update_start[0x1];
u8 pci_sync_for_fw_update_resp[0x2];
u8 rst_type_sel[0x3];
u8 reserved_at_28[0x8];
u8 reserved_at_28[0x4];
u8 reset_state[0x4];
u8 reset_type[0x8];
u8 reset_level[0x8];
};
......