Commit 99e9acd8 authored by David S. Miller

Merge tag 'mlx5-updates-2018-10-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

mlx5-updates-2018-10-17

========================================================================

From Or Gerlitz <ogerlitz@mellanox.com>:

This series from Paul adds support for offloading tc rules with multiple priorities and chains to the mlx5 e-switch.

This is made of four building blocks (along with a few minor driver refactors):

[1] Split FDB fast path prio to multiple namespaces

Currently the FDB namespace contains two priorities: fast path (p0) and slow path (p1).
The slow path contains the per-representor SQ send-to-vport TX rule and the match-all
RX miss rule. As a pre-step to supporting multiple chains and priorities, we split the FDB fast path
into multiple namespaces (sub-namespaces), each with multiple priorities.
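
As a rough sketch (not lifted verbatim from the series), a caller that previously took the
single FDB fast path namespace now asks for the per-chain sub-namespace instead, e.g. for
chain 0:

	/* before: one shared fast-path namespace for the whole FDB */
	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);

	/* after: the parallel sub-namespace that backs a given chain */
	ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!ns)
		return -EOPNOTSUPP;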

[2] E-Switch chains and priorities

A chain is a group of priorities. We use the parallel FDB sub-namespaces to implement chains,
with a flow table for each priority in them.

Because these namespaces are parallel to one another and in series with the slow path
FDB, the chains aren't connected to each other (only to the slow path),
and one must use an explicit goto action to reach a different chain.

Flow tables for the priorities are created on demand and destroyed
once they are no longer used.

[3] Add a no-append flow insertion mode, use it for TC offloads

Enhance the driver fs core such that if the caller sets a no-append flag,
we add a new FTE instead of appending the actions of the inserted rule
to an existing FTE with the same match.

For encap rules, we defer the HW offloading until we have a valid neighbor. This can
result in the packet hitting a lower-priority rule in the HW datapath. Use the no-append API
to push these packets to the slow path FDB table, so they go to the TC kernel datapath as was
done before priorities were supported.
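
A minimal sketch of the intended use from a caller's point of view (assuming the flag is the
FLOW_ACT_NO_APPEND bit in the flags field of struct mlx5_flow_act, next to FLOW_ACT_HAS_TAG):

	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
	};
	struct mlx5_flow_handle *rule;

	/* ask the fs core for a brand new FTE even if an FTE with the
	 * same match value already exists in the table
	 */
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);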

[4] Offloading tc priorities and chains for eswitch flows

Using [1], [2] and [3] above, we add support for offloading both chains
and priorities. To get to a new chain, use the tc goto action. We support
a fixed prio range of 1-16, and chains 0-3.
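
For example (hypothetical representor netdevs and addresses), a rule in chain 0 can jump to
chain 2, where a second rule does the actual forwarding:

	tc filter add dev $REP ingress protocol ip prio 1 chain 0 \
		flower dst_ip 7.7.7.7 action goto chain 2
	tc filter add dev $REP ingress protocol ip prio 1 chain 2 \
		flower dst_ip 7.7.7.7 action mirred egress redirect dev $REP2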
=============================================================================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8f18da47 bf07aa73
...@@ -284,7 +284,7 @@ static bool devx_is_obj_create_cmd(const void *in) ...@@ -284,7 +284,7 @@ static bool devx_is_obj_create_cmd(const void *in)
case MLX5_CMD_OP_CREATE_FLOW_TABLE: case MLX5_CMD_OP_CREATE_FLOW_TABLE:
case MLX5_CMD_OP_CREATE_FLOW_GROUP: case MLX5_CMD_OP_CREATE_FLOW_GROUP:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
...@@ -627,9 +627,9 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din, ...@@ -627,9 +627,9 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER); MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
break; break;
case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
MLX5_CMD_OP_DEALLOC_ENCAP_HEADER); MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
break; break;
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
......
...@@ -2793,7 +2793,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c, ...@@ -2793,7 +2793,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
return -EINVAL; return -EINVAL;
action->flow_tag = ib_spec->flow_tag.tag_id; action->flow_tag = ib_spec->flow_tag.tag_id;
action->has_flow_tag = true; action->flags |= FLOW_ACT_HAS_TAG;
break; break;
case IB_FLOW_SPEC_ACTION_DROP: case IB_FLOW_SPEC_ACTION_DROP:
if (FIELDS_NOT_SUPPORTED(ib_spec->drop, if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
...@@ -2886,7 +2886,7 @@ is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev, ...@@ -2886,7 +2886,7 @@ is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
return egress ? VALID_SPEC_INVALID : VALID_SPEC_NA; return egress ? VALID_SPEC_INVALID : VALID_SPEC_NA;
return is_crypto && is_ipsec && return is_crypto && is_ipsec &&
(!egress || (!is_drop && !flow_act->has_flow_tag)) ? (!egress || (!is_drop && !(flow_act->flags & FLOW_ACT_HAS_TAG))) ?
VALID_SPEC_VALID : VALID_SPEC_INVALID; VALID_SPEC_VALID : VALID_SPEC_INVALID;
} }
...@@ -3320,15 +3320,18 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, ...@@ -3320,15 +3320,18 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
} }
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
struct mlx5_ib_mcounters *mcounters;
err = flow_counters_set_data(flow_act.counters, ucmd); err = flow_counters_set_data(flow_act.counters, ucmd);
if (err) if (err)
goto free; goto free;
mcounters = to_mcounters(flow_act.counters);
handler->ibcounters = flow_act.counters; handler->ibcounters = flow_act.counters;
dest_arr[dest_num].type = dest_arr[dest_num].type =
MLX5_FLOW_DESTINATION_TYPE_COUNTER; MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest_arr[dest_num].counter = dest_arr[dest_num].counter_id =
to_mcounters(flow_act.counters)->hw_cntrs_hndl; mlx5_fc_id(mcounters->hw_cntrs_hndl);
dest_num++; dest_num++;
} }
...@@ -3346,7 +3349,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, ...@@ -3346,7 +3349,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
} }
if (flow_act.has_flow_tag && if ((flow_act.flags & FLOW_ACT_HAS_TAG) &&
(flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n", mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
......
...@@ -1279,7 +1279,7 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev, ...@@ -1279,7 +1279,7 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
if (dev->rep) if (dev->rep)
MLX5_SET(tirc, tirc, self_lb_block, MLX5_SET(tirc, tirc, self_lb_block,
MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_); MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST);
err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn); err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
...@@ -1582,7 +1582,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, ...@@ -1582,7 +1582,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
create_tir: create_tir:
if (dev->rep) if (dev->rep)
MLX5_SET(tirc, tirc, self_lb_block, MLX5_SET(tirc, tirc, self_lb_block,
MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_); MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST);
err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn); err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
......
...@@ -308,10 +308,11 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, ...@@ -308,10 +308,11 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_FLOW_TABLE: case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT: case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_FPGA_DESTROY_QP: case MLX5_CMD_OP_FPGA_DESTROY_QP:
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
case MLX5_CMD_OP_DEALLOC_MEMIC:
return MLX5_CMD_STAT_OK; return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP: case MLX5_CMD_OP_QUERY_HCA_CAP:
...@@ -426,7 +427,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, ...@@ -426,7 +427,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER: case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_FPGA_CREATE_QP: case MLX5_CMD_OP_FPGA_CREATE_QP:
case MLX5_CMD_OP_FPGA_MODIFY_QP: case MLX5_CMD_OP_FPGA_MODIFY_QP:
...@@ -435,6 +436,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, ...@@ -435,6 +436,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
case MLX5_CMD_OP_ALLOC_MEMIC:
*status = MLX5_DRIVER_STATUS_ABORTED; *status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND; *synd = MLX5_DRIVER_SYND;
return -EIO; return -EIO;
...@@ -599,8 +601,8 @@ const char *mlx5_command_str(int command) ...@@ -599,8 +601,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER); MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER); MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE); MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER); MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER); MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT); MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT); MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP); MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
...@@ -617,6 +619,8 @@ const char *mlx5_command_str(int command) ...@@ -617,6 +619,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT); MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT); MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT); MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
default: return "unknown command opcode"; default: return "unknown command opcode";
} }
} }
......
...@@ -109,6 +109,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, ...@@ -109,6 +109,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->cons_index = 0; cq->cons_index = 0;
cq->arm_sn = 0; cq->arm_sn = 0;
cq->eq = eq; cq->eq = eq;
cq->uid = MLX5_GET(create_cq_in, in, uid);
refcount_set(&cq->refcount, 1); refcount_set(&cq->refcount, 1);
init_completion(&cq->free); init_completion(&cq->free);
if (!cq->comp) if (!cq->comp)
...@@ -144,6 +145,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, ...@@ -144,6 +145,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
memset(dout, 0, sizeof(dout)); memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ); MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, din, cqn, cq->cqn); MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
MLX5_SET(destroy_cq_in, din, uid, cq->uid);
mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err; return err;
} }
...@@ -165,6 +167,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) ...@@ -165,6 +167,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ); MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
MLX5_SET(destroy_cq_in, in, uid, cq->uid);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err) if (err)
return err; return err;
...@@ -196,6 +199,7 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, ...@@ -196,6 +199,7 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0}; u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ); MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
MLX5_SET(modify_cq_in, in, uid, cq->uid);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
} }
EXPORT_SYMBOL(mlx5_core_modify_cq); EXPORT_SYMBOL(mlx5_core_modify_cq);
......
...@@ -133,7 +133,7 @@ TRACE_EVENT(mlx5_fs_del_fg, ...@@ -133,7 +133,7 @@ TRACE_EVENT(mlx5_fs_del_fg,
{MLX5_FLOW_CONTEXT_ACTION_DROP, "DROP"},\ {MLX5_FLOW_CONTEXT_ACTION_DROP, "DROP"},\
{MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, "FWD"},\ {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, "FWD"},\
{MLX5_FLOW_CONTEXT_ACTION_COUNT, "CNT"},\ {MLX5_FLOW_CONTEXT_ACTION_COUNT, "CNT"},\
{MLX5_FLOW_CONTEXT_ACTION_ENCAP, "ENCAP"},\ {MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT, "REFORMAT"},\
{MLX5_FLOW_CONTEXT_ACTION_DECAP, "DECAP"},\ {MLX5_FLOW_CONTEXT_ACTION_DECAP, "DECAP"},\
{MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\ {MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\
{MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\ {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\
...@@ -252,10 +252,10 @@ TRACE_EVENT(mlx5_fs_add_rule, ...@@ -252,10 +252,10 @@ TRACE_EVENT(mlx5_fs_add_rule,
memcpy(__entry->destination, memcpy(__entry->destination,
&rule->dest_attr, &rule->dest_attr,
sizeof(__entry->destination)); sizeof(__entry->destination));
if (rule->dest_attr.type & MLX5_FLOW_DESTINATION_TYPE_COUNTER && if (rule->dest_attr.type &
rule->dest_attr.counter) MLX5_FLOW_DESTINATION_TYPE_COUNTER)
__entry->counter_id = __entry->counter_id =
rule->dest_attr.counter->id; rule->dest_attr.counter_id;
), ),
TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n", TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n",
__entry->rule, __entry->fte, __entry->index, __entry->rule, __entry->fte, __entry->index,
......
...@@ -153,7 +153,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) ...@@ -153,7 +153,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
if (enable_uc_lb) if (enable_uc_lb)
MLX5_SET(modify_tir_in, in, ctx.self_lb_block, MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_); MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST);
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
......
...@@ -3392,9 +3392,6 @@ static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, ...@@ -3392,9 +3392,6 @@ static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{ {
struct mlx5e_priv *priv = cb_priv; struct mlx5e_priv *priv = cb_priv;
if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
return -EOPNOTSUPP;
switch (type) { switch (type) {
case TC_SETUP_CLSFLOWER: case TC_SETUP_CLSFLOWER:
return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS); return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
......
...@@ -853,9 +853,6 @@ static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data, ...@@ -853,9 +853,6 @@ static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
{ {
struct mlx5e_priv *priv = cb_priv; struct mlx5e_priv *priv = cb_priv;
if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
return -EOPNOTSUPP;
switch (type) { switch (type) {
case TC_SETUP_CLSFLOWER: case TC_SETUP_CLSFLOWER:
return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS); return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
...@@ -869,9 +866,6 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, ...@@ -869,9 +866,6 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
{ {
struct mlx5e_priv *priv = cb_priv; struct mlx5e_priv *priv = cb_priv;
if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
return -EOPNOTSUPP;
switch (type) { switch (type) {
case TC_SETUP_CLSFLOWER: case TC_SETUP_CLSFLOWER:
return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS); return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
......
...@@ -263,7 +263,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) ...@@ -263,7 +263,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
esw_debug(dev, "Create FDB log_max_size(%d)\n", esw_debug(dev, "Create FDB log_max_size(%d)\n",
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); root_ns = mlx5_get_fdb_sub_ns(dev, 0);
if (!root_ns) { if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n"); esw_warn(dev, "Failed to get FDB flow namespace\n");
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -1198,7 +1198,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, ...@@ -1198,7 +1198,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
if (counter) { if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
drop_ctr_dst.counter = counter; drop_ctr_dst.counter_id = mlx5_fc_id(counter);
dst = &drop_ctr_dst; dst = &drop_ctr_dst;
dest_num++; dest_num++;
} }
...@@ -1285,7 +1285,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, ...@@ -1285,7 +1285,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
if (counter) { if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
drop_ctr_dst.counter = counter; drop_ctr_dst.counter_id = mlx5_fc_id(counter);
dst = &drop_ctr_dst; dst = &drop_ctr_dst;
dest_num++; dest_num++;
} }
...@@ -1746,7 +1746,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ...@@ -1746,7 +1746,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->enabled_vports = 0; esw->enabled_vports = 0;
esw->mode = SRIOV_NONE; esw->mode = SRIOV_NONE;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) && if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else else
......
...@@ -59,6 +59,10 @@ ...@@ -59,6 +59,10 @@
#define mlx5_esw_has_fwd_fdb(dev) \ #define mlx5_esw_has_fwd_fdb(dev) \
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table) MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)
#define FDB_MAX_CHAIN 3
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
#define FDB_MAX_PRIO 16
struct vport_ingress { struct vport_ingress {
struct mlx5_flow_table *acl; struct mlx5_flow_table *acl;
struct mlx5_flow_group *allow_untagged_spoofchk_grp; struct mlx5_flow_group *allow_untagged_spoofchk_grp;
...@@ -120,6 +124,13 @@ struct mlx5_vport { ...@@ -120,6 +124,13 @@ struct mlx5_vport {
u16 enabled_events; u16 enabled_events;
}; };
enum offloads_fdb_flags {
ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
};
extern const unsigned int ESW_POOLS[4];
#define PRIO_LEVELS 2
struct mlx5_eswitch_fdb { struct mlx5_eswitch_fdb {
union { union {
struct legacy_fdb { struct legacy_fdb {
...@@ -130,16 +141,24 @@ struct mlx5_eswitch_fdb { ...@@ -130,16 +141,24 @@ struct mlx5_eswitch_fdb {
} legacy; } legacy;
struct offloads_fdb { struct offloads_fdb {
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_table *slow_fdb; struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp; struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *miss_grp; struct mlx5_flow_group *miss_grp;
struct mlx5_flow_handle *miss_rule_uni; struct mlx5_flow_handle *miss_rule_uni;
struct mlx5_flow_handle *miss_rule_multi; struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount; int vlan_push_pop_refcount;
struct {
struct mlx5_flow_table *fdb;
u32 num_rules;
} fdb_prio[FDB_MAX_CHAIN + 1][FDB_MAX_PRIO + 1][PRIO_LEVELS];
/* Protects fdb_prio table */
struct mutex fdb_prio_lock;
int fdb_left[ARRAY_SIZE(ESW_POOLS)];
} offloads; } offloads;
}; };
u32 flags;
}; };
struct mlx5_esw_offload { struct mlx5_esw_offload {
...@@ -181,6 +200,7 @@ struct mlx5_eswitch { ...@@ -181,6 +200,7 @@ struct mlx5_eswitch {
struct mlx5_esw_offload offloads; struct mlx5_esw_offload offloads;
int mode; int mode;
int nvports;
}; };
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports); void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
...@@ -228,6 +248,19 @@ void ...@@ -228,6 +248,19 @@ void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule, struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *attr); struct mlx5_esw_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *attr);
bool
mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw);
u16
mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw);
u32
mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw);
struct mlx5_flow_handle * struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
...@@ -266,6 +299,10 @@ struct mlx5_esw_flow_attr { ...@@ -266,6 +299,10 @@ struct mlx5_esw_flow_attr {
u32 encap_id; u32 encap_id;
u32 mod_hdr_id; u32 mod_hdr_id;
u8 match_level; u8 match_level;
struct mlx5_fc *counter;
u32 chain;
u16 prio;
u32 dest_chain;
struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5e_tc_flow_parse_attr *parse_attr;
}; };
...@@ -318,6 +355,11 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} ...@@ -318,6 +355,11 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) {} static inline void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) {}
static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; } static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {} static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
#define FDB_MAX_CHAIN 1
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
#define FDB_MAX_PRIO 1
#endif /* CONFIG_MLX5_ESWITCH */ #endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */ #endif /* __MLX5_ESWITCH_H__ */
...@@ -650,7 +650,7 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev, ...@@ -650,7 +650,7 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
(match_criteria_enable & (match_criteria_enable &
~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) || ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
(flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) || (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
flow_act->has_flow_tag) (flow_act->flags & FLOW_ACT_HAS_TAG))
return false; return false;
return true; return true;
......
...@@ -152,7 +152,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, ...@@ -152,7 +152,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
struct mlx5_flow_table *next_ft, struct mlx5_flow_table *next_ft,
unsigned int *table_id, u32 flags) unsigned int *table_id, u32 flags)
{ {
int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN); int en_encap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
int en_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
int err; int err;
...@@ -169,9 +170,9 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, ...@@ -169,9 +170,9 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
} }
MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en, MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
en_encap_decap); en_decap);
MLX5_SET(create_flow_table_in, in, flow_table_context.encap_en, MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
en_encap_decap); en_encap);
switch (op_mod) { switch (op_mod) {
case FS_FT_OP_MOD_NORMAL: case FS_FT_OP_MOD_NORMAL:
...@@ -343,7 +344,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, ...@@ -343,7 +344,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag); MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action.action); MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id); MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
fte->action.reformat_id);
MLX5_SET(flow_context, in_flow_context, modify_header_id, MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_id); fte->action.modify_id);
...@@ -417,7 +419,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, ...@@ -417,7 +419,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
continue; continue;
MLX5_SET(flow_counter_list, in_dests, flow_counter_id, MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
dst->dest_attr.counter->id); dst->dest_attr.counter_id);
in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
list_size++; list_size++;
} }
...@@ -594,62 +596,78 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, ...@@ -594,62 +596,78 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
*bytes = MLX5_GET64(traffic_counter, stats, octets); *bytes = MLX5_GET64(traffic_counter, stats, octets);
} }
int mlx5_encap_alloc(struct mlx5_core_dev *dev, int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
int header_type, int reformat_type,
size_t size, size_t size,
void *encap_header, void *reformat_data,
u32 *encap_id) enum mlx5_flow_namespace_type namespace,
u32 *packet_reformat_id)
{ {
int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size); u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)]; void *packet_reformat_context_in;
void *encap_header_in; int max_encap_size;
void *header; void *reformat;
int inlen; int inlen;
int err; int err;
u32 *in; u32 *in;
if (namespace == MLX5_FLOW_NAMESPACE_FDB)
max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
else
max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
if (size > max_encap_size) { if (size > max_encap_size) {
mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n", mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
size, max_encap_size); size, max_encap_size);
return -EINVAL; return -EINVAL;
} }
in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size, in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
GFP_KERNEL); GFP_KERNEL);
if (!in) if (!in)
return -ENOMEM; return -ENOMEM;
encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header); packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header); in, packet_reformat_context);
inlen = header - (void *)in + size; reformat = MLX5_ADDR_OF(packet_reformat_context_in,
packet_reformat_context_in,
reformat_data);
inlen = reformat - (void *)in + size;
memset(in, 0, inlen); memset(in, 0, inlen);
MLX5_SET(alloc_encap_header_in, in, opcode, MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_ALLOC_ENCAP_HEADER); MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size); MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
MLX5_SET(encap_header_in, encap_header_in, header_type, header_type); reformat_data_size, size);
memcpy(header, encap_header, size); MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
reformat_type, reformat_type);
memcpy(reformat, reformat_data, size);
memset(out, 0, sizeof(out)); memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
*encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id); *packet_reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
out, packet_reformat_id);
kfree(in); kfree(in);
return err; return err;
} }
EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id) void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
u32 packet_reformat_id)
{ {
u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)]; u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)]; u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
memset(in, 0, sizeof(in)); memset(in, 0, sizeof(in));
MLX5_SET(dealloc_encap_header_in, in, opcode, MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_ENCAP_HEADER); MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id); MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
packet_reformat_id);
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
} }
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
u8 namespace, u8 num_actions, u8 namespace, u8 num_actions,
...@@ -667,9 +685,14 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, ...@@ -667,9 +685,14 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
table_type = FS_FT_FDB; table_type = FS_FT_FDB;
break; break;
case MLX5_FLOW_NAMESPACE_KERNEL: case MLX5_FLOW_NAMESPACE_KERNEL:
case MLX5_FLOW_NAMESPACE_BYPASS:
max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions); max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_RX; table_type = FS_FT_NIC_RX;
break; break;
case MLX5_FLOW_NAMESPACE_EGRESS:
max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_TX;
break;
default: default:
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
...@@ -702,6 +725,7 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, ...@@ -702,6 +725,7 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
kfree(in); kfree(in);
return err; return err;
} }
EXPORT_SYMBOL(mlx5_modify_header_alloc);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id) void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
{ {
...@@ -716,6 +740,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id) ...@@ -716,6 +740,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
} }
EXPORT_SYMBOL(mlx5_modify_header_dealloc);
static const struct mlx5_flow_cmds mlx5_flow_cmds = { static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table, .create_flow_table = mlx5_cmd_create_flow_table,
...@@ -760,8 +785,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ ...@@ -760,8 +785,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_FDB: case FS_FT_FDB:
case FS_FT_SNIFFER_RX: case FS_FT_SNIFFER_RX:
case FS_FT_SNIFFER_TX: case FS_FT_SNIFFER_TX:
return mlx5_fs_cmd_get_fw_cmds();
case FS_FT_NIC_TX: case FS_FT_NIC_TX:
return mlx5_fs_cmd_get_fw_cmds();
default: default:
return mlx5_fs_cmd_get_stub_cmds(); return mlx5_fs_cmd_get_stub_cmds();
} }
......
...@@ -38,9 +38,21 @@ ...@@ -38,9 +38,21 @@
#include <linux/rhashtable.h> #include <linux/rhashtable.h>
#include <linux/llist.h> #include <linux/llist.h>
/* FS_TYPE_PRIO_CHAINS is a PRIO that will have namespaces only,
* and those are in parallel to one another when going over them to connect
* a new flow table. Meaning the last flow table in a TYPE_PRIO prio in one
* parallel namespace will not automatically connect to the first flow table
* found in any prio in any next namespace, but skip the entire containing
* TYPE_PRIO_CHAINS prio.
*
* This is used to implement tc chains, each chain of prios is a different
* namespace inside a containing TYPE_PRIO_CHAINS prio.
*/
enum fs_node_type { enum fs_node_type {
FS_TYPE_NAMESPACE, FS_TYPE_NAMESPACE,
FS_TYPE_PRIO, FS_TYPE_PRIO,
FS_TYPE_PRIO_CHAINS,
FS_TYPE_FLOW_TABLE, FS_TYPE_FLOW_TABLE,
FS_TYPE_FLOW_GROUP, FS_TYPE_FLOW_GROUP,
FS_TYPE_FLOW_ENTRY, FS_TYPE_FLOW_ENTRY,
...@@ -73,6 +85,7 @@ struct mlx5_flow_steering { ...@@ -73,6 +85,7 @@ struct mlx5_flow_steering {
struct kmem_cache *ftes_cache; struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns; struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_namespace **fdb_sub_ns;
struct mlx5_flow_root_namespace **esw_egress_root_ns; struct mlx5_flow_root_namespace **esw_egress_root_ns;
struct mlx5_flow_root_namespace **esw_ingress_root_ns; struct mlx5_flow_root_namespace **esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns; struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
......
...@@ -258,6 +258,12 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) ...@@ -258,6 +258,12 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
} }
EXPORT_SYMBOL(mlx5_fc_create); EXPORT_SYMBOL(mlx5_fc_create);
u32 mlx5_fc_id(struct mlx5_fc *counter)
{
return counter->id;
}
EXPORT_SYMBOL(mlx5_fc_id);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter) void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{ {
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <linux/if_link.h> #include <linux/if_link.h>
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/mlx5/cq.h> #include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#define DRIVER_NAME "mlx5_core" #define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0" #define DRIVER_VERSION "5.0-0"
...@@ -171,17 +172,6 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev); ...@@ -171,17 +172,6 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void); void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void); void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void); int mlx5_dev_list_trylock(void);
int mlx5_encap_alloc(struct mlx5_core_dev *dev,
int header_type,
size_t size,
void *encap_header,
u32 *encap_id);
void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
u8 namespace, u8 num_actions,
void *modify_actions, u32 *modify_header_id);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id);
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv); bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
......
...@@ -211,6 +211,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev, ...@@ -211,6 +211,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
} }
qp->qpn = MLX5_GET(create_dct_out, out, dctn); qp->qpn = MLX5_GET(create_dct_out, out, dctn);
qp->uid = MLX5_GET(create_dct_in, in, uid);
err = create_resource_common(dev, qp, MLX5_RES_DCT); err = create_resource_common(dev, qp, MLX5_RES_DCT);
if (err) if (err)
goto err_cmd; goto err_cmd;
...@@ -219,6 +220,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev, ...@@ -219,6 +220,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
err_cmd: err_cmd:
MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT); MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
MLX5_SET(destroy_dct_in, din, dctn, qp->qpn); MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
MLX5_SET(destroy_dct_in, din, uid, qp->uid);
mlx5_cmd_exec(dev, (void *)&in, sizeof(din), mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
(void *)&out, sizeof(dout)); (void *)&out, sizeof(dout));
return err; return err;
...@@ -240,6 +242,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, ...@@ -240,6 +242,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
if (err) if (err)
return err; return err;
qp->uid = MLX5_GET(create_qp_in, in, uid);
qp->qpn = MLX5_GET(create_qp_out, out, qpn); qp->qpn = MLX5_GET(create_qp_out, out, qpn);
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
...@@ -261,6 +264,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, ...@@ -261,6 +264,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
memset(dout, 0, sizeof(dout)); memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP); MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, din, qpn, qp->qpn); MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
MLX5_SET(destroy_qp_in, din, uid, qp->uid);
mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err; return err;
} }
...@@ -275,6 +279,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev, ...@@ -275,6 +279,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT); MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
MLX5_SET(drain_dct_in, in, dctn, qp->qpn); MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
MLX5_SET(drain_dct_in, in, uid, qp->uid);
return mlx5_cmd_exec(dev, (void *)&in, sizeof(in), return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
(void *)&out, sizeof(out)); (void *)&out, sizeof(out));
} }
...@@ -301,6 +306,7 @@ int mlx5_core_destroy_dct(struct mlx5_core_dev *dev, ...@@ -301,6 +306,7 @@ int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
destroy_resource_common(dev, &dct->mqp); destroy_resource_common(dev, &dct->mqp);
MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT); MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
MLX5_SET(destroy_dct_in, in, dctn, qp->qpn); MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
MLX5_SET(destroy_dct_in, in, uid, qp->uid);
err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in), err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
(void *)&out, sizeof(out)); (void *)&out, sizeof(out));
return err; return err;
...@@ -320,6 +326,7 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, ...@@ -320,6 +326,7 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
MLX5_SET(destroy_qp_in, in, uid, qp->uid);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err) if (err)
return err; return err;
...@@ -373,7 +380,7 @@ static void mbox_free(struct mbox_info *mbox) ...@@ -373,7 +380,7 @@ static void mbox_free(struct mbox_info *mbox)
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
u32 opt_param_mask, void *qpc, u32 opt_param_mask, void *qpc,
struct mbox_info *mbox) struct mbox_info *mbox, u16 uid)
{ {
mbox->out = NULL; mbox->out = NULL;
mbox->in = NULL; mbox->in = NULL;
...@@ -381,26 +388,32 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, ...@@ -381,26 +388,32 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
#define MBOX_ALLOC(mbox, typ) \ #define MBOX_ALLOC(mbox, typ) \
mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out)) mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \ #define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid) \
MLX5_SET(typ##_in, in, opcode, _opcode); \ do { \
MLX5_SET(typ##_in, in, qpn, _qpn) MLX5_SET(typ##_in, in, opcode, _opcode); \
MLX5_SET(typ##_in, in, qpn, _qpn); \
#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \ MLX5_SET(typ##_in, in, uid, _uid); \
MOD_QP_IN_SET(typ, in, _opcode, _qpn); \ } while (0)
MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc)) #define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid) \
do { \
MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid); \
MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, \
MLX5_ST_SZ_BYTES(qpc)); \
} while (0)
switch (opcode) { switch (opcode) {
/* 2RST & 2ERR */ /* 2RST & 2ERR */
case MLX5_CMD_OP_2RST_QP: case MLX5_CMD_OP_2RST_QP:
if (MBOX_ALLOC(mbox, qp_2rst)) if (MBOX_ALLOC(mbox, qp_2rst))
return -ENOMEM; return -ENOMEM;
MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn); MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
break; break;
case MLX5_CMD_OP_2ERR_QP: case MLX5_CMD_OP_2ERR_QP:
if (MBOX_ALLOC(mbox, qp_2err)) if (MBOX_ALLOC(mbox, qp_2err))
return -ENOMEM; return -ENOMEM;
MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn); MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
break; break;
/* MODIFY with QPC */ /* MODIFY with QPC */
...@@ -408,37 +421,37 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, ...@@ -408,37 +421,37 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
if (MBOX_ALLOC(mbox, rst2init_qp)) if (MBOX_ALLOC(mbox, rst2init_qp))
return -ENOMEM; return -ENOMEM;
MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn, MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc); opt_param_mask, qpc, uid);
break; break;
case MLX5_CMD_OP_INIT2RTR_QP: case MLX5_CMD_OP_INIT2RTR_QP:
if (MBOX_ALLOC(mbox, init2rtr_qp)) if (MBOX_ALLOC(mbox, init2rtr_qp))
return -ENOMEM; return -ENOMEM;
MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn, MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc); opt_param_mask, qpc, uid);
break; break;
case MLX5_CMD_OP_RTR2RTS_QP: case MLX5_CMD_OP_RTR2RTS_QP:
if (MBOX_ALLOC(mbox, rtr2rts_qp)) if (MBOX_ALLOC(mbox, rtr2rts_qp))
return -ENOMEM; return -ENOMEM;
MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn, MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc); opt_param_mask, qpc, uid);
break; break;
case MLX5_CMD_OP_RTS2RTS_QP: case MLX5_CMD_OP_RTS2RTS_QP:
if (MBOX_ALLOC(mbox, rts2rts_qp)) if (MBOX_ALLOC(mbox, rts2rts_qp))
return -ENOMEM; return -ENOMEM;
MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn, MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc); opt_param_mask, qpc, uid);
break; break;
case MLX5_CMD_OP_SQERR2RTS_QP: case MLX5_CMD_OP_SQERR2RTS_QP:
if (MBOX_ALLOC(mbox, sqerr2rts_qp)) if (MBOX_ALLOC(mbox, sqerr2rts_qp))
return -ENOMEM; return -ENOMEM;
MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn, MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc); opt_param_mask, qpc, uid);
break; break;
case MLX5_CMD_OP_INIT2INIT_QP: case MLX5_CMD_OP_INIT2INIT_QP:
if (MBOX_ALLOC(mbox, init2init_qp)) if (MBOX_ALLOC(mbox, init2init_qp))
return -ENOMEM; return -ENOMEM;
MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn, MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc); opt_param_mask, qpc, uid);
break; break;
default: default:
mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n", mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
...@@ -456,7 +469,7 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode, ...@@ -456,7 +469,7 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
int err; int err;
err = modify_qp_mbox_alloc(dev, opcode, qp->qpn, err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
opt_param_mask, qpc, &mbox); opt_param_mask, qpc, &mbox, qp->uid);
if (err) if (err)
return err; return err;
...@@ -531,6 +544,17 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) ...@@ -531,6 +544,17 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
} }
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid)
{
u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {};
MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
MLX5_SET(destroy_rq_in, in, rqn, rqn);
MLX5_SET(destroy_rq_in, in, uid, uid);
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *rq) struct mlx5_core_qp *rq)
{ {
...@@ -541,6 +565,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, ...@@ -541,6 +565,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
if (err) if (err)
return err; return err;
rq->uid = MLX5_GET(create_rq_in, in, uid);
rq->qpn = rqn; rq->qpn = rqn;
err = create_resource_common(dev, rq, MLX5_RES_RQ); err = create_resource_common(dev, rq, MLX5_RES_RQ);
if (err) if (err)
...@@ -549,7 +574,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, ...@@ -549,7 +574,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return 0; return 0;
err_destroy_rq: err_destroy_rq:
mlx5_core_destroy_rq(dev, rq->qpn); destroy_rq_tracked(dev, rq->qpn, rq->uid);
return err; return err;
} }
...@@ -559,10 +584,21 @@ void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev, ...@@ -559,10 +584,21 @@ void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *rq) struct mlx5_core_qp *rq)
{ {
destroy_resource_common(dev, rq); destroy_resource_common(dev, rq);
mlx5_core_destroy_rq(dev, rq->qpn); destroy_rq_tracked(dev, rq->qpn, rq->uid);
} }
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked); EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid)
{
u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {};
MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
MLX5_SET(destroy_sq_in, in, sqn, sqn);
MLX5_SET(destroy_sq_in, in, uid, uid);
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *sq) struct mlx5_core_qp *sq)
{ {
...@@ -573,6 +609,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, ...@@ -573,6 +609,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
if (err) if (err)
return err; return err;
sq->uid = MLX5_GET(create_sq_in, in, uid);
sq->qpn = sqn; sq->qpn = sqn;
err = create_resource_common(dev, sq, MLX5_RES_SQ); err = create_resource_common(dev, sq, MLX5_RES_SQ);
if (err) if (err)
...@@ -581,7 +618,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, ...@@ -581,7 +618,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return 0; return 0;
err_destroy_sq: err_destroy_sq:
mlx5_core_destroy_sq(dev, sq->qpn); destroy_sq_tracked(dev, sq->qpn, sq->uid);
return err; return err;
} }
...@@ -591,7 +628,7 @@ void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, ...@@ -591,7 +628,7 @@ void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *sq) struct mlx5_core_qp *sq)
{ {
destroy_resource_common(dev, sq); destroy_resource_common(dev, sq);
mlx5_core_destroy_sq(dev, sq->qpn); destroy_sq_tracked(dev, sq->qpn, sq->uid);
} }
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked); EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
......
...@@ -166,6 +166,7 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, ...@@ -166,6 +166,7 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
if (!create_in) if (!create_in)
return -ENOMEM; return -ENOMEM;
MLX5_SET(create_srq_in, create_in, uid, in->uid);
srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry); srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
pas = MLX5_ADDR_OF(create_srq_in, create_in, pas); pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
...@@ -178,8 +179,10 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, ...@@ -178,8 +179,10 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
err = mlx5_cmd_exec(dev, create_in, inlen, create_out, err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
sizeof(create_out)); sizeof(create_out));
kvfree(create_in); kvfree(create_in);
if (!err) if (!err) {
srq->srqn = MLX5_GET(create_srq_out, create_out, srqn); srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
srq->uid = in->uid;
}
return err; return err;
} }
...@@ -193,6 +196,7 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev, ...@@ -193,6 +196,7 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(destroy_srq_in, srq_in, opcode, MLX5_SET(destroy_srq_in, srq_in, opcode,
MLX5_CMD_OP_DESTROY_SRQ); MLX5_CMD_OP_DESTROY_SRQ);
MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn); MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid);
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out)); srq_out, sizeof(srq_out));
...@@ -208,6 +212,7 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, ...@@ -208,6 +212,7 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ); MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn); MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
MLX5_SET(arm_rq_in, srq_in, lwm, lwm); MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
MLX5_SET(arm_rq_in, srq_in, uid, srq->uid);
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out)); srq_out, sizeof(srq_out));
...@@ -260,6 +265,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, ...@@ -260,6 +265,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
if (!create_in) if (!create_in)
return -ENOMEM; return -ENOMEM;
MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
xrc_srq_context_entry); xrc_srq_context_entry);
pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas); pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
...@@ -277,6 +283,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, ...@@ -277,6 +283,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
goto out; goto out;
srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn); srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
srq->uid = in->uid;
out: out:
kvfree(create_in); kvfree(create_in);
return err; return err;
...@@ -291,6 +298,7 @@ static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev, ...@@ -291,6 +298,7 @@ static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode, MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
MLX5_CMD_OP_DESTROY_XRC_SRQ); MLX5_CMD_OP_DESTROY_XRC_SRQ);
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid);
return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out)); xrcsrq_out, sizeof(xrcsrq_out));
...@@ -306,6 +314,7 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev, ...@@ -306,6 +314,7 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm); MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid);
return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out)); xrcsrq_out, sizeof(xrcsrq_out));
...@@ -365,10 +374,13 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, ...@@ -365,10 +374,13 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
wq = MLX5_ADDR_OF(rmpc, rmpc, wq); wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
MLX5_SET(create_rmp_in, create_in, uid, in->uid);
set_wq(wq, in); set_wq(wq, in);
memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size); memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn); err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
if (!err)
srq->uid = in->uid;
kvfree(create_in); kvfree(create_in);
return err; return err;
...@@ -377,7 +389,13 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, ...@@ -377,7 +389,13 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
static int destroy_rmp_cmd(struct mlx5_core_dev *dev, static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq) struct mlx5_core_srq *srq)
{ {
return mlx5_core_destroy_rmp(dev, srq->srqn); u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {};
MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
} }
static int arm_rmp_cmd(struct mlx5_core_dev *dev, static int arm_rmp_cmd(struct mlx5_core_dev *dev,
@@ -400,6 +418,7 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev,
MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(modify_rmp_in, in, uid, srq->uid);
MLX5_SET(wq, wq, lwm, lwm);
MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
@@ -469,11 +488,14 @@ static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(xrqc, xrqc, user_index, in->user_index);
MLX5_SET(xrqc, xrqc, cqn, in->cqn);
MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
+ MLX5_SET(create_xrq_in, create_in, uid, in->uid);
err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
sizeof(create_out));
kvfree(create_in);
- if (!err)
+ if (!err) {
srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
+ srq->uid = in->uid;
+ }
return err;
}
@@ -485,6 +507,7 @@ static int destroy_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
+ MLX5_SET(destroy_xrq_in, in, uid, srq->uid);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@@ -500,6 +523,7 @@ static int arm_xrq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
MLX5_SET(arm_rq_in, in, lwm, lwm);
+ MLX5_SET(arm_rq_in, in, uid, srq->uid);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
......
@@ -61,6 +61,7 @@ struct mlx5_core_cq {
int reset_notify_added;
struct list_head reset_notify;
struct mlx5_eq *eq;
+ u16 uid;
};
......
@@ -1124,6 +1124,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+ #define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
+ #define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
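The two new NIC TX macros mirror the existing RX pair. As an illustration (not taken from this series), a capability check gating a transmit-side steering feature could look like the sketch below; the ft_support bit is assumed here because it is the field the RX variants are commonly queried for.

/* Illustrative only: gate a TX steering feature on the new NIC transmit
 * flow-table capability, the same way the RX macros are typically used.
 */
static bool example_nic_tx_steering_supported(struct mlx5_core_dev *mdev)
{
        return MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ft_support);
}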
......
@@ -163,10 +163,7 @@ enum mlx5_dcbx_oper_mode {
};
enum mlx5_dct_atomic_mode {
- MLX5_ATOMIC_MODE_DCT_OFF = 20,
- MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
- MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
- MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_CX = 2,
};
enum {
@@ -477,6 +474,7 @@ struct mlx5_core_srq {
atomic_t refcount;
struct completion free;
+ u16 uid;
};
struct mlx5_eq_table {
......
@@ -45,7 +45,8 @@ enum {
};
enum {
- MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0),
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
};
#define LEFTOVERS_RULE_NUM 2
@@ -91,7 +92,7 @@ struct mlx5_flow_destination {
u32 tir_num;
u32 ft_num;
struct mlx5_flow_table *ft;
- struct mlx5_fc *counter;
+ u32 counter_id;
struct {
u16 num;
u16 vhca_id;
@@ -100,6 +101,8 @@ struct mlx5_flow_destination {
};
};
+ struct mlx5_flow_namespace *
+ mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n);
struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
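mlx5_get_fdb_sub_ns() is the new entry point for the parallel FDB fast-path sub-namespaces that back tc chains (see the cover letter). A minimal sketch follows, assuming the caller maps a tc chain number directly to a sub-namespace index; that mapping is an assumption, not taken from the diff.

/* Sketch only: resolve the flow namespace backing tc chain 'chain'.
 * The chain -> sub-namespace index mapping is assumed for illustration.
 */
static struct mlx5_flow_namespace *
example_get_chain_ns(struct mlx5_core_dev *dev, u32 chain)
{
        return mlx5_get_fdb_sub_ns(dev, chain);
}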
@@ -155,20 +158,28 @@ struct mlx5_fs_vlan {
#define MLX5_FS_VLAN_DEPTH 2
+ enum {
+ FLOW_ACT_HAS_TAG = BIT(0),
+ FLOW_ACT_NO_APPEND = BIT(1),
+ };
struct mlx5_flow_act {
u32 action;
- bool has_flow_tag;
u32 flow_tag;
- u32 encap_id;
+ u32 reformat_id;
u32 modify_id;
uintptr_t esp_id;
+ u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
- struct mlx5_flow_act name = {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
- MLX5_FS_DEFAULT_FLOW_TAG, 0, 0}
+ struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
+ .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, \
+ .reformat_id = 0, \
+ .modify_id = 0, \
+ .flags = 0, }
/* Single destination per rule.
* Group ID is implied by the match criteria.
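With has_flow_tag gone, a valid flow tag is now signalled through flags & FLOW_ACT_HAS_TAG, and MLX5_DECLARE_FLOW_ACT switches to designated initializers so newly added fields default to zero. Below is a hedged example of building an action struct that asks fs_core for a dedicated FTE via the new no-append flag; the COUNT action bit is included only for illustration.

/* Illustrative helper: forward + count, and request a new FTE even when an
 * identical match already exists (FLOW_ACT_NO_APPEND).
 */
static void example_prepare_flow_act(struct mlx5_flow_act *flow_act)
{
        *flow_act = (struct mlx5_flow_act) {
                .action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                            MLX5_FLOW_CONTEXT_ACTION_COUNT,
                .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
                .flags    = FLOW_ACT_NO_APPEND,
        };
}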
@@ -185,15 +196,30 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_flow_destination *new_dest,
struct mlx5_flow_destination *old_dest);
- struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
u64 *packets, u64 *bytes);
+ u32 mlx5_fc_id(struct mlx5_fc *counter);
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
+ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ u8 namespace, u8 num_actions,
+ void *modify_actions, u32 *modify_header_id);
+ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
+ u32 modify_header_id);
+ int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type namespace,
+ u32 *packet_reformat_id);
+ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
+ u32 packet_reformat_id);
#endif
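mlx5_packet_reformat_alloc()/mlx5_packet_reformat_dealloc() replace the old encap-header alloc/dealloc entry points and now take the target flow namespace. A sketch of allocating an encap context for the FDB namespace is shown below; MLX5_REFORMAT_TYPE_L2_TO_VXLAN is assumed to be one of the reformat types introduced alongside the rename and is used purely as an example.

/* Sketch only: allocate a packet reformat (encap) context and hand back its
 * id for use in mlx5_flow_act::reformat_id.
 */
static int example_alloc_encap(struct mlx5_core_dev *dev,
                               void *encap_header, size_t encap_size,
                               u32 *reformat_id)
{
        return mlx5_packet_reformat_alloc(dev,
                                          MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
                                          encap_size, encap_header,
                                          MLX5_FLOW_NAMESPACE_FDB,
                                          reformat_id);
}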
@@ -471,6 +471,7 @@ struct mlx5_core_qp {
int qpn;
struct mlx5_rsc_debug *dbg;
int pid;
+ u16 uid;
};
struct mlx5_core_dct {
......
@@ -61,6 +61,7 @@ struct mlx5_srq_attr {
u32 tm_next_tag;
u32 tm_hw_phase_cnt;
u32 tm_sw_phase_cnt;
+ u16 uid;
};
struct mlx5_core_dev;
......