Commit a5504093 authored by David S. Miller

Merge tag 'mlx5-updates-2021-08-26' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
This patch series contains various fixes, additions and improvements to
mlx5 software steering.

Patch 1:
  adds support for REMOVE_HEADER packet reformat - a new reformat type
  that is supported starting with ConnectX-6 DX, and allows removing an
  arbitrary-size packet segment at a selected position.
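
  For reference, a rough sketch of how the new type is encoded (the wrapper
  below is hypothetical; dr_ste_v1_set_remove_hdr() and the reformat
  attribute fields are taken from the hunks further down):

	/* Hypothetical caller sketch, not the driver's code: the anchor,
	 * byte offset and segment length travel in the reformat attributes.
	 * The hardware works in 2-byte words, so the setter halves the
	 * offset and size.
	 */
	static void example_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
					   struct mlx5dr_ste_actions_attr *attr)
	{
		dr_ste_v1_set_remove_hdr(hw_ste_p, s_action,
					 attr->reformat.param_0, /* start anchor */
					 attr->reformat.param_1, /* offset, bytes */
					 attr->reformat.size);   /* length, bytes */
	}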

Patches 2 and 3:
  add support for VLAN pop on TX and VLAN push on RX flows.
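
  The new flows are advertised through two new STE-context capability bits
  (see the mlx5dr_ste_ctx_action_cap hunk below); a minimal sketch, assuming
  a caller that probes the context's actions_caps mask:

	/* Sketch only: STEv1 is the first context to set these bits, so
	 * TX VLAN pop / RX VLAN push can be gated on the capability mask.
	 */
	static bool example_vlan_action_supported(struct mlx5dr_ste_ctx *ctx,
						  bool rx)
	{
		return rx ? !!(ctx->actions_caps & DR_STE_CTX_ACTION_CAP_RX_PUSH)
			  : !!(ctx->actions_caps & DR_STE_CTX_ACTION_CAP_TX_POP);
	}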

Patch 4:
  enables retransmission mechanism for the SW Steering RC QP.
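
  Concretely, retransmission is enabled by programming the retry fields of
  the QP context during the RTR-to-RTS transition (the values below are
  illustrative; the patch takes them from the QP attributes, as the hunk
  further down shows):

	/* Illustrative QPC knobs for an RC QP with retransmission; in IB
	 * semantics an rnr_retry of 7 means "retry indefinitely", while
	 * retry_count is capped at 7 transport retries.
	 */
	MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt); /* transport retries */
	MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);   /* RNR NAK retries */
	MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */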

Patch 5:
  improves the error flow in building the STE array and adds
  a more informative printout for an invalid action sequence.

Patch 6:
  improves error flow on SW Steering QP error.

Patch 7:
  reduces the log level of a message that is printed when a table is
  connected to a lower/same level destination table, as this case has
  proven to be less rare than it once was.

Patch 8:
  adds missing support for matching on IPv6 flow label for devices
  older than ConnectX-6 DX.

Patch 9:
  replaces uintN_t types with kernel-style types.

Patch 10:
  makes sure the right API is used for updating flow tables - if it is
  a FW-owned table, the FW API is used.
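
  The fix boils down to a dispatch on table ownership, applied in the
  modify_flow_table and update_fte hunks below:

	/* Pattern used by the patch (identifiers from the hunks below):
	 * FW-owned tables go through the FW command layer, while
	 * SW-steering tables keep using the SMFS path.
	 */
	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);

	return set_miss_action(ns, ft, next_ft);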

Patch 11:
  adds support for 'ignore_flow_level' on multi-destination flow
  tables that are created by SW Steering.
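
  Condensed from the hunks below (all identifiers appear in this series),
  the flag travels from the fs-core action flags down to the
  SET_FLOW_TABLE_ENTRY command:

	bool ignore_flow_level =
		!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);

	/* fs_dr passes it when creating the multi-destination table... */
	tmp_action = mlx5dr_action_create_mult_dest_tbl(domain, term_actions,
							num_term_actions,
							ignore_flow_level);

	/* ...and dr_cmd finally sets it on the FW command. */
	MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level);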

Patch 12:
   optimizes the FDB RX steering rule by skipping the match on source port,
   as the source port of all incoming packets equals the wire port.

Patch 13:
   is a small code refactoring - it merges several DR_STE_SIZE enums
   into a single enum.

Patch 14:
   does some additional refactoring and removes HW-specific STE type
   from NIC domain.

Patch 15:
   removes rehash ctrl struct from dr_htbl struct and saves some memory.

Patch 16:
   makes a more significant improvement in terms of memory consumption,
   saving about 1.6 GB for 8M rules (roughly 200 bytes per rule).
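
   The saving comes from dropping the per-STE, dynamically allocated
   rule-member objects: a rule now stores only its last STE, and the STE
   chain is recovered on demand by walking backwards. A condensed sketch of
   the walk (identifiers from the hunks below):

	struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste;

	/* From the rule's last STE, hop to the head of the current miss
	 * list, then to the STE pointing at that hash table, until the
	 * chain's first STE (chain location 1) is reached.
	 */
	while (curr_ste->ste_chain_location != 1) {
		struct mlx5dr_ste *first_ste =
			list_first_entry(mlx5dr_ste_get_miss_list(curr_ste),
					 struct mlx5dr_ste, miss_list_node);

		curr_ste = first_ste->htbl->pointing_ste;
	}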

Patch 17:
   adds support for update FTE, which is needed for cases where
   multiple rules share the same match.
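
   The update is done make-before-break (see mlx5_cmd_dr_update_fte() below):
   the new rule is installed before the old one is deleted, so there is no
   window in which no rule is present. A condensed sketch:

	struct fs_fte fte_tmp = {};

	fte_tmp.fs_dr_rule = fte->fs_dr_rule;            /* back up old rule */
	memset(&fte->fs_dr_rule, 0, sizeof(struct mlx5_fs_dr_rule));

	ret = mlx5_cmd_dr_create_fte(ns, ft, group, fte);   /* add new first */
	if (!ret)
		ret = mlx5_cmd_dr_delete_fte(ns, ft, &fte_tmp); /* then drop old */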
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5ab54e57 a2ebfbb7
......@@ -655,6 +655,7 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level);
if (ft->vport) {
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport, 1);
......
......@@ -245,7 +245,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
return -ENOTSUPP;
dmn->info.supp_sw_steering = true;
dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
break;
......@@ -254,7 +254,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
return -ENOTSUPP;
dmn->info.supp_sw_steering = true;
dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
break;
......@@ -265,8 +265,8 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
return -ENOTSUPP;
dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
if (!vport_cap) {
mlx5dr_err(dmn, "Failed to get esw manager vport\n");
......
......@@ -103,7 +103,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
int num_dest,
bool reformat_req,
u32 *tbl_id,
u32 *group_id)
u32 *group_id,
bool ignore_flow_level)
{
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_cmd_fte_info fte_info = {};
......@@ -137,6 +138,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
fte_info.dests_size = num_dest;
fte_info.val = val;
fte_info.dest_arr = dest;
fte_info.ignore_flow_level = ignore_flow_level;
ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
if (ret) {
......
......@@ -396,13 +396,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_match_param mask = {};
bool allow_empty_match = false;
struct mlx5dr_ste_build *sb;
bool inner, rx;
int idx = 0;
int ret, i;
sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
rx = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
/* Create a temporary mask to track and clear used mask fields */
if (matcher->match_criteria & DR_MATCHER_CRITERIA_OUTER)
......@@ -428,6 +429,16 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (ret)
return ret;
/* Optimize RX pipe by reducing source port match, since
* the FDB RX part is connected only to the wire.
*/
if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
rx && mask.misc.source_port) {
mask.misc.source_port = 0;
mask.misc.source_eswitch_owner_vhca_id = 0;
allow_empty_match = true;
}
/* Outer */
if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER |
DR_MATCHER_CRITERIA_MISC |
......@@ -619,7 +630,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
}
/* Empty matcher, takes all */
if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
if ((!idx && allow_empty_match) ||
matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
if (idx == 0) {
......
......@@ -81,6 +81,7 @@ dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
}
ste->ste_chain_location = orig_ste->ste_chain_location;
ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;
/* In collision entry, all members share the same miss_list_head */
ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
......@@ -185,6 +186,9 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
if (!new_ste)
return NULL;
/* Update collision pointing STE */
new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;
/* In collision entry, all members share the same miss_list_head */
new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
......@@ -212,7 +216,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
new_ste->next_htbl = cur_ste->next_htbl;
new_ste->ste_chain_location = cur_ste->ste_chain_location;
if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location))
if (new_ste->next_htbl)
new_ste->next_htbl->pointing_ste = new_ste;
/* We need to copy the refcount since this ste
......@@ -220,10 +224,8 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
*/
new_ste->refcount = cur_ste->refcount;
/* Link old STEs rule_mem list to the new ste */
mlx5dr_rule_update_rule_member(cur_ste, new_ste);
INIT_LIST_HEAD(&new_ste->rule_list);
list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list);
/* Link old STEs rule to the new ste */
mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false);
}
static struct mlx5dr_ste *
......@@ -404,7 +406,7 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
dmn->info.caps.gvmi,
nic_dmn,
nic_dmn->type,
new_htbl,
formatted_ste,
&info);
......@@ -581,34 +583,66 @@ static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
return -ENOMEM;
}
/* While the pointer of ste is no longer valid, like while moving ste to be
* the first in the miss_list, and to be in the origin table,
* all rule-members that are attached to this ste should update their ste member
* to the new pointer
*/
void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste,
struct mlx5dr_ste *new_ste)
void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
struct mlx5dr_ste *ste,
bool force)
{
/* Update rule member is usually done for the last STE or during rule
* creation to recover from mid-creation failure (for this purpose the
* force flag is used)
*/
if (ste->next_htbl && !force)
return;
/* Update is required since each rule keeps track of its last STE */
ste->rule_rx_tx = nic_rule;
nic_rule->last_rule_ste = ste;
}
static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste)
{
struct mlx5dr_ste *first_ste;
first_ste = list_first_entry(mlx5dr_ste_get_miss_list(curr_ste),
struct mlx5dr_ste, miss_list_node);
return first_ste->htbl->pointing_ste;
}
int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
struct mlx5dr_ste *curr_ste,
int *num_of_stes)
{
struct mlx5dr_rule_member *rule_mem;
bool first = false;
*num_of_stes = 0;
if (!curr_ste)
return -ENOENT;
/* Iterate from last to first */
while (!first) {
first = curr_ste->ste_chain_location == 1;
ste_arr[*num_of_stes] = curr_ste;
*num_of_stes += 1;
curr_ste = dr_rule_get_pointed_ste(curr_ste);
}
list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
rule_mem->ste = new_ste;
return 0;
}
static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
struct mlx5dr_rule_rx_tx *nic_rule)
{
struct mlx5dr_rule_member *rule_mem;
struct mlx5dr_rule_member *tmp_mem;
struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste;
int i;
if (list_empty(&nic_rule->rule_members_list))
if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
return;
list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) {
list_del(&rule_mem->list);
list_del(&rule_mem->use_ste_list);
mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher);
kvfree(rule_mem);
}
while (i--)
mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher);
}
static u16 dr_get_bits_per_mask(u16 byte_mask)
......@@ -628,43 +662,25 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
struct mlx5dr_domain_rx_tx *nic_dmn)
{
struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
int threshold;
if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
return false;
if (!ctrl->may_grow)
if (!mlx5dr_ste_htbl_may_grow(htbl))
return false;
if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
return false;
if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
(ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
if (ctrl->num_of_collisions >= threshold &&
(ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold)
return true;
return false;
}
static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
struct mlx5dr_ste *ste)
{
struct mlx5dr_rule_member *rule_mem;
rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL);
if (!rule_mem)
return -ENOMEM;
INIT_LIST_HEAD(&rule_mem->list);
INIT_LIST_HEAD(&rule_mem->use_ste_list);
rule_mem->ste = ste;
list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
list_add_tail(&rule_mem->use_ste_list, &ste->rule_list);
return 0;
}
static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
struct mlx5dr_rule_rx_tx *nic_rule,
struct list_head *send_ste_list,
......@@ -679,15 +695,13 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
u8 *curr_hw_ste, *prev_hw_ste;
struct mlx5dr_ste *action_ste;
int i, k, ret;
int i, k;
/* Two cases:
* 1. num_of_builders is equal to new_hw_ste_arr_sz, the action fits in the ste
* 2. num_of_builders is less than new_hw_ste_arr_sz, a new ste was added
* to support the action.
*/
if (num_of_builders == new_hw_ste_arr_sz)
return 0;
for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
......@@ -700,6 +714,10 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
mlx5dr_ste_get(action_ste);
action_ste->htbl->pointing_ste = last_ste;
last_ste->next_htbl = action_ste->htbl;
last_ste = action_ste;
/* While freeing the ste we go over the miss list, so add this ste to the list */
list_add_tail(&action_ste->miss_list_node,
mlx5dr_ste_get_miss_list(action_ste));
......@@ -713,21 +731,19 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
prev_hw_ste,
action_ste->htbl);
ret = dr_rule_add_member(nic_rule, action_ste);
if (ret) {
mlx5dr_dbg(dmn, "Failed adding rule member\n");
goto free_ste_info;
}
mlx5dr_rule_set_last_member(nic_rule, action_ste, true);
mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
curr_hw_ste,
ste_info_arr[k],
send_ste_list, false);
}
last_ste->next_htbl = NULL;
return 0;
free_ste_info:
kfree(ste_info_arr[k]);
err_exit:
mlx5dr_ste_put(action_ste, matcher, nic_matcher);
return -ENOMEM;
......@@ -1015,12 +1031,12 @@ static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
}
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
enum mlx5dr_ste_entry_type ste_type,
enum mlx5dr_domain_nic_type nic_type,
struct mlx5dr_match_param *mask,
struct mlx5dr_match_param *value,
u32 flow_source)
{
bool rx = ste_type == MLX5DR_STE_TYPE_RX;
bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
if (domain != MLX5DR_DOMAIN_TYPE_FDB)
return false;
......@@ -1065,9 +1081,7 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
nic_matcher = nic_rule->nic_matcher;
nic_dmn = nic_matcher->nic_tbl->nic_dmn;
INIT_LIST_HEAD(&nic_rule->rule_members_list);
if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param,
if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param,
rule->flow_source))
return 0;
......@@ -1121,14 +1135,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
cur_htbl = ste->next_htbl;
/* Keep all STEs in the rule struct */
ret = dr_rule_add_member(nic_rule, ste);
if (ret) {
mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i);
goto free_ste;
}
mlx5dr_ste_get(ste);
mlx5dr_rule_set_last_member(nic_rule, ste, true);
}
/* Connect actions */
......@@ -1153,8 +1161,6 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
return 0;
free_ste:
mlx5dr_ste_put(ste, matcher, nic_matcher);
free_rule:
dr_rule_clean_rule_members(rule, nic_rule);
/* Clean all ste_info's */
......
......@@ -325,10 +325,14 @@ static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
do {
ne = dr_poll_cq(send_ring->cq, 1);
if (ne < 0)
if (unlikely(ne < 0)) {
mlx5_core_warn_once(dmn->mdev, "SMFS QPN 0x%x is disabled/limited",
send_ring->qp->qpn);
send_ring->err_state = true;
return ne;
else if (ne == 1)
} else if (ne == 1) {
send_ring->pending_wqe -= send_ring->signal_th;
}
} while (is_drain && send_ring->pending_wqe);
return 0;
......@@ -361,6 +365,14 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
u32 buff_offset;
int ret;
if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
send_ring->err_state)) {
mlx5_core_dbg_once(dmn->mdev,
"Skipping post send: QP err state: %d, device state: %d\n",
send_ring->err_state, dmn->mdev->state);
return 0;
}
spin_lock(&send_ring->lock);
ret = dr_handle_pending_wc(dmn, send_ring);
......@@ -620,6 +632,7 @@ static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */
MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
......
......@@ -172,9 +172,6 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
dst->next_htbl->pointing_ste = dst;
dst->refcount = src->refcount;
INIT_LIST_HEAD(&dst->rule_list);
list_splice_tail_init(&src->rule_list, &dst->rule_list);
}
/* Free ste which is the head and the only one in miss_list */
......@@ -233,12 +230,12 @@ dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
/* Remove from the miss_list the next_ste before copy */
list_del_init(&next_ste->miss_list_node);
/* All rule-members that use next_ste should know about that */
mlx5dr_rule_update_rule_member(next_ste, ste);
/* Move data from next into ste */
dr_ste_replace(ste, next_ste);
/* Update the rule on STE change */
mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);
/* Copy all 64 hw_ste bytes */
memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
sb_idx = ste->ste_chain_location - 1;
......@@ -382,14 +379,15 @@ void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
enum mlx5dr_domain_nic_type nic_type,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
struct mlx5dr_htbl_connect_info *connect_info)
{
bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
struct mlx5dr_ste ste = {};
ste_ctx->ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
ste.hw_ste = formatted_ste;
if (connect_info->type == CONNECT_HIT)
......@@ -408,7 +406,7 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
dmn->info.caps.gvmi,
nic_dmn,
nic_dmn->type,
htbl,
formatted_ste,
connect_info);
......@@ -466,21 +464,6 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
return -ENOENT;
}
static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
{
struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
int num_of_entries;
htbl->ctrl.may_grow = true;
if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
htbl->ctrl.may_grow = false;
/* Threshold is 50%, one is added to table of size 1 */
num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
ctrl->increase_threshold = (num_of_entries + 1) / 2;
}
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size,
u16 lu_type, u16 byte_mask)
......@@ -513,11 +496,9 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
ste->refcount = 0;
INIT_LIST_HEAD(&ste->miss_list_node);
INIT_LIST_HEAD(&htbl->miss_list[i]);
INIT_LIST_HEAD(&ste->rule_list);
}
htbl->chunk_size = chunk_size;
dr_ste_set_ctrl(htbl);
return htbl;
out_free_htbl:
......@@ -649,6 +630,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
u8 *ste_arr)
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_build *sb;
......@@ -663,7 +645,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
for (i = 0; i < nic_matcher->num_of_builders; i++) {
ste_ctx->ste_init(ste_arr,
sb->lu_type,
nic_dmn->ste_type,
is_rx,
dmn->info.caps.gvmi);
mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
......
......@@ -146,7 +146,7 @@ struct mlx5dr_ste_ctx {
/* Getters and Setters */
void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
u8 entry_type, u16 gvmi);
bool is_rx, u16 gvmi);
void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
u16 (*get_next_lu_type)(u8 *hw_ste_p);
void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
......
......@@ -8,6 +8,12 @@
#define SVLAN_ETHERTYPE 0x88a8
#define DR_STE_ENABLE_FLOW_TAG BIT(31)
enum dr_ste_v0_entry_type {
DR_STE_TYPE_TX = 1,
DR_STE_TYPE_RX = 2,
DR_STE_TYPE_MODIFY_PKT = 6,
};
enum dr_ste_v0_action_tunl {
DR_STE_TUNL_ACTION_NONE = 0,
DR_STE_TUNL_ACTION_ENABLE = 1,
......@@ -292,8 +298,8 @@ static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index);
}
static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
u8 entry_type, u16 gvmi)
static void dr_ste_v0_init_full(u8 *hw_ste_p, u16 lu_type,
enum dr_ste_v0_entry_type entry_type, u16 gvmi)
{
dr_ste_v0_set_entry_type(hw_ste_p, entry_type);
dr_ste_v0_set_lu_type(hw_ste_p, lu_type);
......@@ -307,6 +313,15 @@ static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
}
static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
bool is_rx, u16 gvmi)
{
enum dr_ste_v0_entry_type entry_type;
entry_type = is_rx ? DR_STE_TYPE_RX : DR_STE_TYPE_TX;
dr_ste_v0_init_full(hw_ste_p, lu_type, entry_type, gvmi);
}
static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
{
MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
......@@ -380,13 +395,13 @@ static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
static void dr_ste_v0_arr_init_next(u8 **last_ste,
u32 *added_stes,
enum mlx5dr_ste_entry_type entry_type,
enum dr_ste_v0_entry_type entry_type,
u16 gvmi)
{
(*added_stes)++;
*last_ste += DR_STE_SIZE;
dr_ste_v0_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
entry_type, gvmi);
dr_ste_v0_init_full(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
entry_type, gvmi);
}
static void
......@@ -404,7 +419,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
* modify headers for outer headers only
*/
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_rewrite_actions(last_ste,
attr->modify_actions,
attr->modify_index);
......@@ -417,7 +432,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
MLX5DR_STE_TYPE_TX,
DR_STE_TYPE_TX,
attr->gvmi);
dr_ste_v0_set_tx_push_vlan(last_ste,
......@@ -435,7 +450,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
action_type_set[DR_ACTION_TYP_PUSH_VLAN])
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
MLX5DR_STE_TYPE_TX,
DR_STE_TYPE_TX,
attr->gvmi);
dr_ste_v0_set_tx_encap(last_ste,
......@@ -469,7 +484,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
dr_ste_v0_set_rewrite_actions(last_ste,
attr->decap_actions,
......@@ -488,7 +503,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
MLX5DR_STE_TYPE_RX,
DR_STE_TYPE_RX,
attr->gvmi);
dr_ste_v0_set_rx_pop_vlan(last_ste);
......@@ -496,13 +511,13 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
}
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
MLX5DR_STE_TYPE_MODIFY_PKT,
DR_STE_TYPE_MODIFY_PKT,
attr->gvmi);
else
dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_rewrite_actions(last_ste,
attr->modify_actions,
......@@ -510,10 +525,10 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
}
if (action_type_set[DR_ACTION_TYP_TAG]) {
if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
MLX5DR_STE_TYPE_RX,
DR_STE_TYPE_RX,
attr->gvmi);
dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag);
......@@ -1157,6 +1172,7 @@ dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
......@@ -1168,6 +1184,11 @@ dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
if (sb->inner)
DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, inner_ipv6_flow_label);
else
DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, outer_ipv6_flow_label);
if (spec->tcp_flags) {
DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
spec->tcp_flags = 0;
......@@ -1772,7 +1793,7 @@ dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
u8 *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
......@@ -1802,7 +1823,7 @@ static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *s
static int
dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
u8 *tag)
{
if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
......@@ -1829,7 +1850,7 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
static int
dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
u8 *tag)
{
if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
......
......@@ -322,7 +322,7 @@ static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
}
static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
u8 entry_type, u16 gvmi)
bool is_rx, u16 gvmi)
{
dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
......@@ -402,8 +402,23 @@ static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action,
u32 vlan_hdr)
static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
u8 anchor, u8 offset,
int size)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_anchor, anchor);
/* The hardware expects the size and offset here in words (2 bytes) */
MLX5_SET(ste_single_action_remove_header_size_v1, s_action, remove_size, size / 2);
MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_offset, offset / 2);
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
u32 vlan_hdr)
{
MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
......@@ -416,7 +431,7 @@ static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_set_rx_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
......@@ -503,13 +518,28 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
{
u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
bool allow_modify_hdr = true;
bool allow_encap = true;
if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_modify_hdr = false;
}
if (action_type_set[DR_ACTION_TYP_CTR])
dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1,
......@@ -534,7 +564,8 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
dr_ste_v1_set_tx_push_vlan(last_ste, action, attr->vlans.headers[i]);
dr_ste_v1_set_push_vlan(last_ste, action,
attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
......@@ -579,6 +610,18 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
dr_ste_v1_set_remove_hdr(last_ste, action,
attr->reformat.param_0,
attr->reformat.param_1,
attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
......@@ -635,7 +678,7 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
allow_ctr = false;
}
dr_ste_v1_set_rx_pop_vlan(last_ste, action, attr->vlans.count);
dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
......@@ -656,6 +699,26 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_DOUBLE_SZ;
}
if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
int i;
for (i = 0; i < attr->vlans.count; i++) {
if (action_sz < DR_STE_ACTION_DOUBLE_SZ ||
!allow_modify_hdr) {
dr_ste_v1_arr_init_next_match(&last_ste,
added_stes,
attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
dr_ste_v1_set_push_vlan(last_ste, action,
attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
}
if (action_type_set[DR_ACTION_TYP_CTR]) {
/* Counter action set after decap and before insert_hdr
* to exclude the decapped / encapped header respectively.
......@@ -714,6 +777,20 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = true;
allow_ctr = true;
}
dr_ste_v1_set_remove_hdr(last_ste, action,
attr->reformat.param_0,
attr->reformat.param_1,
attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
......@@ -1844,7 +1921,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
u8 *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
......@@ -1868,7 +1945,7 @@ static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *s
static int
dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
u8 *tag)
{
if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
......@@ -1895,7 +1972,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
static int
dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
u8 *tag)
{
if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
......@@ -1960,7 +2037,9 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_byte_mask = &dr_ste_v1_set_byte_mask,
.get_byte_mask = &dr_ste_v1_get_byte_mask,
/* Actions */
.actions_caps = DR_STE_CTX_ACTION_CAP_RX_ENCAP,
.actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
DR_STE_CTX_ACTION_CAP_RX_PUSH |
DR_STE_CTX_ACTION_CAP_RX_ENCAP,
.set_actions_rx = &dr_ste_v1_set_actions_rx,
.set_actions_tx = &dr_ste_v1_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
......
......@@ -83,15 +83,14 @@ enum {
DR_STE_SIZE_CTRL = 32,
DR_STE_SIZE_TAG = 16,
DR_STE_SIZE_MASK = 16,
};
enum {
DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
};
enum mlx5dr_ste_ctx_action_cap {
DR_STE_CTX_ACTION_CAP_NONE = 0,
DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 0,
DR_STE_CTX_ACTION_CAP_TX_POP = 1 << 0,
DR_STE_CTX_ACTION_CAP_RX_PUSH = 1 << 1,
DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2,
};
enum {
......@@ -124,6 +123,7 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_POP_VLAN,
DR_ACTION_TYP_PUSH_VLAN,
DR_ACTION_TYP_INSERT_HDR,
DR_ACTION_TYP_REMOVE_HDR,
DR_ACTION_TYP_SAMPLER,
DR_ACTION_TYP_MAX,
};
......@@ -140,6 +140,7 @@ struct mlx5dr_icm_buddy_mem;
struct mlx5dr_ste_htbl;
struct mlx5dr_match_param;
struct mlx5dr_cmd_caps;
struct mlx5dr_rule_rx_tx;
struct mlx5dr_matcher_rx_tx;
struct mlx5dr_ste_ctx;
......@@ -151,14 +152,14 @@ struct mlx5dr_ste {
/* attached to the miss_list head at each htbl entry */
struct list_head miss_list_node;
/* each rule member that uses this ste attached here */
struct list_head rule_list;
/* this ste is member of htbl */
struct mlx5dr_ste_htbl *htbl;
struct mlx5dr_ste_htbl *next_htbl;
/* The rule this STE belongs to */
struct mlx5dr_rule_rx_tx *rule_rx_tx;
/* this ste is part of a rule, located in ste's chain */
u8 ste_chain_location;
};
......@@ -171,8 +172,6 @@ struct mlx5dr_ste_htbl_ctrl {
/* total number of collisions entries attached to this table */
unsigned int num_of_collisions;
unsigned int increase_threshold;
u8 may_grow:1;
};
struct mlx5dr_ste_htbl {
......@@ -804,10 +803,15 @@ struct mlx5dr_cmd_caps {
u8 isolate_vl_tc:1;
};
enum mlx5dr_domain_nic_type {
DR_DOMAIN_NIC_TYPE_RX,
DR_DOMAIN_NIC_TYPE_TX,
};
struct mlx5dr_domain_rx_tx {
u64 drop_icm_addr;
u64 default_icm_addr;
enum mlx5dr_ste_entry_type ste_type;
enum mlx5dr_domain_nic_type type;
struct mutex mutex; /* protect rx/tx domain */
};
......@@ -885,14 +889,6 @@ struct mlx5dr_matcher {
struct mlx5dv_flow_matcher *dv_matcher;
};
struct mlx5dr_rule_member {
struct mlx5dr_ste *ste;
/* attached to mlx5dr_rule via this */
struct list_head list;
/* attached to mlx5dr_ste via this */
struct list_head use_ste_list;
};
struct mlx5dr_ste_action_modify_field {
u16 hw_field;
u8 start;
......@@ -993,8 +989,8 @@ struct mlx5dr_htbl_connect_info {
};
struct mlx5dr_rule_rx_tx {
struct list_head rule_members_list;
struct mlx5dr_matcher_rx_tx *nic_matcher;
struct mlx5dr_ste *last_rule_ste;
};
struct mlx5dr_rule {
......@@ -1005,8 +1001,12 @@ struct mlx5dr_rule {
u32 flow_source;
};
void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
struct mlx5dr_ste *ste);
void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
struct mlx5dr_ste *ste,
bool force);
int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
struct mlx5dr_ste *curr_ste,
int *num_of_stes);
struct mlx5dr_icm_chunk {
struct mlx5dr_icm_buddy_mem *buddy_mem;
......@@ -1083,6 +1083,25 @@ mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
return entry_size * num_of_entries;
}
static inline int
mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
{
int num_of_entries =
mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
/* Threshold is 50%, one is added to table of size 1 */
return (num_of_entries + 1) / 2;
}
static inline bool
mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
{
if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
return false;
return true;
}
static inline struct mlx5dr_cmd_vport_cap *
mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
{
......@@ -1216,7 +1235,7 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
bool update_hw_ste);
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
enum mlx5dr_domain_nic_type nic_type,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
struct mlx5dr_htbl_connect_info *connect_info);
......@@ -1282,6 +1301,7 @@ struct mlx5dr_send_ring {
u8 sync_buff[MIN_READ_SYNC];
struct mlx5dr_mr *sync_mr;
spinlock_t lock; /* Protect the data path of the send ring */
bool err_state; /* send_ring is not usable in err state */
};
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
......@@ -1333,6 +1353,7 @@ struct mlx5dr_cmd_fte_info {
u32 *val;
struct mlx5_flow_act action;
struct mlx5dr_cmd_flow_destination_hw_info *dest_arr;
bool ignore_flow_level;
};
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
......@@ -1362,7 +1383,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
int num_dest,
bool reformat_req,
u32 *tbl_id,
u32 *group_id);
u32 *group_id,
bool ignore_flow_level);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
u32 group_id);
#endif /* _DR_TYPES_H_ */
......@@ -133,6 +133,9 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
return set_miss_action(ns, ft, next_ft);
}
......@@ -487,9 +490,13 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = term_actions->dest;
} else if (num_term_actions > 1) {
bool ignore_flow_level =
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
term_actions,
num_term_actions);
num_term_actions,
ignore_flow_level);
if (!tmp_action) {
err = -EOPNOTSUPP;
goto free_actions;
......@@ -557,6 +564,9 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns
case MLX5_REFORMAT_TYPE_INSERT_HDR:
dr_reformat = DR_ACTION_REFORMAT_TYP_INSERT_HDR;
break;
case MLX5_REFORMAT_TYPE_REMOVE_HDR:
dr_reformat = DR_ACTION_REFORMAT_TYP_REMOVE_HDR;
break;
default:
mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
params->type);
......@@ -615,15 +625,6 @@ static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *n
mlx5dr_action_destroy(modify_hdr->action.dr_action);
}
static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
int modify_mask,
struct fs_fte *fte)
{
return -EOPNOTSUPP;
}
static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
......@@ -648,6 +649,36 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
return 0;
}
static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
int modify_mask,
struct fs_fte *fte)
{
struct fs_fte fte_tmp = {};
int ret;
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);
/* Backup current dr rule details */
fte_tmp.fs_dr_rule = fte->fs_dr_rule;
memset(&fte->fs_dr_rule, 0, sizeof(struct mlx5_fs_dr_rule));
/* First add the new updated rule, then delete the old rule */
ret = mlx5_cmd_dr_create_fte(ns, ft, group, fte);
if (ret)
goto restore_fte;
ret = mlx5_cmd_dr_delete_fte(ns, ft, &fte_tmp);
WARN_ONCE(ret, "dr update fte duplicate rule deletion failed\n");
return ret;
restore_fte:
fte->fs_dr_rule = fte_tmp.fs_dr_rule;
return ret;
}
static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns)
{
......
......@@ -8,12 +8,6 @@ enum {
MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f,
};
enum mlx5dr_ste_entry_type {
MLX5DR_STE_TYPE_TX = 1,
MLX5DR_STE_TYPE_RX = 2,
MLX5DR_STE_TYPE_MODIFY_PKT = 6,
};
struct mlx5_ifc_ste_general_bits {
u8 entry_type[0x4];
u8 reserved_at_4[0x4];
......
......@@ -27,6 +27,7 @@ enum mlx5dr_action_reformat_type {
DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2,
DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3,
DR_ACTION_REFORMAT_TYP_INSERT_HDR,
DR_ACTION_REFORMAT_TYP_REMOVE_HDR,
};
struct mlx5dr_match_parameters {
......@@ -94,7 +95,8 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests);
u32 num_of_dests,
bool ignore_flow_level);
struct mlx5dr_action *mlx5dr_action_create_drop(void);
......