Commit 2111bb97 authored by Yevgeny Kliteynik, committed by Saeed Mahameed

net/mlx5: HWS, added backward-compatible API handling

Added an implementation of the backward-compatible (BWC) steering API.
The native HWS API is very different from the SWS API:
 - SWS is synchronous (rule creation/deletion API call returns when
   the rule is created/deleted), while HWS is asynchronous (it
   requires polling for completion in order to know when the rule
   creation/deletion happened)
 - SWS manages its own memory (it allocates/frees all the needed
   memory for steering rules), while HWS requires the rules' memory
   to be allocated/freed outside the API

In order to make HWS fit the existing fs-core steering API paradigm,
this patch adds an implementation of a backward-compatible (BWC)
steering API that behaves similarly to SWS: among other things, it
encompasses all the rules' memory management and completion polling,
presenting the usual synchronous API to the upper layer.
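
As a rough sketch (not part of this patch), a hypothetical upper-layer
caller of the new BWC entry points could look roughly like the code
below. The wrapper name is made up, error handling is minimal, and it
assumes match_param and rule_actions were already built by the caller:

	/* Illustrative only - assumes mlx5hws_bwc.h declarations */
	static int example_bwc_add_del_rule(struct mlx5hws_bwc_matcher *bwc_matcher,
					    u32 *match_param,
					    struct mlx5hws_rule_action rule_actions[],
					    u32 flow_source,
					    u16 bwc_queue_idx)
	{
		struct mlx5hws_bwc_rule *bwc_rule;
		int ret;

		bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher);
		if (!bwc_rule)
			return -ENOMEM;

		/* Returns only once the rule actually exists in HW -
		 * completion polling is done inside the BWC layer.
		 */
		ret = mlx5hws_bwc_rule_create_simple(bwc_rule, match_param,
						     rule_actions, flow_source,
						     bwc_queue_idx);
		if (ret) {
			mlx5hws_bwc_rule_free(bwc_rule);
			return ret;
		}

		/* Deletion is synchronous as well */
		ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
		mlx5hws_bwc_rule_free(bwc_rule);

		return ret;
	}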

A user that wishes to utilize the full speed potential of HWS can
call the HWS async API and have rule insertion/deletion batching,
lower memory management overhead, and lower CPU utilization.
Such an approach will be taken by the future Connection Tracking
implementation.

Note that BWC steering doesn't yet support rules that require more
than one match STE (complex rules).
This support will be added later on.
Reviewed-by: Erez Shitrit <erezsh@nvidia.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent c61afff9
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
#ifndef MLX5HWS_BWC_H_
#define MLX5HWS_BWC_H_

#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255
#define MLX5HWS_BWC_MAX_ACTS 16

struct mlx5hws_bwc_matcher {
	struct mlx5hws_matcher *matcher;
	struct mlx5hws_match_template *mt;
	struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
	u8 num_of_at;
	u16 priority;
	u8 size_log;
	u32 num_of_rules; /* atomically accessed */
	struct list_head *rules; /* rule list per BWC queue */
};

struct mlx5hws_bwc_rule {
	struct mlx5hws_bwc_matcher *bwc_matcher;
	struct mlx5hws_rule *rule;
	u16 bwc_queue_idx;
	struct list_head list_node;
};
int
mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
				  struct mlx5hws_table *table,
				  u32 priority,
				  u8 match_criteria_enable,
				  struct mlx5hws_match_parameters *mask,
				  enum mlx5hws_action_type action_types[]);

int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);

struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);

void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);

int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
				   u32 *match_param,
				   struct mlx5hws_rule_action rule_actions[],
				   u32 flow_source,
				   u16 bwc_queue_idx);

int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);

void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
				u16 bwc_queue_idx,
				u32 flow_source,
				struct mlx5hws_rule_attr *rule_attr);
static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
{
	/* Besides the control queue, half of the queues are
	 * regular HWS queues, and the other half are BWC queues.
	 */
	return (ctx->queues - 1) / 2;
}

static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
{
	return idx + mlx5hws_bwc_queues(ctx);
}
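
/* Illustrative example (not part of this patch): with ctx->queues == 9,
 * mlx5hws_bwc_queues() returns 4, so BWC queue indexes 0..3 are mapped
 * by mlx5hws_bwc_get_queue_id() to queue ids 4..7, leaving the remaining
 * ids for the regular HWS queues and the control queue.
 */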
#endif /* MLX5HWS_BWC_H_ */
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
#include "mlx5hws_internal.h"
bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask)
{
struct mlx5hws_definer match_layout = {0};
struct mlx5hws_match_template *mt;
bool is_complex = false;
int ret;
if (!match_criteria_enable)
return false; /* empty matcher */
mt = mlx5hws_match_template_create(ctx,
mask->match_buf,
mask->match_sz,
match_criteria_enable);
if (!mt) {
mlx5hws_err(ctx, "BWC: failed creating match template\n");
return false;
}
	ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
	if (ret) {
		/* The only case that we're interested in is E2BIG,
		 * which means that the match parameters need to be
		 * split into a complex matcher.
		 * For all other cases (good or bad) - just return false
		 * and let the usual matcher creation path handle it,
		 * both for good and bad flows.
		 */
		if (ret == -E2BIG) {
			is_complex = true;
			mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n");
		} else {
			mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
		}
	}

	mlx5hws_match_template_destroy(mt);

	return is_complex;
}

int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
				       struct mlx5hws_table *table,
				       u32 priority,
				       u8 match_criteria_enable,
				       struct mlx5hws_match_parameters *mask)
{
	mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n");
	return -EOPNOTSUPP;
}

void
mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
{
	/* nothing to do here */
}

int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
				    struct mlx5hws_match_parameters *params,
				    u32 flow_source,
				    struct mlx5hws_rule_action rule_actions[],
				    u16 bwc_queue_idx)
{
	mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx,
		    "Complex rule is not supported yet\n");
	return -EOPNOTSUPP;
}

int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
{
	return 0;
}

int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
{
	mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
		    "Moving complex rule is not supported yet\n");
	return -EOPNOTSUPP;
}
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
#ifndef MLX5HWS_BWC_COMPLEX_H_
#define MLX5HWS_BWC_COMPLEX_H_

bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
					 u8 match_criteria_enable,
					 struct mlx5hws_match_parameters *mask);

int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
				       struct mlx5hws_table *table,
				       u32 priority,
				       u8 match_criteria_enable,
				       struct mlx5hws_match_parameters *mask);

void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);

int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher);

int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
				    struct mlx5hws_match_parameters *params,
				    u32 flow_source,
				    struct mlx5hws_rule_action rule_actions[],
				    u16 bwc_queue_idx);

int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule);

#endif /* MLX5HWS_BWC_COMPLEX_H_ */