Commit 21586a0f authored by Alex Vesker, committed by Saeed Mahameed

net/mlx5: DR, Limit STE hash table enlarge based on bytemask

When an STE hash table has too many collisions we enlarge it
to a bigger hash table (rehash). The improvement gained by
rehashing depends on the byte mask value: the more bits set in
the byte mask, the better the entries spread across the table.

Without this fix, tables can grow without providing any
improvement, which can lead to memory depletion and failures.

This patch limits table rehashing to reduce memory usage and
improve performance.
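
As a standalone illustration of the new check (a minimal sketch, not
driver code: popcount16 and the sample values are ours, and we assume
chunk_size is the log2 of the table's entry count, as the patch's
comparison implies), growth stops paying off once the set bits in the
byte mask, times BITS_PER_BYTE, no longer exceed the table's log2 size:

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

/* Count set bits with Kernighan's loop, as dr_get_bits_per_mask() does. */
static uint16_t popcount16(uint16_t byte_mask)
{
	uint16_t bits = 0;

	while (byte_mask) {
		byte_mask &= byte_mask - 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	uint16_t byte_mask = 0x0003;	/* 2 bytes feed the hash -> 16 useful bits */
	uint16_t chunk_size = 16;	/* log2 of the current table size */

	/* Same bound as the patch: a bigger table cannot spread entries better. */
	if (popcount16(byte_mask) * BITS_PER_BYTE <= chunk_size)
		printf("rehash skipped: hash cannot spread entries further\n");
	else
		printf("rehash may still help\n");

	return 0;
}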

Fixes: 41d07074 ("net/mlx5: DR, Expose steering rule functionality")
Signed-off-by: Alex Vesker <valex@mellanox.com>
Reviewed-by: Erez Shitrit <erezsh@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 83e79489
@@ -595,6 +595,18 @@ static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
 	}
 }
 
+static u16 dr_get_bits_per_mask(u16 byte_mask)
+{
+	u16 bits = 0;
+
+	while (byte_mask) {
+		byte_mask = byte_mask & (byte_mask - 1);
+		bits++;
+	}
+
+	return bits;
+}
+
 static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
 				      struct mlx5dr_domain *dmn,
 				      struct mlx5dr_domain_rx_tx *nic_dmn)
@@ -607,6 +619,9 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
 	if (!ctrl->may_grow)
 		return false;
 
+	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
+		return false;
+
 	if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
 	    (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
 		return true;
...
@@ -560,18 +560,6 @@ bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
 	return !refcount_read(&ste->refcount);
 }
 
-static u16 get_bits_per_mask(u16 byte_mask)
-{
-	u16 bits = 0;
-
-	while (byte_mask) {
-		byte_mask = byte_mask & (byte_mask - 1);
-		bits++;
-	}
-
-	return bits;
-}
-
 /* Init one ste as a pattern for ste data array */
 void mlx5dr_ste_set_formatted_ste(u16 gvmi,
 				  struct mlx5dr_domain_rx_tx *nic_dmn,
@@ -620,20 +608,12 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
 	struct mlx5dr_ste_htbl *next_htbl;
 
 	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
-		u32 bits_in_mask;
 		u8 next_lu_type;
 		u16 byte_mask;
 
 		next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
 		byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
 
-		/* Don't allocate table more than required,
-		 * the size of the table defined via the byte_mask, so no need
-		 * to allocate more than that.
-		 */
-		bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE;
-		log_table_size = min(log_table_size, bits_in_mask);
-
 		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
 						  log_table_size,
 						  next_lu_type,
...
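
For concreteness (illustrative numbers, not taken from the patch): a
byte_mask of 0x0001 has one set bit, so a single byte of the match value
feeds the hash, i.e. 8 useful bits. Once the table reaches chunk_size 8
(2^8 = 256 entries, again assuming chunk_size is the log2 of the entry
count), further doubling only adds buckets the hash can never select, so
dr_rule_need_enlarge_hash() now declines to rehash. Previously the bound
was applied only when allocating the next table in
mlx5dr_ste_create_next_htbl(), not when growing an existing one.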