Commit fbf6836d authored by Paul Blakey, committed by Saeed Mahameed

net/mlx5: CT: Create smfs dr matchers dynamically

SMFS dr matchers are processed sequentially in hardware according to
their priorities, and are not skipped if empty.

Currently, smfs ct fs creates four predefined dr matchers per ct
table (ct/ct nat) with hardcoded priorities. Compared to dmfs ct fs,
which uses autogroups, this might cause additional hops in the
fastpath for traffic patterns that match later priorities, even if
the previous priorities are empty, e.g. a user sending only ipv6 UDP
traffic will incur 3 additional hops.

Create the matchers dynamically on first rule usage, using the
highest priority available, and remove them on last usage.
Signed-off-by: Paul Blakey <paulb@nvidia.com>
Reviewed-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 3ee61ebb
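
The scheme underlying the patch is a first-fit priority allocator: each ct
table keeps its in-use matchers on a list sorted by priority, and a newly
created matcher takes the lowest priority value not already taken (in SMFS,
a lower value means the matcher is evaluated earlier). Below is a minimal,
self-contained C sketch of that walk; struct matcher and alloc_prio() are
hypothetical stand-ins for the driver types, not the kernel code.

/* Sketch only: first-free-priority allocation over a list kept sorted
 * by prio, mirroring the "find next available priority" walk below.
 */
#include <stdio.h>

struct matcher {
	int prio;
	struct matcher *next;
};

/* Walk the sorted list, stop at the first gap in the prio sequence,
 * and insert there so the list stays sorted.
 */
static int alloc_prio(struct matcher **head, struct matcher *node)
{
	struct matcher **pos = head;
	int prio = 0;

	while (*pos && (*pos)->prio == prio) {
		prio++;
		pos = &(*pos)->next;
	}
	node->prio = prio;
	node->next = *pos;
	*pos = node;
	return prio;
}

int main(void)
{
	struct matcher *head = NULL;
	struct matcher m[3] = {{ 0 }};

	printf("%d\n", alloc_prio(&head, &m[0])); /* 0 */
	printf("%d\n", alloc_prio(&head, &m[1])); /* 1 */
	head = head->next;                        /* release prio 0 */
	printf("%d\n", alloc_prio(&head, &m[2])); /* 0: the gap is reused */
	return 0;
}

Releasing a matcher frees its slot, so the next allocation fills the hole
instead of appending; active priorities therefore stay densely packed from
0, which is what keeps the fastpath hop count minimal.
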
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
 
+#include <linux/refcount.h>
+
 #include "en_tc.h"
 #include "en/tc_priv.h"
 #include "en/tc_ct.h"
@@ -13,24 +15,32 @@
 	netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
 
 #define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
 
+struct mlx5_ct_fs_smfs_matcher {
+	struct mlx5dr_matcher *dr_matcher;
+	struct list_head list;
+	int prio;
+	refcount_t ref;
+};
+
 struct mlx5_ct_fs_smfs_matchers {
-	struct mlx5dr_matcher *ipv4_tcp;
-	struct mlx5dr_matcher *ipv4_udp;
-	struct mlx5dr_matcher *ipv6_tcp;
-	struct mlx5dr_matcher *ipv6_udp;
+	struct mlx5_ct_fs_smfs_matcher smfs_matchers[4];
+	struct list_head used;
 };
 
 struct mlx5_ct_fs_smfs {
-	struct mlx5_ct_fs_smfs_matchers ct_matchers;
-	struct mlx5_ct_fs_smfs_matchers ct_matchers_nat;
+	struct mlx5dr_table *ct_tbl, *ct_nat_tbl;
+	struct mlx5_ct_fs_smfs_matchers matchers;
+	struct mlx5_ct_fs_smfs_matchers matchers_nat;
 	struct mlx5dr_action *fwd_action;
 	struct mlx5_flow_table *ct_nat;
+	struct mutex lock; /* Guards matchers */
 };
 
 struct mlx5_ct_fs_smfs_rule {
 	struct mlx5_ct_fs_rule fs_rule;
 	struct mlx5dr_rule *rule;
 	struct mlx5dr_action *count_action;
+	struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
 };
 
 static inline void
@@ -97,71 +107,75 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
 	return dr_matcher;
 }
 
-static int
-mlx5_ct_fs_smfs_matchers_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
-				struct mlx5_ct_fs_smfs_matchers *ct_matchers)
+static struct mlx5_ct_fs_smfs_matcher *
+mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp)
 {
-	const struct net_device *netdev = fs->netdev;
-	u32 prio = 0;
-	int err;
+	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+	struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
+	struct mlx5_ct_fs_smfs_matchers *matchers;
+	struct mlx5dr_matcher *dr_matcher;
+	struct mlx5dr_table *tbl;
+	struct list_head *prev;
+	int prio;
 
-	ct_matchers->ipv4_tcp = mlx5_ct_fs_smfs_matcher_create(fs, tbl, true, true, prio);
-	if (IS_ERR(ct_matchers->ipv4_tcp)) {
-		err = PTR_ERR(ct_matchers->ipv4_tcp);
-		netdev_warn(netdev,
-			    "%s, failed to create ipv4 tcp matcher, err: %d\n",
-			    INIT_ERR_PREFIX, err);
-		return err;
-	}
+	matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
+	smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp];
 
-	++prio;
-	ct_matchers->ipv4_udp = mlx5_ct_fs_smfs_matcher_create(fs, tbl, true, false, prio);
-	if (IS_ERR(ct_matchers->ipv4_udp)) {
-		err = PTR_ERR(ct_matchers->ipv4_udp);
-		netdev_warn(netdev,
-			    "%s, failed to create ipv4 udp matcher, err: %d\n",
-			    INIT_ERR_PREFIX, err);
-		goto err_matcher_ipv4_udp;
-	}
+	if (refcount_inc_not_zero(&smfs_matcher->ref))
+		return smfs_matcher;
 
-	++prio;
-	ct_matchers->ipv6_tcp = mlx5_ct_fs_smfs_matcher_create(fs, tbl, false, true, prio);
-	if (IS_ERR(ct_matchers->ipv6_tcp)) {
-		err = PTR_ERR(ct_matchers->ipv6_tcp);
-		netdev_warn(netdev,
-			    "%s, failed to create ipv6 tcp matcher, err: %d\n",
-			    INIT_ERR_PREFIX, err);
-		goto err_matcher_ipv6_tcp;
-	}
+	mutex_lock(&fs_smfs->lock);
 
-	++prio;
-	ct_matchers->ipv6_udp = mlx5_ct_fs_smfs_matcher_create(fs, tbl, false, false, prio);
-	if (IS_ERR(ct_matchers->ipv6_udp)) {
-		err = PTR_ERR(ct_matchers->ipv6_udp);
-		netdev_warn(netdev,
-			    "%s, failed to create ipv6 tcp matcher, err: %d\n",
-			    INIT_ERR_PREFIX, err);
-		goto err_matcher_ipv6_udp;
-	}
+	/* Retry with lock, as another thread might have already created the
	 * relevant matcher till we acquired the lock
	 */
+	if (refcount_inc_not_zero(&smfs_matcher->ref))
+		goto out_unlock;
 
-	return 0;
+	/* Find next available priority in sorted used list */
+	prio = 0;
+	prev = &matchers->used;
+	list_for_each_entry(m, &matchers->used, list) {
+		prev = &m->list;
+
+		if (m->prio == prio)
+			prio = m->prio + 1;
+		else
+			break;
+	}
 
-err_matcher_ipv6_udp:
-	mlx5_smfs_matcher_destroy(ct_matchers->ipv6_tcp);
-err_matcher_ipv6_tcp:
-	mlx5_smfs_matcher_destroy(ct_matchers->ipv4_udp);
-err_matcher_ipv4_udp:
-	mlx5_smfs_matcher_destroy(ct_matchers->ipv4_tcp);
-	return 0;
+	tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
+	dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio);
+	if (IS_ERR(dr_matcher)) {
+		netdev_warn(fs->netdev,
+			    "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n",
+			    nat, ipv4, tcp, PTR_ERR(dr_matcher));
+		smfs_matcher = ERR_CAST(dr_matcher);
+		goto out_unlock;
+	}
+
+	smfs_matcher->dr_matcher = dr_matcher;
+	smfs_matcher->prio = prio;
+	list_add(&smfs_matcher->list, prev);
+	refcount_set(&smfs_matcher->ref, 1);
+
+out_unlock:
+	mutex_unlock(&fs_smfs->lock);
+	return smfs_matcher;
 }
 
 static void
-mlx5_ct_fs_smfs_matchers_destroy(struct mlx5_ct_fs_smfs_matchers *ct_matchers)
+mlx5_ct_fs_smfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_smfs_matcher *smfs_matcher)
 {
-	mlx5_smfs_matcher_destroy(ct_matchers->ipv6_udp);
-	mlx5_smfs_matcher_destroy(ct_matchers->ipv6_tcp);
-	mlx5_smfs_matcher_destroy(ct_matchers->ipv4_udp);
-	mlx5_smfs_matcher_destroy(ct_matchers->ipv4_tcp);
+	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+
+	if (!refcount_dec_and_mutex_lock(&smfs_matcher->ref, &fs_smfs->lock))
+		return;
+
+	mlx5_smfs_matcher_destroy(smfs_matcher->dr_matcher);
+	list_del(&smfs_matcher->list);
+	mutex_unlock(&fs_smfs->lock);
 }
 
 static int
@@ -170,7 +184,6 @@ mlx5_ct_fs_smfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
 {
 	struct mlx5dr_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
 	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
-	int err;
 
 	post_ct_tbl = mlx5_smfs_table_get_from_fs_ft(post_ct);
 	ct_nat_tbl = mlx5_smfs_table_get_from_fs_ft(ct_nat);
@@ -184,28 +197,18 @@ mlx5_ct_fs_smfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
 
 	ct_dbg("using smfs steering");
 
-	err = mlx5_ct_fs_smfs_matchers_create(fs, ct_tbl, &fs_smfs->ct_matchers);
-	if (err)
-		goto err_init;
-
-	err = mlx5_ct_fs_smfs_matchers_create(fs, ct_nat_tbl, &fs_smfs->ct_matchers_nat);
-	if (err)
-		goto err_matchers_nat;
-
 	fs_smfs->fwd_action = mlx5_smfs_action_create_dest_table(post_ct_tbl);
 	if (!fs_smfs->fwd_action) {
-		err = -EINVAL;
-		goto err_action_create;
+		return -EINVAL;
 	}
 
-	return 0;
+	fs_smfs->ct_tbl = ct_tbl;
+	fs_smfs->ct_nat_tbl = ct_nat_tbl;
+	mutex_init(&fs_smfs->lock);
+	INIT_LIST_HEAD(&fs_smfs->matchers.used);
+	INIT_LIST_HEAD(&fs_smfs->matchers_nat.used);
 
-err_action_create:
-	mlx5_ct_fs_smfs_matchers_destroy(&fs_smfs->ct_matchers_nat);
-err_matchers_nat:
-	mlx5_ct_fs_smfs_matchers_destroy(&fs_smfs->ct_matchers);
-err_init:
-	return err;
+	return 0;
 }
 
 static void
@@ -214,8 +217,6 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
 	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
 
 	mlx5_smfs_action_destroy(fs_smfs->fwd_action);
-	mlx5_ct_fs_smfs_matchers_destroy(&fs_smfs->ct_matchers_nat);
-	mlx5_ct_fs_smfs_matchers_destroy(&fs_smfs->ct_matchers);
 }
 
 static inline bool
@@ -285,10 +286,9 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
 			    struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
 {
 	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
-	struct mlx5_ct_fs_smfs_matchers *matchers;
+	struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
 	struct mlx5_ct_fs_smfs_rule *smfs_rule;
 	struct mlx5dr_action *actions[5];
-	struct mlx5dr_matcher *matcher;
 	struct mlx5dr_rule *rule;
 	int num_actions = 0, err;
 	bool nat, tcp, ipv4;
@@ -315,22 +315,27 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
 	tcp = MLX5_GET(fte_match_param, spec->match_value,
 		       outer_headers.ip_protocol) == IPPROTO_TCP;
 
-	matchers = nat ? &fs_smfs->ct_matchers_nat : &fs_smfs->ct_matchers;
-	matcher = ipv4 ? (tcp ? matchers->ipv4_tcp : matchers->ipv4_udp) :
-			 (tcp ? matchers->ipv6_tcp : matchers->ipv6_udp);
+	smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp);
+	if (IS_ERR(smfs_matcher)) {
+		err = PTR_ERR(smfs_matcher);
+		goto err_matcher;
+	}
 
-	rule = mlx5_smfs_rule_create(matcher, spec, num_actions, actions,
+	rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
 				     MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT);
 	if (!rule) {
 		err = -EINVAL;
-		goto err_rule;
+		goto err_create;
 	}
 
 	smfs_rule->rule = rule;
+	smfs_rule->smfs_matcher = smfs_matcher;
 
 	return &smfs_rule->fs_rule;
 
-err_rule:
+err_create:
+	mlx5_ct_fs_smfs_matcher_put(fs, smfs_matcher);
+err_matcher:
 	mlx5_smfs_action_destroy(smfs_rule->count_action);
 err_count:
 	kfree(smfs_rule);
@@ -345,6 +350,7 @@ mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru
 					       fs_rule);
 
 	mlx5_smfs_rule_destroy(smfs_rule->rule);
+	mlx5_ct_fs_smfs_matcher_put(fs, smfs_rule->smfs_matcher);
 	mlx5_smfs_action_destroy(smfs_rule->count_action);
 	kfree(smfs_rule);
 }
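
Matcher lifetime in the patch follows a double-checked refcount pattern: a
lockless refcount_inc_not_zero() fast path, a mutex-protected slow path that
re-checks before creating, and refcount_dec_and_mutex_lock() so that the
last put destroys the matcher under the same lock that guards creation. The
following userspace analogue is a sketch of that pattern using C11 atomics
and pthreads; get(), put() and the `live` flag are hypothetical stand-ins,
not the driver API.

/* Sketch only: the object is created on first get() and destroyed on
 * last put(). Compile with -pthread.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int ref;	/* 0 means "object not created" */
static bool live;	/* stands in for the dr matcher */

/* Like refcount_inc_not_zero(): take a reference only if one exists. */
static bool ref_inc_not_zero(atomic_int *r)
{
	int v = atomic_load(r);

	while (v != 0)
		if (atomic_compare_exchange_weak(r, &v, v + 1))
			return true;
	return false;
}

/* Like refcount_dec_and_mutex_lock(): the final decrement happens with
 * the mutex held, so a racing get() cannot revive a dying object.
 */
static bool ref_dec_and_lock(atomic_int *r, pthread_mutex_t *m)
{
	int v = atomic_load(r);

	while (v > 1)	/* fast path: not the last reference */
		if (atomic_compare_exchange_weak(r, &v, v - 1))
			return false;
	pthread_mutex_lock(m);
	if (atomic_fetch_sub(r, 1) != 1) {
		pthread_mutex_unlock(m);
		return false;
	}
	return true;	/* caller destroys, then unlocks */
}

static void get(void)
{
	if (ref_inc_not_zero(&ref))	/* fast path: already created */
		return;
	pthread_mutex_lock(&lock);
	if (ref_inc_not_zero(&ref)) {	/* re-check: lost the creation race */
		pthread_mutex_unlock(&lock);
		return;
	}
	live = true;			/* matcher create goes here */
	atomic_store(&ref, 1);
	pthread_mutex_unlock(&lock);
}

static void put(void)
{
	if (!ref_dec_and_lock(&ref, &lock))
		return;
	live = false;			/* matcher destroy + list unlink */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	get();
	get();
	put();
	printf("after one put: live=%d\n", live);	/* 1 */
	put();
	printf("after last put: live=%d\n", live);	/* 0 */
	return 0;
}

The re-check under the lock mirrors the patch's "Retry with lock" comment:
two threads may both miss on the lockless path, but only the first into the
critical section creates the matcher; the second just takes a reference.
Doing the final decrement with the mutex held closes the opposite race,
where a get() could otherwise revive a matcher that a concurrent put() is
about to destroy.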