Commit 110d8465 authored by David S. Miller

Merge branch 'mlxsw-multichain-tc-offload'

Jiri Pirko says:

====================
mlxsw: spectrum: Introduce multichain TC offload

This patchset introduces offloading of rules added to chain with
non-zero index, which was previously forbidden. Also, goto_chain
termination action is offloaded allowing to jump to processing
of desired chain.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ae99e188 0ede6ba2
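
Condensed, the flow this series implements: rulesets become keyed by (dev, ingress, chain_index), only the implicit chain-0 ruleset is bound directly to the port, and a goto_chain action is resolved into a hardware jump to the target chain's ACL group. The following is a minimal, self-contained user-space sketch of that flow; all types, names and ids are invented for illustration and are not driver code. The real lookup key, binding rule and group-id helper appear in the spectrum_acl.c and spectrum_flower.c hunks below.

/* Hypothetical model of the offload flow added by this series; all types,
 * names and ids here are invented for illustration, not driver code.
 */
#include <stdbool.h>
#include <stdio.h>

struct ruleset {
	bool in_use;
	unsigned int chain_index;
	unsigned int group_id;	/* stand-in for the hardware ACL group id */
};

/* Toy replacement for the driver's rhashtable keyed by
 * (dev, ingress, chain_index): one slot per chain index.
 */
static struct ruleset chains[16];

/* Like mlxsw_sp_acl_ruleset_lookup(): succeeds only for an existing chain. */
static struct ruleset *ruleset_lookup(unsigned int chain_index)
{
	return chains[chain_index].in_use ? &chains[chain_index] : NULL;
}

/* Like mlxsw_sp_acl_ruleset_get(): creates the ruleset on first use.
 * Only chain 0, the implicit one, is bound directly to the port; the
 * other chains are reachable solely through jump ("goto") actions.
 */
static struct ruleset *ruleset_get(unsigned int chain_index)
{
	struct ruleset *rs = &chains[chain_index];

	if (!rs->in_use) {
		rs->in_use = true;
		rs->chain_index = chain_index;
		rs->group_id = 0x100 + chain_index;	/* made-up id */
		if (chain_index == 0)
			printf("bind group 0x%x to port (implicit chain 0)\n",
			       rs->group_id);
	}
	return rs;
}

/* What parsing "action goto chain N" boils down to: look up the target
 * chain's ruleset and emit a jump to its group id.
 */
static int parse_goto_chain(unsigned int target_chain)
{
	struct ruleset *rs = ruleset_lookup(target_chain);

	if (!rs)
		return -1;	/* target chain has no ruleset yet (-ENOENT) */
	printf("emit jump to group 0x%x (chain %u)\n",
	       rs->group_id, rs->chain_index);
	return 0;
}

int main(void)
{
	ruleset_get(1);			/* a rule was added to chain 1 */
	ruleset_get(0);			/* chain 0 rule: binds group to port */
	return parse_goto_chain(1);	/* that rule does "goto chain 1" */
}
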
@@ -1733,9 +1733,6 @@ mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
else
return -EOPNOTSUPP;
if (f->common.chain_index)
return -EOPNOTSUPP;
switch (f->command) {
case TC_CLSFLOWER_REPLACE:
return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
......
@@ -417,6 +417,7 @@ struct mlxsw_sp_acl_profile_ops {
int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
struct net_device *dev, bool ingress);
void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
u16 (*ruleset_group_id)(void *ruleset_priv);
size_t rule_priv_size;
int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv, void *rule_priv,
@@ -440,11 +441,16 @@ struct mlxsw_sp_acl_ruleset;
/* spectrum_acl.c */
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev, bool ingress,
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
bool ingress, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
bool ingress, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset);
u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl);
......
@@ -74,6 +74,7 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
struct mlxsw_sp_acl_ruleset_ht_key {
struct net_device *dev; /* dev this ruleset is bound to */
bool ingress;
u32 chain_index;
const struct mlxsw_sp_acl_profile_ops *ops;
};
@@ -163,7 +164,8 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
struct net_device *dev, bool ingress)
struct net_device *dev, bool ingress,
u32 chain_index)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
@@ -171,13 +173,20 @@ static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
ruleset->ht_key.dev = dev;
ruleset->ht_key.ingress = ingress;
ruleset->ht_key.chain_index = chain_index;
err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
if (err)
return err;
err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
if (err)
goto err_ops_ruleset_bind;
if (!ruleset->ht_key.chain_index) {
/* We only need ruleset with chain index 0, the implicit one,
* to be directly bound to device. The rest of the rulesets
* are bound by "Goto action set".
*/
err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
if (err)
goto err_ops_ruleset_bind;
}
return 0;
err_ops_ruleset_bind:
@@ -192,7 +201,8 @@ static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
if (!ruleset->ht_key.chain_index)
ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
}
@@ -211,14 +221,48 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
bool ingress, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
memset(&ht_key, 0, sizeof(ht_key));
ht_key.dev = dev;
ht_key.ingress = ingress;
ht_key.chain_index = chain_index;
ht_key.ops = ops;
return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
mlxsw_sp_acl_ruleset_ht_params);
}
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev, bool ingress,
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
bool ingress, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
ops = acl->ops->profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
chain_index, ops);
if (!ruleset)
return ERR_PTR(-ENOENT);
return ruleset;
}
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
bool ingress, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct mlxsw_sp_acl_ruleset *ruleset;
int err;
@@ -226,12 +270,8 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
if (!ops)
return ERR_PTR(-EINVAL);
memset(&ht_key, 0, sizeof(ht_key));
ht_key.dev = dev;
ht_key.ingress = ingress;
ht_key.ops = ops;
ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
mlxsw_sp_acl_ruleset_ht_params);
ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
chain_index, ops);
if (ruleset) {
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
return ruleset;
@@ -239,7 +279,8 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
if (IS_ERR(ruleset))
return ruleset;
err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress);
err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev,
ingress, chain_index);
if (err)
goto err_ruleset_bind;
return ruleset;
@@ -255,6 +296,13 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
return ops->ruleset_group_id(ruleset->priv);
}
static int
mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei)
......
@@ -295,6 +295,12 @@ mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
return group->id;
}
static unsigned int
mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
{
@@ -1063,6 +1069,14 @@ mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
}
static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}
static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv, void *rule_priv,
@@ -1099,6 +1113,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del,
.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
......
@@ -45,7 +45,7 @@
#include "core_acl_flex_keys.h"
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev,
struct net_device *dev, bool ingress,
struct mlxsw_sp_acl_rule_info *rulei,
struct tcf_exts *exts)
{
@@ -71,6 +71,20 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_rulei_act_trap(rulei);
if (err)
return err;
} else if (is_tcf_gact_goto_chain(a)) {
u32 chain_index = tcf_gact_goto_chain_index(a);
struct mlxsw_sp_acl_ruleset *ruleset;
u16 group_id;
ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev,
ingress,
chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
} else if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
@@ -246,7 +260,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev,
struct net_device *dev, bool ingress,
struct mlxsw_sp_acl_rule_info *rulei,
struct tc_cls_flower_offload *f)
{
@@ -364,7 +378,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts);
return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress,
rulei, f->exts);
}
int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
@@ -378,6 +393,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
int err;
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
@@ -389,7 +405,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
}
rulei = mlxsw_sp_acl_rule_rulei(rule);
err = mlxsw_sp_flower_parse(mlxsw_sp, dev, rulei, f);
err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f);
if (err)
goto err_flower_parse;
@@ -421,7 +437,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct mlxsw_sp_acl_rule *rule;
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
ingress,
ingress, f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
return;
@@ -447,7 +463,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
int err;
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
ingress,
ingress, f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (WARN_ON(IS_ERR(ruleset)))
return -EINVAL;
......
@@ -15,7 +15,8 @@ struct tcf_gact {
};
#define to_gact(a) ((struct tcf_gact *)a)
static inline bool __is_tcf_gact_act(const struct tc_action *a, int act)
static inline bool __is_tcf_gact_act(const struct tc_action *a, int act,
bool is_ext)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_gact *gact;
@@ -24,7 +25,8 @@ static inline bool __is_tcf_gact_act(const struct tc_action *a, int act)
return false;
gact = to_gact(a);
if (gact->tcf_action == act)
if ((!is_ext && gact->tcf_action == act) ||
(is_ext && TC_ACT_EXT_CMP(gact->tcf_action, act)))
return true;
#endif
@@ -33,12 +35,22 @@ static inline bool __is_tcf_gact_act(const struct tc_action *a, int act)
static inline bool is_tcf_gact_shot(const struct tc_action *a)
{
return __is_tcf_gact_act(a, TC_ACT_SHOT);
return __is_tcf_gact_act(a, TC_ACT_SHOT, false);
}
static inline bool is_tcf_gact_trap(const struct tc_action *a)
{
return __is_tcf_gact_act(a, TC_ACT_TRAP);
return __is_tcf_gact_act(a, TC_ACT_TRAP, false);
}
static inline bool is_tcf_gact_goto_chain(const struct tc_action *a)
{
return __is_tcf_gact_act(a, TC_ACT_GOTO_CHAIN, true);
}
static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
{
return a->goto_chain->index;
}
#endif /* __NET_TC_GACT_H */
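
A note on the gact change above: for the goto_chain termination action the target chain index is carried in the low bits of tcf_action, with the opcode in the high bits, so a plain equality test against TC_ACT_GOTO_CHAIN would never match; hence the new is_ext path using TC_ACT_EXT_CMP(), which compares only the opcode bits. Below is a self-contained sketch of that encoding, with the constants re-declared under local names purely for illustration and only assumed to mirror include/uapi/linux/pkt_cls.h.

/* Illustration of the extended TC action encoding that TC_ACT_EXT_CMP()
 * relies on; constants are re-declared here with local names and are only
 * assumed to mirror include/uapi/linux/pkt_cls.h.
 */
#include <assert.h>
#include <stdbool.h>

#define EXT_SHIFT	28			/* opcode lives in the top bits */
#define EXT(op)		((unsigned int)(op) << EXT_SHIFT)
#define EXT_VAL_MASK	((1u << EXT_SHIFT) - 1)	/* per-action payload, e.g. chain index */

#define ACT_SHOT	2u			/* plain action: compared exactly */
#define ACT_GOTO_CHAIN	EXT(2)			/* extended action: payload in low bits */

/* Mirrors TC_ACT_EXT_CMP(): compare only the opcode bits. */
static bool ext_cmp(unsigned int combined, unsigned int opcode)
{
	return (combined & ~EXT_VAL_MASK) == opcode;
}

int main(void)
{
	unsigned int goto_chain_7 = ACT_GOTO_CHAIN | 7;	/* "goto chain 7" */

	assert(goto_chain_7 != ACT_GOTO_CHAIN);		/* exact compare fails */
	assert(ext_cmp(goto_chain_7, ACT_GOTO_CHAIN));	/* masked compare matches */
	assert(!ext_cmp(ACT_SHOT, ACT_GOTO_CHAIN));	/* plain actions don't match */
	return 0;
}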