Commit 8ee72638 authored by Roi Dayan, committed by Saeed Mahameed

net/mlx5e: Add vlan push/pop/mangle to tc action infra

Add parsing support for the vlan push/pop/mangle actions by
implementing struct mlx5e_tc_act for each of them.
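
For readers of this patch, here is a minimal sketch of the mlx5e_tc_act
interface being implemented. The two callback signatures are taken from
the registrations in the new files below; the full definition (with any
additional members) lives in en/tc/act/act.h.

/* Sketch only: members inferred from the registrations in this patch. */
struct mlx5e_tc_act {
	bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state,
			    const struct flow_action_entry *act,
			    int act_index);
	int (*parse_action)(struct mlx5e_tc_act_parse_state *parse_state,
			    const struct flow_action_entry *act,
			    struct mlx5e_priv *priv,
			    struct mlx5_flow_attr *attr);
};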
Signed-off-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Oz Shlomo <ozsh@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent e36db1ee
drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -49,7 +49,8 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
 mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
					en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
-					en/tc/act/tun.o en/tc/act/csum.o en/tc/act/pedit.o
+					en/tc/act/tun.o en/tc/act/csum.o en/tc/act/pedit.o \
+					en/tc/act/vlan.o en/tc/act/vlan_mangle.o
 mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
 mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -15,9 +15,9 @@ static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
 	NULL, /* FLOW_ACTION_MIRRED, */
 	NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */
 	NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
-	NULL, /* FLOW_ACTION_VLAN_PUSH, */
-	NULL, /* FLOW_ACTION_VLAN_POP, */
-	NULL, /* FLOW_ACTION_VLAN_MANGLE, */
+	&mlx5e_tc_act_vlan,
+	&mlx5e_tc_act_vlan,
+	&mlx5e_tc_act_vlan_mangle,
 	&mlx5e_tc_act_tun_encap,
 	&mlx5e_tc_act_tun_decap,
 	&mlx5e_tc_act_pedit,
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -42,6 +42,8 @@ extern struct mlx5e_tc_act mlx5e_tc_act_tun_encap;
 extern struct mlx5e_tc_act mlx5e_tc_act_tun_decap;
 extern struct mlx5e_tc_act mlx5e_tc_act_csum;
 extern struct mlx5e_tc_act mlx5e_tc_act_pedit;
+extern struct mlx5e_tc_act mlx5e_tc_act_vlan;
+extern struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle;

 struct mlx5e_tc_act *
 mlx5e_tc_act_get(enum flow_action_id act_id,
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c (new file)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include <linux/if_vlan.h>
#include "act.h"
#include "vlan.h"
#include "en/tc_priv.h"

static int
parse_tc_vlan_action(struct mlx5e_priv *priv,
		     const struct flow_action_entry *act,
		     struct mlx5_esw_flow_attr *attr,
		     u32 *action,
		     struct netlink_ext_ack *extack)
{
	u8 vlan_idx = attr->total_vlan;

	if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
		NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
		return -EOPNOTSUPP;
	}

	switch (act->id) {
	case FLOW_ACTION_VLAN_POP:
		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH)) {
				NL_SET_ERR_MSG_MOD(extack, "vlan pop action is not supported");
				return -EOPNOTSUPP;
			}

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
		} else {
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		}
		break;
	case FLOW_ACTION_VLAN_PUSH:
		attr->vlan_vid[vlan_idx] = act->vlan.vid;
		attr->vlan_prio[vlan_idx] = act->vlan.prio;
		attr->vlan_proto[vlan_idx] = act->vlan.proto;
		if (!attr->vlan_proto[vlan_idx])
			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);

		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "vlan push action is not supported for vlan depth > 1");
				return -EOPNOTSUPP;
			}

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
		} else {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
			    (act->vlan.proto != htons(ETH_P_8021Q) ||
			     act->vlan.prio)) {
				NL_SET_ERR_MSG_MOD(extack, "vlan push action is not supported");
				return -EOPNOTSUPP;
			}

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		}
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
		return -EINVAL;
	}

	attr->total_vlan = vlan_idx + 1;

	return 0;
}
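
To make the depth handling above concrete, here is a hypothetical caller
(illustration only, not part of the patch; priv, esw_attr, action and
extack are assumed to be in scope) pushing two tags:

	struct flow_action_entry first = {
		.id = FLOW_ACTION_VLAN_PUSH,
		.vlan.vid = 100,
		.vlan.proto = htons(ETH_P_8021Q),
	};
	struct flow_action_entry second = {
		.id = FLOW_ACTION_VLAN_PUSH,
		.vlan.vid = 200,
		.vlan.proto = htons(ETH_P_8021Q),
	};
	int err;

	/* Slot 0: sets MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH. */
	err = parse_tc_vlan_action(priv, &first, esw_attr, &action, extack);
	/* Slot 1: sets ..._VLAN_PUSH_2, or fails with -EOPNOTSUPP if the
	 * device lacks depth-2 vlan action support. */
	if (!err)
		err = parse_tc_vlan_action(priv, &second, esw_attr, &action, extack);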

int
mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
				  struct mlx5_flow_attr *attr,
				  struct net_device **out_dev,
				  struct netlink_ext_ack *extack)
{
	struct net_device *vlan_dev = *out_dev;
	struct flow_action_entry vlan_act = {
		.id = FLOW_ACTION_VLAN_PUSH,
		.vlan.vid = vlan_dev_vlan_id(vlan_dev),
		.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
		.vlan.prio = 0,
	};
	int err;

	err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack);
	if (err)
		return err;

	rcu_read_lock();
	*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
	rcu_read_unlock();
	if (!*out_dev)
		return -ENODEV;

	if (is_vlan_dev(*out_dev))
		err = mlx5e_tc_act_vlan_add_push_action(priv, attr, out_dev, extack);

	return err;
}
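
Note the tail recursion above: when the device behind the vlan device is
itself a vlan device (stacked tags), the function follows
dev_get_iflink() one level down and queues another push, so one push is
emitted per stacked tag. The depth stays bounded by the
MLX5_FS_VLAN_DEPTH check in parse_tc_vlan_action.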

int
mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
				 struct mlx5_flow_attr *attr,
				 struct netlink_ext_ack *extack)
{
	struct flow_action_entry vlan_act = {
		.id = FLOW_ACTION_VLAN_POP,
	};
	int nest_level, err = 0;

	nest_level = attr->parse_attr->filter_dev->lower_level -
		     priv->netdev->lower_level;
	while (nest_level--) {
		err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action,
					   extack);
		if (err)
			return err;
	}

	return err;
}
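
As a worked example of the nest_level arithmetic (hypothetical device
names): with a filter installed on eth0.100.200, a vlan device stacked
on a vlan device over the uplink, filter_dev->lower_level exceeds
priv->netdev->lower_level by two, so the loop emits two pop actions.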

static bool
tc_act_can_offload_vlan(struct mlx5e_tc_act_parse_state *parse_state,
			const struct flow_action_entry *act,
			int act_index)
{
	return true;
}

static int
tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
		  const struct flow_action_entry *act,
		  struct mlx5e_priv *priv,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int err;

	if (act->id == FLOW_ACTION_VLAN_PUSH &&
	    (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
		/* Replace vlan pop+push with vlan modify */
		attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		err = mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, act,
							   attr->parse_attr, parse_state->hdrs,
							   &attr->action, parse_state->extack);
	} else {
		err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action,
					   parse_state->extack);
	}

	if (err)
		return err;

	esw_attr->split_count = esw_attr->out_count;

	return 0;
}

struct mlx5e_tc_act mlx5e_tc_act_vlan = {
	.can_offload = tc_act_can_offload_vlan,
	.parse_action = tc_act_parse_vlan,
};
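
For orientation, a hedged sketch of how the act infrastructure is
expected to reach this struct. mlx5e_tc_act_get comes from this patch's
act.h hunk (its trailing parameter is cut off there, so the ns_type
argument below is an assumption), and the loop shape is illustrative
only:

	struct mlx5e_tc_act *tc_act;

	/* Look up the handler registered in tc_acts_fdb[] (see act.c above). */
	tc_act = mlx5e_tc_act_get(act->id, ns_type);	/* ns_type arg assumed */
	if (!tc_act || !tc_act->can_offload(parse_state, act, act_index))
		return -EOPNOTSUPP;

	err = tc_act->parse_action(parse_state, act, priv, attr);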
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h (new file)
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef __MLX5_EN_TC_ACT_VLAN_H__
#define __MLX5_EN_TC_ACT_VLAN_H__

#include <net/flow_offload.h>
#include "en/tc_priv.h"

struct pedit_headers_action;

int
mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
				  struct mlx5_flow_attr *attr,
				  struct net_device **out_dev,
				  struct netlink_ext_ack *extack);

int
mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
				 struct mlx5_flow_attr *attr,
				 struct netlink_ext_ack *extack);

int
mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
				     const struct flow_action_entry *act,
				     struct mlx5e_tc_flow_parse_attr *parse_attr,
				     struct pedit_headers_action *hdrs,
				     u32 *action, struct netlink_ext_ack *extack);

#endif /* __MLX5_EN_TC_ACT_VLAN_H__ */
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c (new file)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include <linux/if_vlan.h>
#include "act.h"
#include "vlan.h"
#include "en/tc_priv.h"

struct pedit_headers_action;

int
mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
				     const struct flow_action_entry *act,
				     struct mlx5e_tc_flow_parse_attr *parse_attr,
				     struct pedit_headers_action *hdrs,
				     u32 *action, struct netlink_ext_ack *extack)
{
	u16 mask16 = VLAN_VID_MASK;
	u16 val16 = act->vlan.vid & VLAN_VID_MASK;
	const struct flow_action_entry pedit_act = {
		.id = FLOW_ACTION_MANGLE,
		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
		.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
		.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
	};
	u8 match_prio_mask, match_prio_val;
	void *headers_c, *headers_v;
	int err;

	headers_c = mlx5e_get_match_headers_criteria(*action, &parse_attr->spec);
	headers_v = mlx5e_get_match_headers_value(*action, &parse_attr->spec);

	if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
	      MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN rewrite action must have VLAN protocol match");
		return -EOPNOTSUPP;
	}

	match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
	match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
		NL_SET_ERR_MSG_MOD(extack, "Changing VLAN prio is not supported");
		return -EOPNOTSUPP;
	}

	err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr, hdrs,
					      NULL, extack);
	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

	return err;
}
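
A side note on the mask arithmetic above: the 802.1Q TCI packs PCP, DEI
and VID into 16 bits, and VLAN_VID_MASK covers only the low 12 bits,
which is why the pedit mask leaves the priority bits untouched and why
the byte-swap dance converts the host-order mask/value into the packet
byte order the pedit infrastructure operates on. A minimal sketch using
the real macros from <linux/if_vlan.h> (mk_vlan_tci itself is a
hypothetical helper, not kernel API):

	/* Hypothetical helper: compose a host-order TCI from its fields.
	 * TCI layout: PCP(3 bits) | DEI(1 bit) | VID(12 bits). */
	static inline u16 mk_vlan_tci(u8 prio, bool dei, u16 vid)
	{
		return ((u16)prio << VLAN_PRIO_SHIFT) |	/* bits 15..13 */
		       (dei ? VLAN_CFI_MASK : 0) |	/* bit 12 */
		       (vid & VLAN_VID_MASK);		/* bits 11..0 */
	}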

static bool
tc_act_can_offload_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
			       const struct flow_action_entry *act,
			       int act_index)
{
	return true;
}

static int
tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
			 const struct flow_action_entry *act,
			 struct mlx5e_priv *priv,
			 struct mlx5_flow_attr *attr)
{
	enum mlx5_flow_namespace_type ns_type;
	int err;

	ns_type = mlx5e_get_flow_namespace(parse_state->flow);
	err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act,
						   attr->parse_attr, parse_state->hdrs,
						   &attr->action, parse_state->extack);
	if (err)
		return err;

	if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
		attr->esw_attr->split_count = attr->esw_attr->out_count;

	return 0;
}

struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle = {
	.can_offload = tc_act_can_offload_vlan_mangle,
	.parse_action = tc_act_parse_vlan_mangle,
};
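
Finally, an illustrative flow_action_entry (hypothetical values) of the
kind the flow offload core hands to tc_act_parse_vlan_mangle for a
"tc ... action vlan modify id 100" rule:

	struct flow_action_entry mangle = {
		.id = FLOW_ACTION_VLAN_MANGLE,
		.vlan.vid = 100,
		.vlan.prio = 0,
		.vlan.proto = htons(ETH_P_8021Q),
	};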
drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -183,4 +183,8 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
 struct mlx5e_tc_int_port_priv *
 mlx5e_get_int_port_priv(struct mlx5e_priv *priv);

+void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
+void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
+
 #endif /* __MLX5_EN_TC_PRIV_H__ */