Commit f71f1bcb authored by David S. Miller

Merge tag 'mlx5-updates-2021-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-12-14

Parsing Infrastructure for TC actions:

This series introduces a TC action infrastructure that helps parse TC
actions in a generic way for both FDB and NIC rules.

To keep the TC action parsing code maintainable, we split it into one
action parser per TC action type, each in its own file, instead of one
big switch-case loop duplicated between the FDB and NIC parsers as
before this patchset.

Each TC flow_action->id is represented by a dedicated mlx5e_tc_act
handler, which provides callbacks to check whether the specific action
can be offloaded and to parse it.

Each case (TC action) is moved into its dedicated handler, which is
responsible for parsing the action and determining whether it is
supported.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5a21bf5b 35bb5242
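
To make the new model concrete, here is a minimal sketch of the dispatch
loop the handlers below are written for. The function name
parse_tc_actions_sketch and the exact loop shape are illustrative
assumptions, not code lifted from the patch:

/* Illustrative sketch only: drives the per-action handlers in en/tc/act/. */
static int
parse_tc_actions_sketch(struct mlx5e_tc_act_parse_state *parse_state,
			struct flow_action *flow_action,
			struct mlx5e_priv *priv,
			struct mlx5_flow_attr *attr,
			enum mlx5_flow_namespace_type ns_type)
{
	struct flow_action_entry *act;
	struct mlx5e_tc_act *tc_act;
	int err, i;

	flow_action_for_each(i, act, flow_action) {
		/* look up the handler for this flow_action->id */
		tc_act = mlx5e_tc_act_get(act->id, ns_type);
		if (!tc_act || !tc_act->can_offload(parse_state, act, i))
			return -EOPNOTSUPP; /* no handler, or action rejected */

		err = tc_act->parse_action(parse_state, act, priv, attr);
		if (err)
			return err;
	}
	return 0;
}

Handlers that also define .post_parse (goto and vlan below) get a second
pass after the whole action list has been walked, once the accumulated
parse_state is complete.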
@@ -46,6 +46,15 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
en/tc/post_act.o en/tc/int_port.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
en/tc/act/tun.o en/tc/act/csum.o en/tc/act/pedit.o \
en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \
en/tc/act/mirred.o en/tc/act/mirred_nic.o \
en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
en/tc/act/redirect_ingress.o
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_accept(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_accept = {
.can_offload = tc_act_can_offload_accept,
.parse_action = tc_act_parse_accept,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
#include "mlx5_core.h"
/* Must be aligned with enum flow_action_id. */
static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
&mlx5e_tc_act_accept,
&mlx5e_tc_act_drop,
&mlx5e_tc_act_trap,
&mlx5e_tc_act_goto,
&mlx5e_tc_act_mirred,
&mlx5e_tc_act_mirred,
&mlx5e_tc_act_redirect_ingress,
NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
&mlx5e_tc_act_vlan,
&mlx5e_tc_act_vlan,
&mlx5e_tc_act_vlan_mangle,
&mlx5e_tc_act_tun_encap,
&mlx5e_tc_act_tun_decap,
&mlx5e_tc_act_pedit,
&mlx5e_tc_act_pedit,
&mlx5e_tc_act_csum,
NULL, /* FLOW_ACTION_MARK, */
&mlx5e_tc_act_ptype,
NULL, /* FLOW_ACTION_PRIORITY, */
NULL, /* FLOW_ACTION_WAKE, */
NULL, /* FLOW_ACTION_QUEUE, */
&mlx5e_tc_act_sample,
NULL, /* FLOW_ACTION_POLICE, */
&mlx5e_tc_act_ct,
NULL, /* FLOW_ACTION_CT_METADATA, */
&mlx5e_tc_act_mpls_push,
&mlx5e_tc_act_mpls_pop,
};
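/* For example, FLOW_ACTION_DROP is the second enumerator of
 * enum flow_action_id, so tc_acts_fdb[FLOW_ACTION_DROP] resolves to
 * &mlx5e_tc_act_drop above; the NULL slots keep the indexes of
 * unsupported actions in step with the enum.
 */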
/* Must be aligned with enum flow_action_id. */
static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = {
&mlx5e_tc_act_accept,
&mlx5e_tc_act_drop,
NULL, /* FLOW_ACTION_TRAP, */
&mlx5e_tc_act_goto,
&mlx5e_tc_act_mirred_nic,
NULL, /* FLOW_ACTION_MIRRED, */
NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */
NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
NULL, /* FLOW_ACTION_VLAN_PUSH, */
NULL, /* FLOW_ACTION_VLAN_POP, */
NULL, /* FLOW_ACTION_VLAN_MANGLE, */
NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */
NULL, /* FLOW_ACTION_TUNNEL_DECAP, */
&mlx5e_tc_act_pedit,
&mlx5e_tc_act_pedit,
&mlx5e_tc_act_csum,
&mlx5e_tc_act_mark,
NULL, /* FLOW_ACTION_PTYPE, */
NULL, /* FLOW_ACTION_PRIORITY, */
NULL, /* FLOW_ACTION_WAKE, */
NULL, /* FLOW_ACTION_QUEUE, */
NULL, /* FLOW_ACTION_SAMPLE, */
NULL, /* FLOW_ACTION_POLICE, */
&mlx5e_tc_act_ct,
};
/**
* mlx5e_tc_act_get() - Get an action parser for an action id.
* @act_id: Flow action id.
* @ns_type: flow namespace type.
*/
struct mlx5e_tc_act *
mlx5e_tc_act_get(enum flow_action_id act_id,
enum mlx5_flow_namespace_type ns_type)
{
struct mlx5e_tc_act **tc_acts;
tc_acts = ns_type == MLX5_FLOW_NAMESPACE_FDB ? tc_acts_fdb : tc_acts_nic;
return tc_acts[act_id];
}
/**
* mlx5e_tc_act_init_parse_state() - Init a new parse_state.
* @parse_state: Parsing state.
* @flow: mlx5e tc flow being handled.
* @flow_action: flow action to parse.
* @extack: to set an error msg.
*
* The same parse_state should be passed to action parsers
* for tracking the current parsing state.
*/
void
mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_tc_flow *flow,
struct flow_action *flow_action,
struct netlink_ext_ack *extack)
{
memset(parse_state, 0, sizeof(*parse_state));
parse_state->flow = flow;
parse_state->num_actions = flow_action->num_entries;
parse_state->extack = extack;
}
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __MLX5_EN_TC_ACT_H__
#define __MLX5_EN_TC_ACT_H__
#include <net/tc_act/tc_pedit.h>
#include <net/flow_offload.h>
#include <linux/netlink.h>
#include "eswitch.h"
#include "pedit.h"
struct mlx5_flow_attr;
struct mlx5e_tc_act_parse_state {
unsigned int num_actions;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
bool encap;
bool decap;
bool mpls_push;
bool ptype_host;
const struct ip_tunnel_info *tun_info;
struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
int if_count;
struct mlx5_tc_ct_priv *ct_priv;
};
struct mlx5e_tc_act {
bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index);
int (*parse_action)(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr);
int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr);
};
extern struct mlx5e_tc_act mlx5e_tc_act_drop;
extern struct mlx5e_tc_act mlx5e_tc_act_trap;
extern struct mlx5e_tc_act mlx5e_tc_act_accept;
extern struct mlx5e_tc_act mlx5e_tc_act_mark;
extern struct mlx5e_tc_act mlx5e_tc_act_goto;
extern struct mlx5e_tc_act mlx5e_tc_act_tun_encap;
extern struct mlx5e_tc_act mlx5e_tc_act_tun_decap;
extern struct mlx5e_tc_act mlx5e_tc_act_csum;
extern struct mlx5e_tc_act mlx5e_tc_act_pedit;
extern struct mlx5e_tc_act mlx5e_tc_act_vlan;
extern struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle;
extern struct mlx5e_tc_act mlx5e_tc_act_mpls_push;
extern struct mlx5e_tc_act mlx5e_tc_act_mpls_pop;
extern struct mlx5e_tc_act mlx5e_tc_act_mirred;
extern struct mlx5e_tc_act mlx5e_tc_act_mirred_nic;
extern struct mlx5e_tc_act mlx5e_tc_act_ct;
extern struct mlx5e_tc_act mlx5e_tc_act_sample;
extern struct mlx5e_tc_act mlx5e_tc_act_ptype;
extern struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress;
struct mlx5e_tc_act *
mlx5e_tc_act_get(enum flow_action_id act_id,
enum mlx5_flow_namespace_type ns_type);
void
mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_tc_flow *flow,
struct flow_action *flow_action,
struct netlink_ext_ack *extack);
#endif /* __MLX5_EN_TC_ACT_H__ */
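
For orientation, this is all a new action needs under the infrastructure.
The action name "foo" and its file are hypothetical, shown only to
illustrate the pattern the real files below follow:

/* Hypothetical en/tc/act/foo.c, mirroring the handlers in this patch. */
#include "act.h"
#include "en/tc_priv.h"

static bool
tc_act_can_offload_foo(struct mlx5e_tc_act_parse_state *parse_state,
		       const struct flow_action_entry *act,
		       int act_index)
{
	/* validate act against parse_state; set parse_state->extack on failure */
	return true;
}

static int
tc_act_parse_foo(struct mlx5e_tc_act_parse_state *parse_state,
		 const struct flow_action_entry *act,
		 struct mlx5e_priv *priv,
		 struct mlx5_flow_attr *attr)
{
	/* translate the TC action into attr->action flags and attributes */
	return 0;
}

struct mlx5e_tc_act mlx5e_tc_act_foo = {
	.can_offload = tc_act_can_offload_foo,
	.parse_action = tc_act_parse_foo,
};

The remaining wiring is an extern declaration in act.h, an entry at the
action's index in tc_acts_fdb and/or tc_acts_nic, and the new object file
in the Makefile hunk above.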
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/tc_act/tc_csum.h>
#include "act.h"
#include "en/tc_priv.h"
static bool
csum_offload_supported(struct mlx5e_priv *priv,
u32 action,
u32 update_flags,
struct netlink_ext_ack *extack)
{
u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
TCA_CSUM_UPDATE_FLAG_UDP;
/* The HW recalcs checksums only if re-writing headers */
if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
NL_SET_ERR_MSG_MOD(extack,
"TC csum action is only offloaded with pedit");
netdev_warn(priv->netdev,
"TC csum action is only offloaded with pedit\n");
return false;
}
if (update_flags & ~prot_flags) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload TC csum action for some header/s");
netdev_warn(priv->netdev,
"can't offload TC csum action for some header/s - flags %#x\n",
update_flags);
return false;
}
return true;
}
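/* Note (added for illustration): flow->attr->action below holds only the
 * flags accumulated from actions parsed earlier in the same rule, so csum
 * is accepted only when a preceding pedit has already set
 * MLX5_FLOW_CONTEXT_ACTION_MOD_HDR.
 */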
static bool
tc_act_can_offload_csum(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct mlx5e_tc_flow *flow = parse_state->flow;
return csum_offload_supported(flow->priv, flow->attr->action,
act->csum_flags, parse_state->extack);
}
static int
tc_act_parse_csum(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_csum = {
.can_offload = tc_act_can_offload_csum,
.parse_action = tc_act_parse_csum,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
#include "en/tc_ct.h"
static bool
tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
if (flow_flag_test(parse_state->flow, SAMPLE)) {
NL_SET_ERR_MSG_MOD(extack,
"Sample action with connection tracking is not supported");
return false;
}
return true;
}
static int
tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
int err;
err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
&attr->parse_attr->mod_hdr_acts,
act, parse_state->extack);
if (err)
return err;
flow_flag_set(parse_state->flow, CT);
if (mlx5e_is_eswitch_flow(parse_state->flow))
attr->esw_attr->split_count = attr->esw_attr->out_count;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_ct = {
.can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_drop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_drop = {
.can_offload = tc_act_can_offload_drop,
.parse_action = tc_act_parse_drop,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
#include "eswitch.h"
static int
validate_goto_chain(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
bool is_esw = mlx5e_is_eswitch_flow(flow);
bool ft_flow = mlx5e_is_ft_flow(flow);
u32 dest_chain = act->chain_index;
struct mlx5_fs_chains *chains;
struct mlx5_eswitch *esw;
u32 reformat_and_fwd;
u32 max_chain;
esw = priv->mdev->priv.eswitch;
chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv);
max_chain = mlx5_chains_get_chain_range(chains);
reformat_and_fwd = is_esw ?
MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
if (ft_flow) {
NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
return -EOPNOTSUPP;
}
if (!mlx5_chains_backwards_supported(chains) &&
dest_chain <= flow->attr->chain) {
NL_SET_ERR_MSG_MOD(extack, "Goto lower numbered chain isn't supported");
return -EOPNOTSUPP;
}
if (dest_chain > max_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Requested destination chain is out of supported range");
return -EOPNOTSUPP;
}
if (flow->attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
!reformat_and_fwd) {
NL_SET_ERR_MSG_MOD(extack,
"Goto chain is not allowed if action has reformat or decap");
return -EOPNOTSUPP;
}
return 0;
}
static bool
tc_act_can_offload_goto(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
if (validate_goto_chain(flow->priv, flow, act, extack))
return false;
return true;
}
static int
tc_act_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
return 0;
}
static int
tc_act_post_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
if (!attr->dest_chain)
return 0;
if (parse_state->decap) {
/* It can be supported if we'll create a mapping for
* the tunnel device only (without tunnel), and set
* this tunnel id with this decap flow.
*
* On restore (miss), we'll just set this saved tunnel
* device.
*/
NL_SET_ERR_MSG_MOD(extack, "Decap with goto isn't supported");
netdev_warn(priv->netdev, "Decap with goto isn't supported");
return -EOPNOTSUPP;
}
if (!mlx5e_is_eswitch_flow(flow) && parse_attr->mirred_ifindex[0]) {
NL_SET_ERR_MSG_MOD(extack, "Mirroring goto chain rules isn't supported");
return -EOPNOTSUPP;
}
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_goto = {
.can_offload = tc_act_can_offload_goto,
.parse_action = tc_act_parse_goto,
.post_parse = tc_act_post_parse_goto,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en_tc.h"
static bool
tc_act_can_offload_mark(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
if (act->mark & ~MLX5E_TC_FLOW_ID_MASK) {
NL_SET_ERR_MSG_MOD(parse_state->extack, "Bad flow mark, only 16 bit supported");
return false;
}
return true;
}
static int
tc_act_parse_mark(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->nic_attr->flow_tag = act->mark;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_mark = {
.can_offload = tc_act_can_offload_mark,
.parse_action = tc_act_parse_mark,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "act.h"
#include "vlan.h"
#include "en/tc_tun_encap.h"
#include "en/tc_priv.h"
#include "en_rep.h"
static bool
same_vf_reps(struct mlx5e_priv *priv, struct net_device *out_dev)
{
return mlx5e_eswitch_vf_rep(priv->netdev) &&
priv->netdev == out_dev;
}
static int
verify_uplink_forwarding(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct net_device *out_dev,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rep_priv;
/* Forwarding non encapsulated traffic between
* uplink ports is allowed only if
* termination_table_raw_traffic cap is set.
*
* Input vport was stored attr->in_rep.
* In LAG case, *priv* is the private data of
* uplink which may be not the input vport.
*/
rep_priv = mlx5e_rep_to_rep_priv(attr->esw_attr->in_rep);
if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
mlx5e_eswitch_uplink_rep(out_dev)))
return 0;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
termination_table_raw_traffic)) {
NL_SET_ERR_MSG_MOD(extack,
"devices are both uplink, can't offload forwarding");
pr_err("devices %s %s are both uplink, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
return -EOPNOTSUPP;
} else if (out_dev != rep_priv->netdev) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not the same uplink, can't offload forwarding");
pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
return -EOPNOTSUPP;
}
return 0;
}
static bool
is_duplicated_output_device(struct net_device *dev,
struct net_device *out_dev,
int *ifindexes, int if_count,
struct netlink_ext_ack *extack)
{
int i;
for (i = 0; i < if_count; i++) {
if (ifindexes[i] == out_dev->ifindex) {
NL_SET_ERR_MSG_MOD(extack, "can't duplicate output to same device");
netdev_err(dev, "can't duplicate output to same device: %s\n",
out_dev->name);
return true;
}
}
return false;
}
static struct net_device *
get_fdb_out_dev(struct net_device *uplink_dev, struct net_device *out_dev)
{
struct net_device *fdb_out_dev = out_dev;
struct net_device *uplink_upper;
rcu_read_lock();
uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
if (uplink_upper && netif_is_lag_master(uplink_upper) &&
uplink_upper == out_dev) {
fdb_out_dev = uplink_dev;
} else if (netif_is_lag_master(out_dev)) {
fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
if (fdb_out_dev &&
(!mlx5e_eswitch_rep(fdb_out_dev) ||
!netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
fdb_out_dev = NULL;
}
rcu_read_unlock();
return fdb_out_dev;
}
static bool
tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct net_device *out_dev = act->dev;
struct mlx5e_priv *priv = flow->priv;
struct mlx5_esw_flow_attr *esw_attr;
parse_attr = flow->attr->parse_attr;
esw_attr = flow->attr->esw_attr;
if (!out_dev) {
/* out_dev is NULL when filters with
* non-existing mirred device are replayed to
* the driver.
*/
return false;
}
if (parse_state->mpls_push && !netif_is_bareudp(out_dev)) {
NL_SET_ERR_MSG_MOD(extack, "mpls is supported only through a bareudp device");
return false;
}
if (mlx5e_is_ft_flow(flow) && out_dev == priv->netdev) {
/* Ignore forward to self rules generated
* by adding both mlx5 devs to the flow table
* block on a normal nft offload setup.
*/
return false;
}
if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
"can't support more output ports, can't offload forwarding");
netdev_warn(priv->netdev,
"can't support more than %d output ports, can't offload forwarding\n",
esw_attr->out_count);
return false;
}
if (parse_state->encap ||
netdev_port_same_parent_id(priv->netdev, out_dev) ||
netif_is_ovs_master(out_dev))
return true;
if (parse_attr->filter_dev != priv->netdev) {
/* All mlx5 devices are called to configure
* high level device filters. Therefore, the
* *attempt* to install a filter on invalid
* eswitch should not trigger an explicit error
*/
return false;
}
NL_SET_ERR_MSG_MOD(extack, "devices are not on same switch HW, can't offload forwarding");
netdev_warn(priv->netdev,
"devices %s %s not on same switch HW, can't offload forwarding\n",
netdev_name(priv->netdev),
out_dev->name);
return false;
}
static int
parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct net_device *out_dev = act->dev;
parse_attr->mirred_ifindex[esw_attr->out_count] = out_dev->ifindex;
parse_attr->tun_info[esw_attr->out_count] =
mlx5e_dup_tun_info(parse_state->tun_info);
if (!parse_attr->tun_info[esw_attr->out_count])
return -ENOMEM;
parse_state->encap = false;
esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
esw_attr->out_count++;
/* attr->dests[].rep is resolved when we handle encap */
return 0;
}
static int
parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct net_device *out_dev = act->dev;
struct net_device *uplink_dev;
struct mlx5e_priv *out_priv;
struct mlx5_eswitch *esw;
int *ifindexes;
int if_count;
int err;
esw = priv->mdev->priv.eswitch;
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
ifindexes = parse_state->ifindexes;
if_count = parse_state->if_count;
if (is_duplicated_output_device(priv->netdev, out_dev, ifindexes, if_count, extack))
return -EOPNOTSUPP;
parse_state->ifindexes[if_count] = out_dev->ifindex;
parse_state->if_count++;
out_dev = get_fdb_out_dev(uplink_dev, out_dev);
if (!out_dev)
return -ENODEV;
if (is_vlan_dev(out_dev)) {
err = mlx5e_tc_act_vlan_add_push_action(priv, attr, &out_dev, extack);
if (err)
return err;
}
if (is_vlan_dev(parse_attr->filter_dev)) {
err = mlx5e_tc_act_vlan_add_pop_action(priv, attr, extack);
if (err)
return err;
}
if (netif_is_macvlan(out_dev))
out_dev = macvlan_dev_real_dev(out_dev);
err = verify_uplink_forwarding(priv, attr, out_dev, extack);
if (err)
return err;
if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
return -EOPNOTSUPP;
}
if (same_vf_reps(priv, out_dev)) {
NL_SET_ERR_MSG_MOD(extack, "can't forward from a VF to itself");
return -EOPNOTSUPP;
}
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
esw_attr->out_count++;
return 0;
}
static int
parse_mirred_ovs_master(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct net_device *out_dev = act->dev;
int err;
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
MLX5E_TC_INT_PORT_EGRESS,
&attr->action, esw_attr->out_count);
if (err)
return err;
esw_attr->out_count++;
return 0;
}
static int
tc_act_parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct net_device *out_dev = act->dev;
int err = -EOPNOTSUPP;
if (parse_state->encap)
err = parse_mirred_encap(parse_state, act, attr);
else if (netdev_port_same_parent_id(priv->netdev, out_dev))
err = parse_mirred(parse_state, act, priv, attr);
else if (netif_is_ovs_master(out_dev))
err = parse_mirred_ovs_master(parse_state, act, priv, attr);
if (err)
return err;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_mirred = {
.can_offload = tc_act_can_offload_mirred,
.parse_action = tc_act_parse_mirred,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
struct net_device *out_dev = act->dev;
struct mlx5e_priv *priv = flow->priv;
if (act->id != FLOW_ACTION_REDIRECT)
return false;
if (priv->netdev->netdev_ops != out_dev->netdev_ops ||
!mlx5e_same_hw_devs(priv, netdev_priv(out_dev))) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
netdev_warn(priv->netdev,
"devices %s %s not on same switch HW, can't offload forwarding\n",
netdev_name(priv->netdev),
out_dev->name);
return false;
}
return true;
}
static int
tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->parse_attr->mirred_ifindex[0] = act->dev->ifindex;
flow_flag_set(parse_state->flow, HAIRPIN);
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_mirred_nic = {
.can_offload = tc_act_can_offload_mirred_nic,
.parse_action = tc_act_parse_mirred_nic,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <net/bareudp.h>
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_priv *priv = parse_state->flow->priv;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_l2_to_l3_tunnel) ||
act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
NL_SET_ERR_MSG_MOD(extack, "mpls push is supported only for mpls_uc protocol");
return false;
}
return true;
}
static int
tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
parse_state->mpls_push = true;
return 0;
}
static bool
tc_act_can_offload_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
struct net_device *filter_dev;
filter_dev = flow->attr->parse_attr->filter_dev;
/* we only support mpls pop if it is the first action
* and the filter net device is bareudp. Subsequent
* actions can be pedit and the last can be mirred
* egress redirect.
*/
if (act_index) {
NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action");
return false;
}
if (!netif_is_bareudp(filter_dev)) {
NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only on bareudp devices");
return false;
}
return true;
}
static int
tc_act_parse_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->parse_attr->eth.h_proto = act->mpls_pop.proto;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
flow_flag_set(parse_state->flow, L3_TO_L2_DECAP);
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_mpls_push = {
.can_offload = tc_act_can_offload_mpls_push,
.parse_action = tc_act_parse_mpls_push,
};
struct mlx5e_tc_act mlx5e_tc_act_mpls_pop = {
.can_offload = tc_act_can_offload_mpls_pop,
.parse_action = tc_act_parse_mpls_pop,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/if_vlan.h>
#include "act.h"
#include "pedit.h"
#include "en/tc_priv.h"
#include "en/mod_hdr.h"
static int pedit_header_offsets[] = {
[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};
#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
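/* Worked example: a mangle of the IPv4 TTL arrives as
 * htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 with offset 8, so
 * pedit_header(&hdrs->masks, htype) + offset points at the ttl byte of
 * hdrs->masks.ip4 (struct iphdr).
 */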
static int
set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
u32 *curr_pmask, *curr_pval;
curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
if (*curr_pmask & mask) { /* disallow acting twice on the same location */
NL_SET_ERR_MSG_MOD(extack,
"curr_pmask and new mask same. Acting twice on same location");
goto out_err;
}
*curr_pmask |= mask;
*curr_pval |= (val & mask);
return 0;
out_err:
return -EOPNOTSUPP;
}
static int
parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
u8 htype = act->mangle.htype;
int err = -EOPNOTSUPP;
u32 mask, val, offset;
if (htype == FLOW_ACT_MANGLE_UNSPEC) {
NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
goto out_err;
}
if (!mlx5e_mod_hdr_max_actions(priv->mdev, namespace)) {
NL_SET_ERR_MSG_MOD(extack, "The pedit offload action is not supported");
goto out_err;
}
mask = act->mangle.mask;
val = act->mangle.val;
offset = act->mangle.offset;
err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack);
if (err)
goto out_err;
hdrs[cmd].pedits++;
return 0;
out_err:
return err;
}
static int
parse_pedit_to_reformat(const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
u32 mask, val, offset;
u32 *p;
if (act->id != FLOW_ACTION_MANGLE) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
return -EOPNOTSUPP;
}
if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
return -EOPNOTSUPP;
}
mask = ~act->mangle.mask;
val = act->mangle.val;
offset = act->mangle.offset;
p = (u32 *)&parse_attr->eth;
*(p + (offset >> 2)) |= (val & mask);
return 0;
}
int
mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
return parse_pedit_to_reformat(act, parse_attr, extack);
return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, hdrs, extack);
}
static bool
tc_act_can_offload_pedit(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5e_tc_flow *flow = parse_state->flow;
enum mlx5_flow_namespace_type ns_type;
int err;
ns_type = mlx5e_get_flow_namespace(flow);
err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type,
attr->parse_attr, parse_state->hdrs,
flow, parse_state->extack);
if (err)
return err;
if (flow_flag_test(flow, L3_TO_L2_DECAP))
goto out;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
esw_attr->split_count = esw_attr->out_count;
out:
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_pedit = {
.can_offload = tc_act_can_offload_pedit,
.parse_action = tc_act_parse_pedit,
};
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __MLX5_EN_TC_ACT_PEDIT_H__
#define __MLX5_EN_TC_ACT_PEDIT_H__
#include "en_tc.h"
struct pedit_headers {
struct ethhdr eth;
struct vlan_hdr vlan;
struct iphdr ip4;
struct ipv6hdr ip6;
struct tcphdr tcp;
struct udphdr udp;
};
struct pedit_headers_action {
struct pedit_headers vals;
struct pedit_headers masks;
u32 pedits;
};
int
mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack);
#endif /* __MLX5_EN_TC_ACT_PEDIT_H__ */
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_ptype(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_ptype(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
if (act->ptype != PACKET_HOST) {
NL_SET_ERR_MSG_MOD(extack, "skbedit ptype is only supported with type host");
return -EOPNOTSUPP;
}
parse_state->ptype_host = true;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_ptype = {
.can_offload = tc_act_can_offload_ptype,
.parse_action = tc_act_parse_ptype,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct net_device *out_dev = act->dev;
struct mlx5_esw_flow_attr *esw_attr;
parse_attr = flow->attr->parse_attr;
esw_attr = flow->attr->esw_attr;
if (!out_dev)
return false;
if (!netif_is_ovs_master(out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"redirect to ingress is supported only for OVS internal ports");
return false;
}
if (netif_is_ovs_master(parse_attr->filter_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"redirect to ingress is not supported from internal port");
return false;
}
if (!parse_state->ptype_host) {
NL_SET_ERR_MSG_MOD(extack,
"redirect to int port ingress requires ptype=host action");
return false;
}
if (esw_attr->out_count) {
NL_SET_ERR_MSG_MOD(extack,
"redirect to int port ingress is supported only as single destination");
return false;
}
return true;
}
static int
tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct net_device *out_dev = act->dev;
int err;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
MLX5E_TC_INT_PORT_INGRESS,
&attr->action, esw_attr->out_count);
if (err)
return err;
esw_attr->out_count++;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress = {
.can_offload = tc_act_can_offload_redirect_ingress,
.parse_action = tc_act_parse_redirect_ingress,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <net/psample.h>
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
if (flow_flag_test(parse_state->flow, CT)) {
NL_SET_ERR_MSG_MOD(extack,
"Sample action with connection tracking is not supported");
return false;
}
return true;
}
static int
tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5e_sample_attr *sample_attr;
sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
if (!sample_attr)
return -ENOMEM;
sample_attr->rate = act->sample.rate;
sample_attr->group_num = act->sample.psample_group->group_num;
if (act->sample.truncate)
sample_attr->trunc_size = act->sample.trunc_size;
attr->sample_attr = sample_attr;
flow_flag_set(parse_state->flow, SAMPLE);
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_sample = {
.can_offload = tc_act_can_offload_sample,
.parse_action = tc_act_parse_sample,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
if (parse_state->num_actions != 1) {
NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
return false;
}
return true;
}
static int
tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_trap = {
.can_offload = tc_act_can_offload_trap,
.parse_action = tc_act_parse_trap,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_tun_encap.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
if (!act->tunnel) {
NL_SET_ERR_MSG_MOD(parse_state->extack,
"Zero tunnel attributes is not supported");
return false;
}
return true;
}
static int
tc_act_parse_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
parse_state->tun_info = act->tunnel;
parse_state->encap = true;
return 0;
}
static bool
tc_act_can_offload_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
parse_state->decap = true;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_tun_encap = {
.can_offload = tc_act_can_offload_tun_encap,
.parse_action = tc_act_parse_tun_encap,
};
struct mlx5e_tc_act mlx5e_tc_act_tun_decap = {
.can_offload = tc_act_can_offload_tun_decap,
.parse_action = tc_act_parse_tun_decap,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/if_vlan.h>
#include "act.h"
#include "vlan.h"
#include "en/tc_priv.h"
static int
add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack)
{
const struct flow_action_entry prio_tag_act = {
.vlan.vid = 0,
.vlan.prio =
MLX5_GET(fte_match_set_lyr_2_4,
mlx5e_get_match_headers_value(*action,
&parse_attr->spec),
first_prio) &
MLX5_GET(fte_match_set_lyr_2_4,
mlx5e_get_match_headers_criteria(*action,
&parse_attr->spec),
first_prio),
};
return mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
&prio_tag_act, parse_attr, hdrs, action,
extack);
}
static int
parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
u32 *action,
struct netlink_ext_ack *extack)
{
u8 vlan_idx = attr->total_vlan;
if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
return -EOPNOTSUPP;
}
switch (act->id) {
case FLOW_ACTION_VLAN_POP:
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
MLX5_FS_VLAN_DEPTH)) {
NL_SET_ERR_MSG_MOD(extack, "vlan pop action is not supported");
return -EOPNOTSUPP;
}
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
} else {
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
}
break;
case FLOW_ACTION_VLAN_PUSH:
attr->vlan_vid[vlan_idx] = act->vlan.vid;
attr->vlan_prio[vlan_idx] = act->vlan.prio;
attr->vlan_proto[vlan_idx] = act->vlan.proto;
if (!attr->vlan_proto[vlan_idx])
attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
MLX5_FS_VLAN_DEPTH)) {
NL_SET_ERR_MSG_MOD(extack,
"vlan push action is not supported for vlan depth > 1");
return -EOPNOTSUPP;
}
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
} else {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
(act->vlan.proto != htons(ETH_P_8021Q) ||
act->vlan.prio)) {
NL_SET_ERR_MSG_MOD(extack, "vlan push action is not supported");
return -EOPNOTSUPP;
}
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
}
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
return -EINVAL;
}
attr->total_vlan = vlan_idx + 1;
return 0;
}
int
mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct net_device **out_dev,
struct netlink_ext_ack *extack)
{
struct net_device *vlan_dev = *out_dev;
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_PUSH,
.vlan.vid = vlan_dev_vlan_id(vlan_dev),
.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
.vlan.prio = 0,
};
int err;
err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack);
if (err)
return err;
rcu_read_lock();
*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
rcu_read_unlock();
if (!*out_dev)
return -ENODEV;
if (is_vlan_dev(*out_dev))
err = mlx5e_tc_act_vlan_add_push_action(priv, attr, out_dev, extack);
return err;
}
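/* Note the tail recursion above: when the vlan device's iflink is itself
 * a vlan device (QinQ), a second push is emitted; the depth is bounded by
 * the MLX5_FS_VLAN_DEPTH check in parse_tc_vlan_action().
 */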
int
mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack)
{
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_POP,
};
int nest_level, err = 0;
nest_level = attr->parse_attr->filter_dev->lower_level -
priv->netdev->lower_level;
while (nest_level--) {
err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action,
extack);
if (err)
return err;
}
return err;
}
static bool
tc_act_can_offload_vlan(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int err;
if (act->id == FLOW_ACTION_VLAN_PUSH &&
(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
/* Replace vlan pop+push with vlan modify */
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, act,
attr->parse_attr, parse_state->hdrs,
&attr->action, parse_state->extack);
} else {
err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action,
parse_state->extack);
}
if (err)
return err;
esw_attr->split_count = esw_attr->out_count;
return 0;
}
static int
tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct pedit_headers_action *hdrs = parse_state->hdrs;
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
/* For prio tag mode, replace vlan pop with rewrite vlan prio
* tag rewrite.
*/
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
&attr->action, extack);
if (err)
return err;
}
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_vlan = {
.can_offload = tc_act_can_offload_vlan,
.parse_action = tc_act_parse_vlan,
.post_parse = tc_act_post_parse_vlan,
};
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __MLX5_EN_TC_ACT_VLAN_H__
#define __MLX5_EN_TC_ACT_VLAN_H__
#include <net/flow_offload.h>
#include "en/tc_priv.h"
struct pedit_headers_action;
int
mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct net_device **out_dev,
struct netlink_ext_ack *extack);
int
mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack);
int
mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack);
#endif /* __MLX5_EN_TC_ACT_VLAN_H__ */
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/if_vlan.h>
#include "act.h"
#include "vlan.h"
#include "en/tc_priv.h"
struct pedit_headers_action;
int
mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack)
{
u16 mask16 = VLAN_VID_MASK;
u16 val16 = act->vlan.vid & VLAN_VID_MASK;
const struct flow_action_entry pedit_act = {
.id = FLOW_ACTION_MANGLE,
.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
};
u8 match_prio_mask, match_prio_val;
void *headers_c, *headers_v;
int err;
headers_c = mlx5e_get_match_headers_criteria(*action, &parse_attr->spec);
headers_v = mlx5e_get_match_headers_value(*action, &parse_attr->spec);
if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
NL_SET_ERR_MSG_MOD(extack, "VLAN rewrite action must have VLAN protocol match");
return -EOPNOTSUPP;
}
match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
NL_SET_ERR_MSG_MOD(extack, "Changing VLAN prio is not supported");
return -EOPNOTSUPP;
}
err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr, hdrs,
NULL, extack);
*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
return err;
}
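/* The mangle above targets only the VID bits of the TCI (mask16 ==
 * VLAN_VID_MASK == 0x0fff); PCP/DEI are left untouched, which is why a
 * requested priority change is rejected with EOPNOTSUPP above instead of
 * being rewritten.
 */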
static bool
tc_act_can_offload_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
enum mlx5_flow_namespace_type ns_type;
int err;
ns_type = mlx5e_get_flow_namespace(parse_state->flow);
err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act,
attr->parse_attr, parse_state->hdrs,
&attr->action, parse_state->extack);
if (err)
return err;
if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
attr->esw_attr->split_count = attr->esw_attr->out_count;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle = {
.can_offload = tc_act_can_offload_vlan_mangle,
.parse_action = tc_act_parse_vlan_mangle,
};
@@ -5,11 +5,14 @@
#define __MLX5_EN_TC_PRIV_H__
#include "en_tc.h"
#include "en/tc/act/act.h"
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
#define MLX5E_TC_MAX_SPLITS 1
#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains)
enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
@@ -37,6 +40,7 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
struct ethhdr eth;
struct mlx5e_tc_act_parse_state parse_state;
};
/* Helper struct for accessing a struct containing list_head array.
@@ -115,7 +119,11 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr);
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow);
bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
@@ -176,4 +184,8 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);
void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
#endif /* __MLX5_EN_TC_PRIV_H__ */
@@ -39,10 +39,6 @@
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <linux/if_macvlan.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/psample.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
@@ -62,6 +58,7 @@
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
@@ -70,8 +67,6 @@
#include "lag/lag.h"
#include "lag/mp.h"
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
@@ -399,7 +394,7 @@ bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
return flow_flag_test(flow, ESWITCH);
}
static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, FT);
}
@@ -409,7 +404,7 @@ bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
return flow_flag_test(flow, OFFLOADED);
}
static int get_flow_name_space(struct mlx5e_tc_flow *flow)
int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
return mlx5e_is_eswitch_flow(flow) ?
MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
@@ -420,7 +415,7 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
&priv->fs.tc.mod_hdr;
}
@@ -433,7 +428,7 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_mod_hdr_handle *mh;
mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
get_flow_name_space(flow),
mlx5e_get_flow_namespace(flow),
&parse_attr->mod_hdr_acts);
if (IS_ERR(mh))
return PTR_ERR(mh);
@@ -937,7 +932,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
struct mlx5_fs_chains *nic_chains = nic_chains(priv);
struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5_flow_destination dest[2] = {};
@@ -1091,7 +1086,7 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
struct mlx5_fs_chains *nic_chains = nic_chains(priv);
struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
mlx5_del_flow_rules(rule);
@@ -1123,7 +1118,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
mutex_lock(&priv->fs.tc.t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
priv->fs.tc.t = NULL;
}
mutex_unlock(&priv->fs.tc.t_lock);
@@ -1304,8 +1299,6 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow)
mutex_unlock(&uplink_priv->unready_flows_lock);
}
static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
struct mlx5_core_dev *out_mdev, *route_mdev;
@@ -1320,7 +1313,7 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
route_mdev->coredev_type != MLX5_COREDEV_VF)
return false;
return same_hw_devs(out_priv, route_priv);
return mlx5e_same_hw_devs(out_priv, route_priv);
}
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
@@ -1367,7 +1360,7 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
struct mlx5_modify_hdr *mod_hdr;
mod_hdr = mlx5_modify_header_alloc(priv->mdev,
get_flow_name_space(flow),
mlx5e_get_flow_namespace(flow),
mod_hdr_acts->num_actions,
mod_hdr_acts->actions);
if (IS_ERR(mod_hdr))
@@ -2053,16 +2046,14 @@ static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
outer_headers);
}
static void *get_match_headers_value(u32 flags,
struct mlx5_flow_spec *spec)
void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
get_match_inner_headers_value(spec) :
get_match_outer_headers_value(spec);
}
static void *get_match_headers_criteria(u32 flags,
struct mlx5_flow_spec *spec)
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
get_match_inner_headers_criteria(spec) :
@@ -2607,55 +2598,6 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
return err;
}
struct pedit_headers {
struct ethhdr eth;
struct vlan_hdr vlan;
struct iphdr ip4;
struct ipv6hdr ip6;
struct tcphdr tcp;
struct udphdr udp;
};
struct pedit_headers_action {
struct pedit_headers vals;
struct pedit_headers masks;
u32 pedits;
};
static int pedit_header_offsets[] = {
[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};
#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
u32 *curr_pmask, *curr_pval;
curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
if (*curr_pmask & mask) { /* disallow acting twice on the same location */
NL_SET_ERR_MSG_MOD(extack,
"curr_pmask and new mask same. Acting twice on same location");
goto out_err;
}
*curr_pmask |= mask;
*curr_pval |= (val & mask);
return 0;
out_err:
return -EOPNOTSUPP;
}
struct mlx5_fields {
u8 field;
u8 field_bsize;
@@ -2776,8 +2718,8 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
u8 cmd;
mod_acts = &parse_attr->mod_hdr_acts;
headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
set_masks = &hdrs[0].masks;
add_masks = &hdrs[1].masks;
@@ -2892,88 +2834,6 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
static const struct pedit_headers zero_masks = {};
static int
parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
int err = -EOPNOTSUPP;
u32 mask, val, offset;
u8 htype;
htype = act->mangle.htype;
err = -EOPNOTSUPP; /* can't be all optimistic */
if (htype == FLOW_ACT_MANGLE_UNSPEC) {
NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
goto out_err;
}
if (!mlx5e_mod_hdr_max_actions(priv->mdev, namespace)) {
NL_SET_ERR_MSG_MOD(extack,
"The pedit offload action is not supported");
goto out_err;
}
mask = act->mangle.mask;
val = act->mangle.val;
offset = act->mangle.offset;
err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack);
if (err)
goto out_err;
hdrs[cmd].pedits++;
return 0;
out_err:
return err;
}
static int
parse_pedit_to_reformat(const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
u32 mask, val, offset;
u32 *p;
if (act->id != FLOW_ACTION_MANGLE) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
return -EOPNOTSUPP;
}
if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
return -EOPNOTSUPP;
}
mask = ~act->mangle.mask;
val = act->mangle.val;
offset = act->mangle.offset;
p = (u32 *)&parse_attr->eth;
*(p + (offset >> 2)) |= (val & mask);
return 0;
}
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
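/* For L3-to-L2 decap (MPLS pop) flows the pedit rewrites the cached
 * inner Ethernet header used to build the reformat; otherwise it is
 * translated into modify-header actions.
 */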
if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
return parse_pedit_to_reformat(act, parse_attr, extack);
return parse_pedit_to_modify_hdr(priv, act, namespace,
parse_attr, hdrs, extack);
}
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
......@@ -3009,35 +2869,6 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
return err;
}
static bool csum_offload_supported(struct mlx5e_priv *priv,
u32 action,
u32 update_flags,
struct netlink_ext_ack *extack)
{
u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
TCA_CSUM_UPDATE_FLAG_UDP;
/* The HW recalcs checksums only if re-writing headers */
if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
NL_SET_ERR_MSG_MOD(extack,
"TC csum action is only offloaded with pedit");
netdev_warn(priv->netdev,
"TC csum action is only offloaded with pedit\n");
return false;
}
if (update_flags & ~prot_flags) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload TC csum action for some header/s");
netdev_warn(priv->netdev,
"can't offload TC csum action for some header/s - flags %#x\n",
update_flags);
return false;
}
return true;
}
struct ip_ttl_word {
__u8 ttl;
__u8 protocol;
......@@ -3160,8 +2991,8 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
u8 ip_proto;
int i;
headers_c = get_match_headers_criteria(actions, spec);
headers_v = get_match_headers_value(actions, spec);
headers_c = mlx5e_get_match_headers_criteria(actions, spec);
headers_v = mlx5e_get_match_headers_value(actions, spec);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
/* for non-IP we only re-write MACs, so we're okay */
......@@ -3268,7 +3099,7 @@ static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv
return priv->mdev == peer_priv->mdev;
}
static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *fmdev, *pmdev;
u64 fsystem_guid, psystem_guid;
......@@ -3282,124 +3113,45 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
return (fsystem_guid == psystem_guid);
}
static bool same_vf_reps(struct mlx5e_priv *priv,
struct net_device *out_dev)
{
return mlx5e_eswitch_vf_rep(priv->netdev) &&
priv->netdev == out_dev;
}
static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack)
{
u16 mask16 = VLAN_VID_MASK;
u16 val16 = act->vlan.vid & VLAN_VID_MASK;
const struct flow_action_entry pedit_act = {
.id = FLOW_ACTION_MANGLE,
.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
};
u8 match_prio_mask, match_prio_val;
void *headers_c, *headers_v;
int err;
headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
headers_v = get_match_headers_value(*action, &parse_attr->spec);
if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
NL_SET_ERR_MSG_MOD(extack,
"VLAN rewrite action must have VLAN protocol match");
return -EOPNOTSUPP;
}
match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
NL_SET_ERR_MSG_MOD(extack,
"Changing VLAN prio is not supported");
return -EOPNOTSUPP;
}
err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
return err;
}
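As a worked example of the pedit_act encoding above (a standalone userspace sketch, assuming a little-endian host; not driver code): the byte reinterpretation below mirrors the (__be16 *) cast plus be16_to_cpu() in the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint16_t mask16 = 0x0fff;	/* VLAN_VID_MASK, host order */
	uint16_t val16 = 5 & mask16;	/* rewrite the VID to 5 */
	uint8_t b[2];
	uint16_t be;

	/* read the in-memory bytes as a big-endian halfword */
	memcpy(b, &mask16, sizeof(b));
	be = (uint16_t)((b[0] << 8) | b[1]);
	printf("mangle.mask = %#x\n", ~(uint32_t)be);	/* 0xffff00f0 on LE */

	memcpy(b, &val16, sizeof(b));
	be = (uint16_t)((b[0] << 8) | b[1]);
	printf("mangle.val  = %#x\n", (uint32_t)be);	/* 0x500 on LE */
	return 0;
}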
static int
add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack)
{
const struct flow_action_entry prio_tag_act = {
.vlan.vid = 0,
.vlan.prio =
MLX5_GET(fte_match_set_lyr_2_4,
get_match_headers_value(*action,
&parse_attr->spec),
first_prio) &
MLX5_GET(fte_match_set_lyr_2_4,
get_match_headers_criteria(*action,
&parse_attr->spec),
first_prio),
};
parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5_flow_attr *attr = flow->attr;
enum mlx5_flow_namespace_type ns_type;
struct mlx5e_priv *priv = flow->priv;
const struct flow_action_entry *act;
struct mlx5e_tc_act *tc_act;
int err, i;
return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
&prio_tag_act, parse_attr, hdrs, action,
extack);
}
ns_type = mlx5e_get_flow_namespace(flow);
static int validate_goto_chain(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
bool is_esw = mlx5e_is_eswitch_flow(flow);
bool ft_flow = mlx5e_is_ft_flow(flow);
u32 dest_chain = act->chain_index;
struct mlx5_fs_chains *chains;
struct mlx5_eswitch *esw;
u32 reformat_and_fwd;
u32 max_chain;
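/* first pass: dispatch each TC action to its per-action mlx5e_tc_act handler */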
flow_action_for_each(i, act, flow_action) {
tc_act = mlx5e_tc_act_get(act->id, ns_type);
if (!tc_act) {
NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
return -EOPNOTSUPP;
}
esw = priv->mdev->priv.eswitch;
chains = is_esw ? esw_chains(esw) : nic_chains(priv);
max_chain = mlx5_chains_get_chain_range(chains);
reformat_and_fwd = is_esw ?
MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
if (ft_flow) {
NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
return -EOPNOTSUPP;
}
if (!tc_act->can_offload(parse_state, act, i))
return -EOPNOTSUPP;
if (!mlx5_chains_backwards_supported(chains) &&
dest_chain <= flow->attr->chain) {
NL_SET_ERR_MSG_MOD(extack,
"Goto lower numbered chain isn't supported");
return -EOPNOTSUPP;
err = tc_act->parse_action(parse_state, act, priv, attr);
if (err)
return err;
}
if (dest_chain > max_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Requested destination chain is out of supported range");
return -EOPNOTSUPP;
}
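/* second pass: with all actions parsed, run the optional per-action post_parse() hooks */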
flow_action_for_each(i, act, flow_action) {
tc_act = mlx5e_tc_act_get(act->id, ns_type);
if (!tc_act || !tc_act->post_parse ||
!tc_act->can_offload(parse_state, act, i))
continue;
if (flow->attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
!reformat_and_fwd) {
NL_SET_ERR_MSG_MOD(extack,
"Goto chain is not allowed if action has reformat or decap");
return -EOPNOTSUPP;
err = tc_act->post_parse(parse_state, priv, attr);
if (err)
return err;
}
return 0;
......@@ -3420,7 +3172,7 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
!hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
return 0;
ns_type = get_flow_name_space(flow);
ns_type = mlx5e_get_flow_namespace(flow);
err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs,
&attr->action, extack);
......@@ -3468,116 +3220,26 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct mlx5e_tc_act_parse_state *parse_state;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct pedit_headers_action hdrs[2] = {};
const struct flow_action_entry *act;
struct mlx5_nic_flow_attr *nic_attr;
int err, i;
struct pedit_headers_action *hdrs;
int err;
err = flow_action_supported(flow_action, extack);
if (err)
return err;
nic_attr = attr->nic_attr;
nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
parse_attr = attr->parse_attr;
parse_state = &parse_attr->parse_state;
mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
parse_state->ct_priv = get_ct_priv(priv);
hdrs = parse_state->hdrs;
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
case FLOW_ACTION_DROP:
attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
parse_attr, hdrs, NULL, extack);
if (err)
return err;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
break;
case FLOW_ACTION_VLAN_MANGLE:
err = add_vlan_rewrite_action(priv,
MLX5_FLOW_NAMESPACE_KERNEL,
act, parse_attr, hdrs,
&attr->action, extack);
if (err)
return err;
break;
case FLOW_ACTION_CSUM:
if (csum_offload_supported(priv, attr->action,
act->csum_flags,
extack))
break;
return -EOPNOTSUPP;
case FLOW_ACTION_REDIRECT: {
struct net_device *peer_dev = act->dev;
if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
same_hw_devs(priv, netdev_priv(peer_dev))) {
parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
flow_flag_set(flow, HAIRPIN);
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
} else {
NL_SET_ERR_MSG_MOD(extack,
"device is not on same HW, can't offload");
netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
peer_dev->name);
return -EOPNOTSUPP;
}
}
break;
case FLOW_ACTION_MARK: {
u32 mark = act->mark;
if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
NL_SET_ERR_MSG_MOD(extack,
"Bad flow mark - only 16 bit is supported");
return -EOPNOTSUPP;
}
nic_attr->flow_tag = mark;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
break;
case FLOW_ACTION_GOTO:
err = validate_goto_chain(priv, flow, act, extack);
if (err)
return err;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
&parse_attr->mod_hdr_acts,
act, extack);
if (err)
return err;
flow_flag_set(flow, CT);
break;
default:
NL_SET_ERR_MSG_MOD(extack,
"The offload action is not supported in NIC action");
return -EOPNOTSUPP;
}
}
if (attr->dest_chain && parse_attr->mirred_ifindex[0]) {
NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
return -EOPNOTSUPP;
}
err = parse_tc_actions(parse_state, flow_action);
if (err)
return err;
err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
if (err)
......@@ -3599,146 +3261,7 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
mlx5e_eswitch_vf_rep(priv->netdev) &&
mlx5e_eswitch_vf_rep(peer_netdev) &&
same_hw_devs(priv, peer_priv));
}
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
u32 *action,
struct netlink_ext_ack *extack)
{
u8 vlan_idx = attr->total_vlan;
if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
return -EOPNOTSUPP;
}
switch (act->id) {
case FLOW_ACTION_VLAN_POP:
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
MLX5_FS_VLAN_DEPTH)) {
NL_SET_ERR_MSG_MOD(extack,
"vlan pop action is not supported");
return -EOPNOTSUPP;
}
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
} else {
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
}
break;
case FLOW_ACTION_VLAN_PUSH:
attr->vlan_vid[vlan_idx] = act->vlan.vid;
attr->vlan_prio[vlan_idx] = act->vlan.prio;
attr->vlan_proto[vlan_idx] = act->vlan.proto;
if (!attr->vlan_proto[vlan_idx])
attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
MLX5_FS_VLAN_DEPTH)) {
NL_SET_ERR_MSG_MOD(extack,
"vlan push action is not supported for vlan depth > 1");
return -EOPNOTSUPP;
}
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
} else {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
(act->vlan.proto != htons(ETH_P_8021Q) ||
act->vlan.prio)) {
NL_SET_ERR_MSG_MOD(extack,
"vlan push action is not supported");
return -EOPNOTSUPP;
}
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
}
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
return -EINVAL;
}
attr->total_vlan = vlan_idx + 1;
return 0;
}
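A toy model (illustrative names only, not the driver's flags) of how the first versus second VLAN action selects the plain or the _2 flavour of the push/pop action bits:

#include <stdio.h>

#define ACT_VLAN_POP	0x1
#define ACT_VLAN_PUSH	0x2
#define ACT_VLAN_POP_2	0x4
#define ACT_VLAN_PUSH_2	0x8

/* vlan_idx counts the VLAN actions already recorded for the rule */
static int vlan_flag(int push, int vlan_idx)
{
	if (push)
		return vlan_idx ? ACT_VLAN_PUSH_2 : ACT_VLAN_PUSH;
	return vlan_idx ? ACT_VLAN_POP_2 : ACT_VLAN_POP;
}

int main(void)
{
	/* a QinQ double push: first PUSH, then PUSH_2 */
	printf("%#x %#x\n", vlan_flag(1, 0), vlan_flag(1, 1));
	return 0;
}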
static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
struct net_device *out_dev)
{
struct net_device *fdb_out_dev = out_dev;
struct net_device *uplink_upper;
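/* Resolve the real FDB destination: a bond on top of the uplink maps to
 * the uplink itself, and a LAG master maps to its active lower rep.
 */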
rcu_read_lock();
uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
if (uplink_upper && netif_is_lag_master(uplink_upper) &&
uplink_upper == out_dev) {
fdb_out_dev = uplink_dev;
} else if (netif_is_lag_master(out_dev)) {
fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
if (fdb_out_dev &&
(!mlx5e_eswitch_rep(fdb_out_dev) ||
!netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
fdb_out_dev = NULL;
}
rcu_read_unlock();
return fdb_out_dev;
}
static int add_vlan_push_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct net_device **out_dev,
struct netlink_ext_ack *extack)
{
struct net_device *vlan_dev = *out_dev;
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_PUSH,
.vlan.vid = vlan_dev_vlan_id(vlan_dev),
.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
.vlan.prio = 0,
};
int err;
err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack);
if (err)
return err;
rcu_read_lock();
*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
rcu_read_unlock();
if (!*out_dev)
return -ENODEV;
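/* stacked (QinQ) VLAN devices: recurse to push one tag per level */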
if (is_vlan_dev(*out_dev))
err = add_vlan_push_action(priv, attr, out_dev, extack);
return err;
}
static int add_vlan_pop_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack)
{
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_POP,
};
int nest_level, err = 0;
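/* one pop per VLAN device stacked between the filter device and the mlx5 netdev */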
nest_level = attr->parse_attr->filter_dev->lower_level -
priv->netdev->lower_level;
while (nest_level--) {
err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr,
&attr->action, extack);
if (err)
return err;
}
return err;
mlx5e_same_hw_devs(priv, peer_priv));
}
static bool same_hw_reps(struct mlx5e_priv *priv,
......@@ -3750,7 +3273,7 @@ static bool same_hw_reps(struct mlx5e_priv *priv,
return mlx5e_eswitch_rep(priv->netdev) &&
mlx5e_eswitch_rep(peer_netdev) &&
same_hw_devs(priv, peer_priv);
mlx5e_same_hw_devs(priv, peer_priv);
}
static bool is_lag_dev(struct mlx5e_priv *priv,
......@@ -3774,66 +3297,6 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
same_port_devs(priv, netdev_priv(out_dev));
}
static bool is_duplicated_output_device(struct net_device *dev,
struct net_device *out_dev,
int *ifindexes, int if_count,
struct netlink_ext_ack *extack)
{
int i;
for (i = 0; i < if_count; i++) {
if (ifindexes[i] == out_dev->ifindex) {
NL_SET_ERR_MSG_MOD(extack,
"can't duplicate output to same device");
netdev_err(dev, "can't duplicate output to same device: %s\n",
out_dev->name);
return true;
}
}
return false;
}
static int verify_uplink_forwarding(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct net_device *out_dev,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rep_priv;
/* Forwarding non encapsulated traffic between
* uplink ports is allowed only if
* termination_table_raw_traffic cap is set.
*
* The input vport was stored in attr->in_rep.
* In the LAG case, *priv* is the private data of
* the uplink, which may not be the input vport.
*/
rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
mlx5e_eswitch_uplink_rep(out_dev)))
return 0;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
termination_table_raw_traffic)) {
NL_SET_ERR_MSG_MOD(extack,
"devices are both uplink, can't offload forwarding");
pr_err("devices %s %s are both uplink, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
return -EOPNOTSUPP;
} else if (out_dev != rep_priv->netdev) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not the same uplink, can't offload forwarding");
pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
return -EOPNOTSUPP;
}
return 0;
}
int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
int ifindex,
......@@ -3873,26 +3336,18 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
return 0;
}
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
static int
parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct pedit_headers_action hdrs[2] = {};
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_act_parse_state *parse_state;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5e_sample_attr sample_attr = {};
const struct ip_tunnel_info *info = NULL;
struct mlx5_flow_attr *attr = flow->attr;
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
struct mlx5_esw_flow_attr *esw_attr;
bool encap = false, decap = false;
int err, i, if_count = 0;
bool ptype_host = false;
bool mpls_push = false;
struct pedit_headers_action *hdrs;
int err;
err = flow_action_supported(flow_action, extack);
if (err)
......@@ -3900,348 +3355,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
parse_state = &parse_attr->parse_state;
mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
parse_state->ct_priv = get_ct_priv(priv);
hdrs = parse_state->hdrs;
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
break;
case FLOW_ACTION_PTYPE:
if (act->ptype != PACKET_HOST) {
NL_SET_ERR_MSG_MOD(extack,
"skbedit ptype is only supported with type host");
return -EOPNOTSUPP;
}
ptype_host = true;
break;
case FLOW_ACTION_DROP:
attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
case FLOW_ACTION_TRAP:
if (!flow_offload_has_one_action(flow_action)) {
NL_SET_ERR_MSG_MOD(extack,
"action trap is supported as a sole action only");
return -EOPNOTSUPP;
}
attr->action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT);
attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
break;
case FLOW_ACTION_MPLS_PUSH:
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
reformat_l2_to_l3_tunnel) ||
act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
NL_SET_ERR_MSG_MOD(extack,
"mpls push is supported only for mpls_uc protocol");
return -EOPNOTSUPP;
}
mpls_push = true;
break;
case FLOW_ACTION_MPLS_POP:
/* we only support mpls pop if it is the first action
* and the filter net device is bareudp. Subsequent
* actions can be pedit and the last can be mirred
* egress redirect.
*/
if (i) {
NL_SET_ERR_MSG_MOD(extack,
"mpls pop supported only as first action");
return -EOPNOTSUPP;
}
if (!netif_is_bareudp(parse_attr->filter_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"mpls pop supported only on bareudp devices");
return -EOPNOTSUPP;
}
parse_attr->eth.h_proto = act->mpls_pop.proto;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
flow_flag_set(flow, L3_TO_L2_DECAP);
break;
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
parse_attr, hdrs, flow, extack);
if (err)
return err;
if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
esw_attr->split_count = esw_attr->out_count;
}
break;
case FLOW_ACTION_CSUM:
if (csum_offload_supported(priv, attr->action,
act->csum_flags, extack))
break;
return -EOPNOTSUPP;
case FLOW_ACTION_REDIRECT_INGRESS: {
struct net_device *out_dev;
out_dev = act->dev;
if (!out_dev)
return -EOPNOTSUPP;
if (!netif_is_ovs_master(out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"redirect to ingress is supported only for OVS internal ports");
return -EOPNOTSUPP;
}
if (netif_is_ovs_master(parse_attr->filter_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"redirect to ingress is not supported from internal port");
return -EOPNOTSUPP;
}
if (!ptype_host) {
NL_SET_ERR_MSG_MOD(extack,
"redirect to int port ingress requires ptype=host action");
return -EOPNOTSUPP;
}
if (esw_attr->out_count) {
NL_SET_ERR_MSG_MOD(extack,
"redirect to int port ingress is supported only as single destination");
return -EOPNOTSUPP;
}
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
MLX5E_TC_INT_PORT_INGRESS,
&attr->action, esw_attr->out_count);
if (err)
return err;
esw_attr->out_count++;
break;
}
case FLOW_ACTION_REDIRECT:
case FLOW_ACTION_MIRRED: {
struct mlx5e_priv *out_priv;
struct net_device *out_dev;
out_dev = act->dev;
if (!out_dev) {
/* out_dev is NULL when filters with
* non-existing mirred device are replayed to
* the driver.
*/
return -EINVAL;
}
if (mpls_push && !netif_is_bareudp(out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"mpls is supported only through a bareudp device");
return -EOPNOTSUPP;
}
if (ft_flow && out_dev == priv->netdev) {
/* Ignore forward to self rules generated
* by adding both mlx5 devs to the flow table
* block on a normal nft offload setup.
*/
return -EOPNOTSUPP;
}
if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
"can't support more output ports, can't offload forwarding");
netdev_warn(priv->netdev,
"can't support more than %d output ports, can't offload forwarding\n",
esw_attr->out_count);
return -EOPNOTSUPP;
}
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
if (encap) {
parse_attr->mirred_ifindex[esw_attr->out_count] =
out_dev->ifindex;
parse_attr->tun_info[esw_attr->out_count] =
mlx5e_dup_tun_info(info);
if (!parse_attr->tun_info[esw_attr->out_count])
return -ENOMEM;
encap = false;
esw_attr->dests[esw_attr->out_count].flags |=
MLX5_ESW_DEST_ENCAP;
esw_attr->out_count++;
/* attr->dests[].rep is resolved when we
* handle encap
*/
} else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
if (is_duplicated_output_device(priv->netdev,
out_dev,
ifindexes,
if_count,
extack))
return -EOPNOTSUPP;
ifindexes[if_count] = out_dev->ifindex;
if_count++;
out_dev = get_fdb_out_dev(uplink_dev, out_dev);
if (!out_dev)
return -ENODEV;
if (is_vlan_dev(out_dev)) {
err = add_vlan_push_action(priv, attr, &out_dev, extack);
if (err)
return err;
}
if (is_vlan_dev(parse_attr->filter_dev)) {
err = add_vlan_pop_action(priv, attr, extack);
if (err)
return err;
}
if (netif_is_macvlan(out_dev))
out_dev = macvlan_dev_real_dev(out_dev);
err = verify_uplink_forwarding(priv, flow, out_dev, extack);
if (err)
return err;
if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
return -EOPNOTSUPP;
}
if (same_vf_reps(priv, out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"can't forward from a VF to itself");
return -EOPNOTSUPP;
}
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
esw_attr->out_count++;
} else if (netif_is_ovs_master(out_dev)) {
err = mlx5e_set_fwd_to_int_port_actions(priv, attr,
out_dev->ifindex,
MLX5E_TC_INT_PORT_EGRESS,
&attr->action,
esw_attr->out_count);
if (err)
return err;
esw_attr->out_count++;
} else if (parse_attr->filter_dev != priv->netdev) {
/* All mlx5 devices are called to configure
* high level device filters. Therefore, the
* *attempt* to install a filter on invalid
* eswitch should not trigger an explicit error
*/
return -EINVAL;
} else {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
netdev_warn(priv->netdev,
"devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name,
out_dev->name);
return -EOPNOTSUPP;
}
}
break;
case FLOW_ACTION_TUNNEL_ENCAP:
info = act->tunnel;
if (info) {
encap = true;
} else {
NL_SET_ERR_MSG_MOD(extack,
"Zero tunnel attributes is not supported");
return -EOPNOTSUPP;
}
break;
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_POP:
if (act->id == FLOW_ACTION_VLAN_PUSH &&
(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
/* Replace vlan pop+push with vlan modify */
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = add_vlan_rewrite_action(priv,
MLX5_FLOW_NAMESPACE_FDB,
act, parse_attr, hdrs,
&attr->action, extack);
} else {
err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action,
extack);
}
if (err)
return err;
esw_attr->split_count = esw_attr->out_count;
break;
case FLOW_ACTION_VLAN_MANGLE:
err = add_vlan_rewrite_action(priv,
MLX5_FLOW_NAMESPACE_FDB,
act, parse_attr, hdrs,
&attr->action, extack);
if (err)
return err;
esw_attr->split_count = esw_attr->out_count;
break;
case FLOW_ACTION_TUNNEL_DECAP:
decap = true;
break;
case FLOW_ACTION_GOTO:
err = validate_goto_chain(priv, flow, act, extack);
if (err)
return err;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
if (flow_flag_test(flow, SAMPLE)) {
NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
return -EOPNOTSUPP;
}
err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
&parse_attr->mod_hdr_acts,
act, extack);
if (err)
return err;
flow_flag_set(flow, CT);
esw_attr->split_count = esw_attr->out_count;
break;
case FLOW_ACTION_SAMPLE:
if (flow_flag_test(flow, CT)) {
NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
return -EOPNOTSUPP;
}
sample_attr.rate = act->sample.rate;
sample_attr.group_num = act->sample.psample_group->group_num;
if (act->sample.truncate)
sample_attr.trunc_size = act->sample.trunc_size;
flow_flag_set(flow, SAMPLE);
break;
default:
NL_SET_ERR_MSG_MOD(extack,
"The offload action is not supported in FDB action");
return -EOPNOTSUPP;
}
}
err = parse_tc_actions(parse_state, flow_action);
if (err)
return err;
/* Forwarding to/from an internal port can have only one destination */
if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) &&
......@@ -4251,18 +3372,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
/* For prio tag mode, replace the vlan pop with a vlan prio
* tag rewrite.
*/
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
&attr->action, extack);
if (err)
return err;
}
err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
if (err)
return err;
......@@ -4270,30 +3379,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
if (attr->dest_chain && decap) {
/* It can be supported if we'll create a mapping for
* the tunnel device only (without tunnel), and set
* this tunnel id with this decap flow.
*
* On restore (miss), we'll just set this saved tunnel
* device.
*/
NL_SET_ERR_MSG(extack, "Decap with goto isn't supported");
netdev_warn(priv->netdev, "Decap with goto isn't supported");
return -EOPNOTSUPP;
}
/* Allocate sample attribute only when there is a sample action and
* no errors after parsing.
*/
if (flow_flag_test(flow, SAMPLE)) {
attr->sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
if (!attr->sample_attr)
return -ENOMEM;
*attr->sample_attr = sample_attr;
}
return 0;
}
......@@ -4390,7 +3475,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
flow->cookie = f->cookie;
flow->priv = priv;
attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
if (!attr)
goto err_free;
......@@ -4992,7 +4077,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
u16 peer_vhca_id;
int bkt;
if (!same_hw_devs(priv, peer_priv))
if (!mlx5e_same_hw_devs(priv, peer_priv))
return;
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
......
......@@ -151,7 +151,6 @@ enum {
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
......