Commit 19f1f467 authored by David S. Miller

Merge branch 'mlxsw-Rework-matchall-offloading-plumbing'

Ido Schimmel says:

====================
mlxsw: Rework matchall offloading plumbing

Jiri says:

Currently, matchall and flower are handled by registering separate
callbacks in mlxsw. That leads to a faulty "in_hw_count 2" indication in
the filter show command for every inserted flower filter. That happens
because the matchall callback just blindly returns 0 for the flower
filter and is therefore wrongly accounted for as "the offloader".

I inspected different ways to fix this problem. The only clean solution
is to rework the handling of matchall in mlxsw a bit. The driver now
registers a single callback per bound block, which is called for both
matchall and flower filter insertions.

Along the way, iron out the matchall code a bit, push it into a separate
file, etc.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6488f11f 075c8aa7
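
For orientation before the diff: the sketch below condenses the new per-block
callback added by this series in spectrum_flow.c (all names are taken verbatim
from the diff further down, nothing here is invented). A single callback per
bound block now dispatches both classifier types, so each filter is accounted
for by exactly one offloader and the bogus "in_hw_count 2" disappears.

static int mlxsw_sp_flow_block_cb(enum tc_setup_type type,
				  void *type_data, void *cb_priv)
{
	struct mlxsw_sp_flow_block *flow_block = cb_priv;

	/* One block-level callback: both matchall and flower land here,
	 * so the TC core counts a single offloader per filter.
	 */
	if (mlxsw_sp_flow_block_disabled(flow_block))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return mlxsw_sp_flow_block_mall_cb(flow_block, type_data);
	case TC_SETUP_CLSFLOWER:
		return mlxsw_sp_flow_block_flower_cb(flow_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

The full version, along with the bind/unbind plumbing that replaces the old
separate matchall and flower callbacks, is in the new spectrum_flow.c hunk below.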
@@ -21,6 +21,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_acl_atcam.o spectrum_acl_erp.o \ spectrum_acl_atcam.o spectrum_acl_erp.o \
spectrum1_acl_tcam.o spectrum2_acl_tcam.o \ spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
spectrum_acl_bloom_filter.o spectrum_acl.o \ spectrum_acl_bloom_filter.o spectrum_acl.o \
spectrum_flow.o spectrum_matchall.o \
spectrum_flower.o spectrum_cnt.o \ spectrum_flower.o spectrum_cnt.o \
spectrum_fid.o spectrum_ipip.o \ spectrum_fid.o spectrum_ipip.o \
spectrum_acl_flex_actions.o \ spectrum_acl_flex_actions.o \
......
@@ -25,9 +25,7 @@
#include <linux/log2.h> #include <linux/log2.h>
#include <net/switchdev.h> #include <net/switchdev.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h> #include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h> #include <net/addrconf.h>
#include "spectrum.h" #include "spectrum.h"
@@ -582,16 +580,6 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
return 0; return 0;
} }
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable, u32 rate)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char mpsc_pl[MLXSW_REG_MPSC_LEN];
mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_up) bool is_up)
{ {
@@ -1362,412 +1350,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0; return 0;
} }
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
unsigned long cookie) {
struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
if (mall_tc_entry->cookie == cookie)
return mall_tc_entry;
return NULL;
}
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
const struct flow_action_entry *act,
bool ingress)
{
enum mlxsw_sp_span_type span_type;
if (!act->dev) {
netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
return -EINVAL;
}
mirror->ingress = ingress;
span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
true, &mirror->span_id);
}
static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
enum mlxsw_sp_span_type span_type;
span_type = mirror->ingress ?
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
span_type, true);
}
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_cls_matchall_offload *cls,
const struct flow_action_entry *act,
bool ingress)
{
int err;
if (!mlxsw_sp_port->sample)
return -EOPNOTSUPP;
if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
netdev_err(mlxsw_sp_port->dev, "sample already active\n");
return -EEXIST;
}
if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
return -EOPNOTSUPP;
}
rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
act->sample.psample_group);
mlxsw_sp_port->sample->truncate = act->sample.truncate;
mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
mlxsw_sp_port->sample->rate = act->sample.rate;
err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
if (err)
goto err_port_sample_set;
return 0;
err_port_sample_set:
RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
return err;
}
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
if (!mlxsw_sp_port->sample)
return;
mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_cls_matchall_offload *f,
bool ingress)
{
struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
__be16 protocol = f->common.protocol;
struct flow_action_entry *act;
int err;
if (!flow_offload_has_one_action(&f->rule->action)) {
netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
return -EOPNOTSUPP;
}
mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
if (!mall_tc_entry)
return -ENOMEM;
mall_tc_entry->cookie = f->cookie;
act = &f->rule->action.entries[0];
if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
mirror = &mall_tc_entry->mirror;
err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
mirror, act,
ingress);
} else if (act->id == FLOW_ACTION_SAMPLE &&
protocol == htons(ETH_P_ALL)) {
mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
act, ingress);
} else {
err = -EOPNOTSUPP;
}
if (err)
goto err_add_action;
list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
return 0;
err_add_action:
kfree(mall_tc_entry);
return err;
}
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_cls_matchall_offload *f)
{
struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
f->cookie);
if (!mall_tc_entry) {
netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
return;
}
list_del(&mall_tc_entry->list);
switch (mall_tc_entry->type) {
case MLXSW_SP_PORT_MALL_MIRROR:
mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
&mall_tc_entry->mirror);
break;
case MLXSW_SP_PORT_MALL_SAMPLE:
mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
break;
default:
WARN_ON(1);
}
kfree(mall_tc_entry);
}
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_cls_matchall_offload *f,
bool ingress)
{
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
ingress);
case TC_CLSMATCHALL_DESTROY:
mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
struct flow_cls_offload *f)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
switch (f->command) {
case FLOW_CLS_REPLACE:
return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
case FLOW_CLS_DESTROY:
mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
return 0;
case FLOW_CLS_STATS:
return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
case FLOW_CLS_TMPLT_CREATE:
return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
case FLOW_CLS_TMPLT_DESTROY:
mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
void *type_data,
void *cb_priv, bool ingress)
{
struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
switch (type) {
case TC_SETUP_CLSMATCHALL:
if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
type_data))
return -EOPNOTSUPP;
return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
ingress);
case TC_SETUP_CLSFLOWER:
return 0;
default:
return -EOPNOTSUPP;
}
}
static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
void *type_data,
void *cb_priv)
{
return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
cb_priv, true);
}
static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
void *type_data,
void *cb_priv)
{
return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
cb_priv, false);
}
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct mlxsw_sp_acl_block *acl_block = cb_priv;
switch (type) {
case TC_SETUP_CLSMATCHALL:
return 0;
case TC_SETUP_CLSFLOWER:
if (mlxsw_sp_acl_block_disabled(acl_block))
return -EOPNOTSUPP;
return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
default:
return -EOPNOTSUPP;
}
}
static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
{
struct mlxsw_sp_acl_block *acl_block = cb_priv;
mlxsw_sp_acl_block_destroy(acl_block);
}
static LIST_HEAD(mlxsw_sp_block_cb_list);
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f, bool ingress)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_block *acl_block;
struct flow_block_cb *block_cb;
bool register_block = false;
int err;
block_cb = flow_block_cb_lookup(f->block,
mlxsw_sp_setup_tc_block_cb_flower,
mlxsw_sp);
if (!block_cb) {
acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
if (!acl_block)
return -ENOMEM;
block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
mlxsw_sp, acl_block,
mlxsw_sp_tc_block_flower_release);
if (IS_ERR(block_cb)) {
mlxsw_sp_acl_block_destroy(acl_block);
err = PTR_ERR(block_cb);
goto err_cb_register;
}
register_block = true;
} else {
acl_block = flow_block_cb_priv(block_cb);
}
flow_block_cb_incref(block_cb);
err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
mlxsw_sp_port, ingress, f->extack);
if (err)
goto err_block_bind;
if (ingress)
mlxsw_sp_port->ing_acl_block = acl_block;
else
mlxsw_sp_port->eg_acl_block = acl_block;
if (register_block) {
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
}
return 0;
err_block_bind:
if (!flow_block_cb_decref(block_cb))
flow_block_cb_free(block_cb);
err_cb_register:
return err;
}
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f, bool ingress)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_block *acl_block;
struct flow_block_cb *block_cb;
int err;
block_cb = flow_block_cb_lookup(f->block,
mlxsw_sp_setup_tc_block_cb_flower,
mlxsw_sp);
if (!block_cb)
return;
if (ingress)
mlxsw_sp_port->ing_acl_block = NULL;
else
mlxsw_sp_port->eg_acl_block = NULL;
acl_block = flow_block_cb_priv(block_cb);
err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
mlxsw_sp_port, ingress);
if (!err && !flow_block_cb_decref(block_cb)) {
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
}
}
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
bool ingress;
int err;
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
ingress = true;
} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
ingress = false;
} else {
return -EOPNOTSUPP;
}
f->driver_block_list = &mlxsw_sp_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
&mlxsw_sp_block_cb_list))
return -EBUSY;
block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
mlxsw_sp_port, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
ingress);
if (err) {
flow_block_cb_free(block_cb);
return err;
}
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
return 0;
case FLOW_BLOCK_UNBIND:
mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
f, ingress);
block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
if (!block_cb)
return -ENOENT;
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data) void *type_data)
{ {
@@ -1791,23 +1373,21 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
} }
} }
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{ {
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
if (!enable) { if (!enable) {
if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) || if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) || mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
!list_empty(&mlxsw_sp_port->mall_tc_list)) {
netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL; return -EINVAL;
} }
mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block); mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block); mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
} else { } else {
mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block); mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block); mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
} }
return 0; return 0;
} }
@@ -3695,7 +3275,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mapping = *port_mapping; mlxsw_sp_port->mapping = *port_mapping;
mlxsw_sp_port->link.autoneg = 1; mlxsw_sp_port->link.autoneg = 1;
INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
mlxsw_sp_port->pcpu_stats = mlxsw_sp_port->pcpu_stats =
netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
@@ -3704,13 +3283,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_stats; goto err_alloc_stats;
} }
mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
GFP_KERNEL);
if (!mlxsw_sp_port->sample) {
err = -ENOMEM;
goto err_alloc_sample;
}
INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
&update_stats_cache); &update_stats_cache);
@@ -3897,8 +3469,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
err_port_swid_set: err_port_swid_set:
mlxsw_sp_port_module_unmap(mlxsw_sp_port); mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map: err_port_module_map:
kfree(mlxsw_sp_port->sample);
err_alloc_sample:
free_percpu(mlxsw_sp_port->pcpu_stats); free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats: err_alloc_stats:
free_netdev(dev); free_netdev(dev);
@@ -3926,7 +3496,6 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp_port); mlxsw_sp_port_module_unmap(mlxsw_sp_port);
kfree(mlxsw_sp_port->sample);
free_percpu(mlxsw_sp_port->pcpu_stats); free_percpu(mlxsw_sp_port->pcpu_stats);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
free_netdev(mlxsw_sp_port->dev); free_netdev(mlxsw_sp_port->dev);
@@ -4413,7 +3982,7 @@ static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
{ {
struct mlxsw_sp *mlxsw_sp = priv; struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
struct psample_group *psample_group; struct mlxsw_sp_port_sample *sample;
u32 size; u32 size;
if (unlikely(!mlxsw_sp_port)) { if (unlikely(!mlxsw_sp_port)) {
@@ -4421,22 +3990,14 @@ static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
local_port); local_port);
goto out; goto out;
} }
if (unlikely(!mlxsw_sp_port->sample)) {
dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
local_port);
goto out;
}
size = mlxsw_sp_port->sample->truncate ?
mlxsw_sp_port->sample->trunc_size : skb->len;
rcu_read_lock(); rcu_read_lock();
psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); sample = rcu_dereference(mlxsw_sp_port->sample);
if (!psample_group) if (!sample)
goto out_unlock; goto out_unlock;
psample_sample_packet(psample_group, skb, size, size = sample->truncate ? sample->trunc_size : skb->len;
mlxsw_sp_port->dev->ifindex, 0, psample_sample_packet(sample->psample_group, skb, size,
mlxsw_sp_port->sample->rate); mlxsw_sp_port->dev->ifindex, 0, sample->rate);
out_unlock: out_unlock:
rcu_read_unlock(); rcu_read_unlock();
out: out:
......
@@ -109,25 +109,6 @@ struct mlxsw_sp_mid {
unsigned long *ports_in_mid; /* bits array */ unsigned long *ports_in_mid; /* bits array */
}; };
enum mlxsw_sp_port_mall_action_type {
MLXSW_SP_PORT_MALL_MIRROR,
MLXSW_SP_PORT_MALL_SAMPLE,
};
struct mlxsw_sp_port_mall_mirror_tc_entry {
int span_id;
bool ingress;
};
struct mlxsw_sp_port_mall_tc_entry {
struct list_head list;
unsigned long cookie;
enum mlxsw_sp_port_mall_action_type type;
union {
struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
};
};
struct mlxsw_sp_sb; struct mlxsw_sp_sb;
struct mlxsw_sp_bridge; struct mlxsw_sp_bridge;
struct mlxsw_sp_router; struct mlxsw_sp_router;
@@ -211,7 +192,7 @@ struct mlxsw_sp_port_pcpu_stats {
}; };
struct mlxsw_sp_port_sample { struct mlxsw_sp_port_sample {
struct psample_group __rcu *psample_group; struct psample_group *psample_group;
u32 trunc_size; u32 trunc_size;
u32 rate; u32 rate;
bool truncate; bool truncate;
@@ -274,21 +255,19 @@ struct mlxsw_sp_port {
* the same localport can have * the same localport can have
* different mapping. * different mapping.
*/ */
/* TC handles */
struct list_head mall_tc_list;
struct { struct {
#define MLXSW_HW_STATS_UPDATE_TIME HZ #define MLXSW_HW_STATS_UPDATE_TIME HZ
struct rtnl_link_stats64 stats; struct rtnl_link_stats64 stats;
struct mlxsw_sp_port_xstats xstats; struct mlxsw_sp_port_xstats xstats;
struct delayed_work update_dw; struct delayed_work update_dw;
} periodic_hw_stats; } periodic_hw_stats;
struct mlxsw_sp_port_sample *sample; struct mlxsw_sp_port_sample __rcu *sample;
struct list_head vlans_list; struct list_head vlans_list;
struct mlxsw_sp_port_vlan *default_vlan; struct mlxsw_sp_port_vlan *default_vlan;
struct mlxsw_sp_qdisc_state *qdisc; struct mlxsw_sp_qdisc_state *qdisc;
unsigned acl_rule_count; unsigned acl_rule_count;
struct mlxsw_sp_acl_block *ing_acl_block; struct mlxsw_sp_flow_block *ing_flow_block;
struct mlxsw_sp_acl_block *eg_acl_block; struct mlxsw_sp_flow_block *eg_flow_block;
struct { struct {
struct delayed_work shaper_dw; struct delayed_work shaper_dw;
struct hwtstamp_config hwtstamp_config; struct hwtstamp_config hwtstamp_config;
@@ -654,17 +633,10 @@ struct mlxsw_sp_acl_rule_info {
unsigned int counter_index; unsigned int counter_index;
}; };
struct mlxsw_sp_acl_block; /* spectrum_flow.c */
struct mlxsw_sp_acl_ruleset; struct mlxsw_sp_flow_block {
/* spectrum_acl.c */
enum mlxsw_sp_acl_profile {
MLXSW_SP_ACL_PROFILE_FLOWER,
MLXSW_SP_ACL_PROFILE_MR,
};
struct mlxsw_sp_acl_block {
struct list_head binding_list; struct list_head binding_list;
struct list_head mall_list;
struct mlxsw_sp_acl_ruleset *ruleset_zero; struct mlxsw_sp_acl_ruleset *ruleset_zero;
struct mlxsw_sp *mlxsw_sp; struct mlxsw_sp *mlxsw_sp;
unsigned int rule_count; unsigned int rule_count;
@@ -676,35 +648,92 @@ struct mlxsw_sp_acl_block {
struct net *net; struct net *net;
}; };
struct mlxsw_sp_flow_block_binding {
struct list_head list;
struct net_device *dev;
struct mlxsw_sp_port *mlxsw_sp_port;
bool ingress;
};
static inline struct mlxsw_sp *
mlxsw_sp_flow_block_mlxsw_sp(struct mlxsw_sp_flow_block *block)
{
return block->mlxsw_sp;
}
static inline unsigned int
mlxsw_sp_flow_block_rule_count(const struct mlxsw_sp_flow_block *block)
{
return block ? block->rule_count : 0;
}
static inline void
mlxsw_sp_flow_block_disable_inc(struct mlxsw_sp_flow_block *block)
{
if (block)
block->disable_count++;
}
static inline void
mlxsw_sp_flow_block_disable_dec(struct mlxsw_sp_flow_block *block)
{
if (block)
block->disable_count--;
}
static inline bool
mlxsw_sp_flow_block_disabled(const struct mlxsw_sp_flow_block *block)
{
return block->disable_count;
}
static inline bool
mlxsw_sp_flow_block_is_egress_bound(const struct mlxsw_sp_flow_block *block)
{
return block->egress_binding_count;
}
static inline bool
mlxsw_sp_flow_block_is_ingress_bound(const struct mlxsw_sp_flow_block *block)
{
return block->ingress_binding_count;
}
static inline bool
mlxsw_sp_flow_block_is_mixed_bound(const struct mlxsw_sp_flow_block *block)
{
return block->ingress_binding_count && block->egress_binding_count;
}
struct mlxsw_sp_flow_block *mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp,
struct net *net);
void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block);
int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f);
/* spectrum_acl.c */
struct mlxsw_sp_acl_ruleset;
enum mlxsw_sp_acl_profile {
MLXSW_SP_ACL_PROFILE_FLOWER,
MLXSW_SP_ACL_PROFILE_MR,
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
unsigned int int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block); struct mlxsw_sp_flow_block *block,
void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block); struct mlxsw_sp_flow_block_binding *binding);
void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block); void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block); struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_flow_block_binding *binding);
struct net *net);
void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress,
struct netlink_ext_ack *extack);
int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress);
bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block);
bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block);
bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block);
struct mlxsw_sp_acl_ruleset * struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index, struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile); enum mlxsw_sp_acl_profile profile);
struct mlxsw_sp_acl_ruleset * struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index, struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile, enum mlxsw_sp_acl_profile profile,
struct mlxsw_afk_element_usage *tmplt_elusage); struct mlxsw_afk_element_usage *tmplt_elusage);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
@@ -736,7 +765,7 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct net_device *out_dev, struct net_device *out_dev,
struct netlink_ext_ack *extack); struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
@@ -857,21 +886,31 @@ extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops;
extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops; extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops; extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
/* spectrum_matchall.c */
int mlxsw_sp_mall_replace(struct mlxsw_sp_flow_block *block,
struct tc_cls_matchall_offload *f);
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
struct tc_cls_matchall_offload *f);
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_port *mlxsw_sp_port);
/* spectrum_flower.c */ /* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f); struct flow_cls_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f); struct flow_cls_offload *f);
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f); struct flow_cls_offload *f);
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f); struct flow_cls_offload *f);
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f); struct flow_cls_offload *f);
/* spectrum_qdisc.c */ /* spectrum_qdisc.c */
......
@@ -9,7 +9,7 @@
struct mlxsw_sp2_mr_tcam { struct mlxsw_sp2_mr_tcam {
struct mlxsw_sp *mlxsw_sp; struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_acl_block *acl_block; struct mlxsw_sp_flow_block *flow_block;
struct mlxsw_sp_acl_ruleset *ruleset4; struct mlxsw_sp_acl_ruleset *ruleset4;
struct mlxsw_sp_acl_ruleset *ruleset6; struct mlxsw_sp_acl_ruleset *ruleset6;
}; };
@@ -61,7 +61,7 @@ static int mlxsw_sp2_mr_tcam_ipv4_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
mlxsw_sp2_mr_tcam_usage_ipv4, mlxsw_sp2_mr_tcam_usage_ipv4,
ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv4)); ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv4));
mr_tcam->ruleset4 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp, mr_tcam->ruleset4 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
mr_tcam->acl_block, mr_tcam->flow_block,
MLXSW_SP_L3_PROTO_IPV4, MLXSW_SP_L3_PROTO_IPV4,
MLXSW_SP_ACL_PROFILE_MR, MLXSW_SP_ACL_PROFILE_MR,
&elusage); &elusage);
@@ -111,7 +111,7 @@ static int mlxsw_sp2_mr_tcam_ipv6_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
mlxsw_sp2_mr_tcam_usage_ipv6, mlxsw_sp2_mr_tcam_usage_ipv6,
ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv6)); ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv6));
mr_tcam->ruleset6 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp, mr_tcam->ruleset6 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
mr_tcam->acl_block, mr_tcam->flow_block,
MLXSW_SP_L3_PROTO_IPV6, MLXSW_SP_L3_PROTO_IPV6,
MLXSW_SP_ACL_PROFILE_MR, MLXSW_SP_ACL_PROFILE_MR,
&elusage); &elusage);
@@ -289,8 +289,8 @@ static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
int err; int err;
mr_tcam->mlxsw_sp = mlxsw_sp; mr_tcam->mlxsw_sp = mlxsw_sp;
mr_tcam->acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, NULL); mr_tcam->flow_block = mlxsw_sp_flow_block_create(mlxsw_sp, NULL);
if (!mr_tcam->acl_block) if (!mr_tcam->flow_block)
return -ENOMEM; return -ENOMEM;
err = mlxsw_sp2_mr_tcam_ipv4_init(mr_tcam); err = mlxsw_sp2_mr_tcam_ipv4_init(mr_tcam);
@@ -306,7 +306,7 @@ static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
err_ipv6_init: err_ipv6_init:
mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam); mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
err_ipv4_init: err_ipv4_init:
mlxsw_sp_acl_block_destroy(mr_tcam->acl_block); mlxsw_sp_flow_block_destroy(mr_tcam->flow_block);
return err; return err;
} }
@@ -316,7 +316,7 @@ static void mlxsw_sp2_mr_tcam_fini(void *priv)
mlxsw_sp2_mr_tcam_ipv6_fini(mr_tcam); mlxsw_sp2_mr_tcam_ipv6_fini(mr_tcam);
mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam); mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
mlxsw_sp_acl_block_destroy(mr_tcam->acl_block); mlxsw_sp_flow_block_destroy(mr_tcam->flow_block);
} }
const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = { const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = {
......
@@ -40,15 +40,8 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
return acl->afk; return acl->afk;
} }
struct mlxsw_sp_acl_block_binding {
struct list_head list;
struct net_device *dev;
struct mlxsw_sp_port *mlxsw_sp_port;
bool ingress;
};
struct mlxsw_sp_acl_ruleset_ht_key { struct mlxsw_sp_acl_ruleset_ht_key {
struct mlxsw_sp_acl_block *block; struct mlxsw_sp_flow_block *block;
u32 chain_index; u32 chain_index;
const struct mlxsw_sp_acl_profile_ops *ops; const struct mlxsw_sp_acl_profile_ops *ops;
}; };
@@ -94,49 +87,6 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp->acl->dummy_fid; return mlxsw_sp->acl->dummy_fid;
} }
struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
{
return block->mlxsw_sp;
}
unsigned int
mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block)
{
return block ? block->rule_count : 0;
}
void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
{
if (block)
block->disable_count++;
}
void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
{
if (block)
block->disable_count--;
}
bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block)
{
return block->disable_count;
}
bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block)
{
return block->egress_binding_count;
}
bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block)
{
return block->ingress_binding_count;
}
bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block)
{
return block->ingress_binding_count && block->egress_binding_count;
}
static bool static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset) mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{ {
@@ -144,10 +94,9 @@ mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
return ruleset->ref_count == 2; return ruleset->ref_count == 2;
} }
static int int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block_binding *binding)
struct mlxsw_sp_acl_block_binding *binding)
{ {
struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero; struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
@@ -156,10 +105,9 @@ mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
binding->mlxsw_sp_port, binding->ingress); binding->mlxsw_sp_port, binding->ingress);
} }
static void void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block_binding *binding)
struct mlxsw_sp_acl_block_binding *binding)
{ {
struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero; struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
@@ -168,18 +116,12 @@ mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
binding->mlxsw_sp_port, binding->ingress); binding->mlxsw_sp_port, binding->ingress);
} }
static bool
mlxsw_sp_acl_ruleset_block_bound(const struct mlxsw_sp_acl_block *block)
{
return block->ruleset_zero;
}
static int static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset, struct mlxsw_sp_acl_ruleset *ruleset,
struct mlxsw_sp_acl_block *block) struct mlxsw_sp_flow_block *block)
{ {
struct mlxsw_sp_acl_block_binding *binding; struct mlxsw_sp_flow_block_binding *binding;
int err; int err;
block->ruleset_zero = ruleset; block->ruleset_zero = ruleset;
@@ -202,122 +144,18 @@ mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
static void static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset, struct mlxsw_sp_acl_ruleset *ruleset,
struct mlxsw_sp_acl_block *block) struct mlxsw_sp_flow_block *block)
{ {
struct mlxsw_sp_acl_block_binding *binding; struct mlxsw_sp_flow_block_binding *binding;
list_for_each_entry(binding, &block->binding_list, list) list_for_each_entry(binding, &block->binding_list, list)
mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding); mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
block->ruleset_zero = NULL; block->ruleset_zero = NULL;
} }
struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
struct net *net)
{
struct mlxsw_sp_acl_block *block;
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (!block)
return NULL;
INIT_LIST_HEAD(&block->binding_list);
block->mlxsw_sp = mlxsw_sp;
block->net = net;
return block;
}
void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
{
WARN_ON(!list_empty(&block->binding_list));
kfree(block);
}
static struct mlxsw_sp_acl_block_binding *
mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
{
struct mlxsw_sp_acl_block_binding *binding;
list_for_each_entry(binding, &block->binding_list, list)
if (binding->mlxsw_sp_port == mlxsw_sp_port &&
binding->ingress == ingress)
return binding;
return NULL;
}
int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_acl_block_binding *binding;
int err;
if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
return -EEXIST;
if (ingress && block->ingress_blocker_rule_count) {
NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
return -EOPNOTSUPP;
}
if (!ingress && block->egress_blocker_rule_count) {
NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
return -EOPNOTSUPP;
}
binding = kzalloc(sizeof(*binding), GFP_KERNEL);
if (!binding)
return -ENOMEM;
binding->mlxsw_sp_port = mlxsw_sp_port;
binding->ingress = ingress;
if (mlxsw_sp_acl_ruleset_block_bound(block)) {
err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
if (err)
goto err_ruleset_bind;
}
if (ingress)
block->ingress_binding_count++;
else
block->egress_binding_count++;
list_add(&binding->list, &block->binding_list);
return 0;
err_ruleset_bind:
kfree(binding);
return err;
}
int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress)
{
struct mlxsw_sp_acl_block_binding *binding;
binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
if (!binding)
return -ENOENT;
list_del(&binding->list);
if (ingress)
block->ingress_binding_count--;
else
block->egress_binding_count--;
if (mlxsw_sp_acl_ruleset_block_bound(block))
mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
kfree(binding);
return 0;
}
static struct mlxsw_sp_acl_ruleset * static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index, struct mlxsw_sp_flow_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops, const struct mlxsw_sp_acl_profile_ops *ops,
struct mlxsw_afk_element_usage *tmplt_elusage) struct mlxsw_afk_element_usage *tmplt_elusage)
{ {
@@ -388,7 +226,7 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_acl_ruleset * static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
struct mlxsw_sp_acl_block *block, u32 chain_index, struct mlxsw_sp_flow_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops) const struct mlxsw_sp_acl_profile_ops *ops)
{ {
struct mlxsw_sp_acl_ruleset_ht_key ht_key; struct mlxsw_sp_acl_ruleset_ht_key ht_key;
@@ -403,7 +241,7 @@ __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
struct mlxsw_sp_acl_ruleset * struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index, struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile) enum mlxsw_sp_acl_profile profile)
{ {
const struct mlxsw_sp_acl_profile_ops *ops; const struct mlxsw_sp_acl_profile_ops *ops;
@@ -421,7 +259,7 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset * struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index, struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile, enum mlxsw_sp_acl_profile profile,
struct mlxsw_afk_element_usage *tmplt_elusage) struct mlxsw_afk_element_usage *tmplt_elusage)
{ {
@@ -584,11 +422,11 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct net_device *out_dev, struct net_device *out_dev,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
struct mlxsw_sp_acl_block_binding *binding; struct mlxsw_sp_flow_block_binding *binding;
struct mlxsw_sp_port *in_port; struct mlxsw_sp_port *in_port;
if (!list_is_singular(&block->binding_list)) { if (!list_is_singular(&block->binding_list)) {
@@ -596,7 +434,7 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
binding = list_first_entry(&block->binding_list, binding = list_first_entry(&block->binding_list,
struct mlxsw_sp_acl_block_binding, list); struct mlxsw_sp_flow_block_binding, list);
in_port = binding->mlxsw_sp_port; in_port = binding->mlxsw_sp_port;
return mlxsw_afa_block_append_mirror(rulei->act_block, return mlxsw_afa_block_append_mirror(rulei->act_block,
@@ -818,7 +656,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
{ {
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
struct mlxsw_sp_acl_block *block = ruleset->ht_key.block; struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
int err; int err;
err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei); err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
@@ -862,18 +700,17 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
{ {
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
struct mlxsw_sp_acl_block *block = ruleset->ht_key.block; struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker; block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker; block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
ruleset->ht_key.block->rule_count--; block->rule_count--;
mutex_lock(&mlxsw_sp->acl->rules_lock); mutex_lock(&mlxsw_sp->acl->rules_lock);
list_del(&rule->list); list_del(&rule->list);
mutex_unlock(&mlxsw_sp->acl->rules_lock); mutex_unlock(&mlxsw_sp->acl->rules_lock);
if (!ruleset->ht_key.chain_index && if (!ruleset->ht_key.chain_index &&
mlxsw_sp_acl_ruleset_is_singular(ruleset)) mlxsw_sp_acl_ruleset_is_singular(ruleset))
mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
ruleset->ht_key.block);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params); mlxsw_sp_acl_rule_ht_params);
ops->rule_del(mlxsw_sp, rule->priv); ops->rule_del(mlxsw_sp, rule->priv);
......
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include "spectrum.h"
struct mlxsw_sp_flow_block *
mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp, struct net *net)
{
struct mlxsw_sp_flow_block *block;
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (!block)
return NULL;
INIT_LIST_HEAD(&block->binding_list);
INIT_LIST_HEAD(&block->mall_list);
block->mlxsw_sp = mlxsw_sp;
block->net = net;
return block;
}
void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block)
{
WARN_ON(!list_empty(&block->binding_list));
kfree(block);
}
static struct mlxsw_sp_flow_block_binding *
mlxsw_sp_flow_block_lookup(struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
{
struct mlxsw_sp_flow_block_binding *binding;
list_for_each_entry(binding, &block->binding_list, list)
if (binding->mlxsw_sp_port == mlxsw_sp_port &&
binding->ingress == ingress)
return binding;
return NULL;
}
static bool
mlxsw_sp_flow_block_ruleset_bound(const struct mlxsw_sp_flow_block *block)
{
return block->ruleset_zero;
}
static int mlxsw_sp_flow_block_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_flow_block_binding *binding;
int err;
if (WARN_ON(mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress)))
return -EEXIST;
if (ingress && block->ingress_blocker_rule_count) {
NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
return -EOPNOTSUPP;
}
if (!ingress && block->egress_blocker_rule_count) {
NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
return -EOPNOTSUPP;
}
err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port);
if (err)
return err;
binding = kzalloc(sizeof(*binding), GFP_KERNEL);
if (!binding) {
err = -ENOMEM;
goto err_binding_alloc;
}
binding->mlxsw_sp_port = mlxsw_sp_port;
binding->ingress = ingress;
if (mlxsw_sp_flow_block_ruleset_bound(block)) {
err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
if (err)
goto err_ruleset_bind;
}
if (ingress)
block->ingress_binding_count++;
else
block->egress_binding_count++;
list_add(&binding->list, &block->binding_list);
return 0;
err_ruleset_bind:
kfree(binding);
err_binding_alloc:
mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);
return err;
}
static int mlxsw_sp_flow_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress)
{
struct mlxsw_sp_flow_block_binding *binding;
binding = mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress);
if (!binding)
return -ENOENT;
list_del(&binding->list);
if (ingress)
block->ingress_binding_count--;
else
block->egress_binding_count--;
if (mlxsw_sp_flow_block_ruleset_bound(block))
mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
kfree(binding);
mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);
return 0;
}
static int mlxsw_sp_flow_block_mall_cb(struct mlxsw_sp_flow_block *flow_block,
struct tc_cls_matchall_offload *f)
{
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
return mlxsw_sp_mall_replace(flow_block, f);
case TC_CLSMATCHALL_DESTROY:
mlxsw_sp_mall_destroy(flow_block, f);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int mlxsw_sp_flow_block_flower_cb(struct mlxsw_sp_flow_block *flow_block,
struct flow_cls_offload *f)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_flow_block_mlxsw_sp(flow_block);
switch (f->command) {
case FLOW_CLS_REPLACE:
return mlxsw_sp_flower_replace(mlxsw_sp, flow_block, f);
case FLOW_CLS_DESTROY:
mlxsw_sp_flower_destroy(mlxsw_sp, flow_block, f);
return 0;
case FLOW_CLS_STATS:
return mlxsw_sp_flower_stats(mlxsw_sp, flow_block, f);
case FLOW_CLS_TMPLT_CREATE:
return mlxsw_sp_flower_tmplt_create(mlxsw_sp, flow_block, f);
case FLOW_CLS_TMPLT_DESTROY:
mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, flow_block, f);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int mlxsw_sp_flow_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct mlxsw_sp_flow_block *flow_block = cb_priv;
if (mlxsw_sp_flow_block_disabled(flow_block))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSMATCHALL:
return mlxsw_sp_flow_block_mall_cb(flow_block, type_data);
case TC_SETUP_CLSFLOWER:
return mlxsw_sp_flow_block_flower_cb(flow_block, type_data);
default:
return -EOPNOTSUPP;
}
}
static void mlxsw_sp_tc_block_release(void *cb_priv)
{
struct mlxsw_sp_flow_block *flow_block = cb_priv;
mlxsw_sp_flow_block_destroy(flow_block);
}
static LIST_HEAD(mlxsw_sp_block_cb_list);
static int mlxsw_sp_setup_tc_block_bind(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f,
bool ingress)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_flow_block *flow_block;
struct flow_block_cb *block_cb;
bool register_block = false;
int err;
block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_flow_block_cb,
mlxsw_sp);
if (!block_cb) {
flow_block = mlxsw_sp_flow_block_create(mlxsw_sp, f->net);
if (!flow_block)
return -ENOMEM;
block_cb = flow_block_cb_alloc(mlxsw_sp_flow_block_cb,
mlxsw_sp, flow_block,
mlxsw_sp_tc_block_release);
if (IS_ERR(block_cb)) {
mlxsw_sp_flow_block_destroy(flow_block);
err = PTR_ERR(block_cb);
goto err_cb_register;
}
register_block = true;
} else {
flow_block = flow_block_cb_priv(block_cb);
}
flow_block_cb_incref(block_cb);
err = mlxsw_sp_flow_block_bind(mlxsw_sp, flow_block,
mlxsw_sp_port, ingress, f->extack);
if (err)
goto err_block_bind;
if (ingress)
mlxsw_sp_port->ing_flow_block = flow_block;
else
mlxsw_sp_port->eg_flow_block = flow_block;
if (register_block) {
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
}
return 0;
err_block_bind:
if (!flow_block_cb_decref(block_cb))
flow_block_cb_free(block_cb);
err_cb_register:
return err;
}
static void mlxsw_sp_setup_tc_block_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f,
bool ingress)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_flow_block *flow_block;
struct flow_block_cb *block_cb;
int err;
block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_flow_block_cb,
mlxsw_sp);
if (!block_cb)
return;
if (ingress)
mlxsw_sp_port->ing_flow_block = NULL;
else
mlxsw_sp_port->eg_flow_block = NULL;
flow_block = flow_block_cb_priv(block_cb);
err = mlxsw_sp_flow_block_unbind(mlxsw_sp, flow_block,
mlxsw_sp_port, ingress);
if (!err && !flow_block_cb_decref(block_cb)) {
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
}
}
int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f)
{
bool ingress;
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
ingress = true;
else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
ingress = false;
else
return -EOPNOTSUPP;
f->driver_block_list = &mlxsw_sp_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
return mlxsw_sp_setup_tc_block_bind(mlxsw_sp_port, f, ingress);
case FLOW_BLOCK_UNBIND:
mlxsw_sp_setup_tc_block_unbind(mlxsw_sp_port, f, ingress);
return 0;
default:
return -EOPNOTSUPP;
}
}
@@ -15,7 +15,7 @@
#include "core_acl_flex_keys.h" #include "core_acl_flex_keys.h"
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_acl_rule_info *rulei,
struct flow_action *flow_action, struct flow_action *flow_action,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
@@ -53,11 +53,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
case FLOW_ACTION_DROP: { case FLOW_ACTION_DROP: {
bool ingress; bool ingress;
if (mlxsw_sp_acl_block_is_mixed_bound(block)) { if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress"); NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
ingress = mlxsw_sp_acl_block_is_ingress_bound(block); ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress, err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
act->cookie, extack); act->cookie, extack);
if (err) { if (err) {
@@ -106,7 +106,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid; struct mlxsw_sp_fid *fid;
u16 fid_index; u16 fid_index;
if (mlxsw_sp_acl_block_is_egress_bound(block)) { if (mlxsw_sp_flow_block_is_egress_bound(block)) {
NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress"); NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
@@ -190,7 +190,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei, static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
struct flow_cls_offload *f, struct flow_cls_offload *f,
struct mlxsw_sp_acl_block *block) struct mlxsw_sp_flow_block *block)
{ {
struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp_port *mlxsw_sp_port;
@@ -371,7 +371,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
} }
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_acl_rule_info *rulei,
struct flow_cls_offload *f) struct flow_cls_offload *f)
{ {
@@ -460,7 +460,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct flow_match_vlan match; struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match); flow_rule_match_vlan(rule, &match);
if (mlxsw_sp_acl_block_is_egress_bound(block)) { if (mlxsw_sp_flow_block_is_egress_bound(block)) {
NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress"); NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
@@ -505,7 +505,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
} }
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f) struct flow_cls_offload *f)
{ {
struct mlxsw_sp_acl_rule_info *rulei; struct mlxsw_sp_acl_rule_info *rulei;
...@@ -552,7 +552,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, ...@@ -552,7 +552,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
} }
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f) struct flow_cls_offload *f)
{ {
struct mlxsw_sp_acl_ruleset *ruleset; struct mlxsw_sp_acl_ruleset *ruleset;
...@@ -574,7 +574,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp, ...@@ -574,7 +574,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
} }
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f) struct flow_cls_offload *f)
{ {
enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED; enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
...@@ -611,7 +611,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, ...@@ -611,7 +611,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
} }
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f) struct flow_cls_offload *f)
{ {
struct mlxsw_sp_acl_ruleset *ruleset; struct mlxsw_sp_acl_ruleset *ruleset;
...@@ -632,7 +632,7 @@ int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp, ...@@ -632,7 +632,7 @@ int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
} }
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f) struct flow_cls_offload *f)
{ {
struct mlxsw_sp_acl_ruleset *ruleset; struct mlxsw_sp_acl_ruleset *ruleset;
......
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>
#include "spectrum.h"
#include "spectrum_span.h"
#include "reg.h"
enum mlxsw_sp_mall_action_type {
MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
};
struct mlxsw_sp_mall_mirror_entry {
const struct net_device *to_dev;
int span_id;
};
struct mlxsw_sp_mall_entry {
struct list_head list;
unsigned long cookie;
enum mlxsw_sp_mall_action_type type;
bool ingress;
union {
struct mlxsw_sp_mall_mirror_entry mirror;
struct mlxsw_sp_port_sample sample;
};
struct rcu_head rcu;
};
static struct mlxsw_sp_mall_entry *
mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
{
struct mlxsw_sp_mall_entry *mall_entry;
list_for_each_entry(mall_entry, &block->mall_list, list)
if (mall_entry->cookie == cookie)
return mall_entry;
return NULL;
}
static int
mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_mall_entry *mall_entry)
{
enum mlxsw_sp_span_type span_type;
if (!mall_entry->mirror.to_dev) {
netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
return -EINVAL;
}
span_type = mall_entry->ingress ? MLXSW_SP_SPAN_INGRESS :
MLXSW_SP_SPAN_EGRESS;
return mlxsw_sp_span_mirror_add(mlxsw_sp_port,
mall_entry->mirror.to_dev,
span_type, true,
&mall_entry->mirror.span_id);
}
static void
mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_mall_entry *mall_entry)
{
enum mlxsw_sp_span_type span_type;
span_type = mall_entry->ingress ? MLXSW_SP_SPAN_INGRESS :
MLXSW_SP_SPAN_EGRESS;
mlxsw_sp_span_mirror_del(mlxsw_sp_port, mall_entry->mirror.span_id,
span_type, true);
}
static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable, u32 rate)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char mpsc_pl[MLXSW_REG_MPSC_LEN];
mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}
static int
mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_mall_entry *mall_entry)
{
int err;
if (rtnl_dereference(mlxsw_sp_port->sample)) {
netdev_err(mlxsw_sp_port->dev, "sample already active\n");
return -EEXIST;
}
rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample);
err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true,
mall_entry->sample.rate);
if (err)
goto err_port_sample_set;
return 0;
err_port_sample_set:
RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
return err;
}
static void
mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
if (!mlxsw_sp_port->sample)
return;
mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
}
static int
mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_mall_entry *mall_entry)
{
switch (mall_entry->type) {
case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry);
case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry);
default:
WARN_ON(1);
return -EINVAL;
}
}
static void
mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_mall_entry *mall_entry)
{
switch (mall_entry->type) {
case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
break;
case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
mlxsw_sp_mall_port_sample_del(mlxsw_sp_port);
break;
default:
WARN_ON(1);
}
}
int mlxsw_sp_mall_replace(struct mlxsw_sp_flow_block *block,
struct tc_cls_matchall_offload *f)
{
struct mlxsw_sp_flow_block_binding *binding;
struct mlxsw_sp_mall_entry *mall_entry;
__be16 protocol = f->common.protocol;
struct flow_action_entry *act;
int err;
if (!flow_offload_has_one_action(&f->rule->action)) {
NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
return -EOPNOTSUPP;
}
if (f->common.chain_index) {
NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
return -EOPNOTSUPP;
}
if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
return -EOPNOTSUPP;
}
mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
if (!mall_entry)
return -ENOMEM;
mall_entry->cookie = f->cookie;
mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
act = &f->rule->action.entries[0];
if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
mall_entry->mirror.to_dev = act->dev;
} else if (act->id == FLOW_ACTION_SAMPLE &&
protocol == htons(ETH_P_ALL)) {
if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
err = -EOPNOTSUPP;
goto errout;
}
mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
mall_entry->sample.psample_group = act->sample.psample_group;
mall_entry->sample.truncate = act->sample.truncate;
mall_entry->sample.trunc_size = act->sample.trunc_size;
mall_entry->sample.rate = act->sample.rate;
} else {
err = -EOPNOTSUPP;
goto errout;
}
list_for_each_entry(binding, &block->binding_list, list) {
err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
mall_entry);
if (err)
goto rollback;
}
block->rule_count++;
if (mall_entry->ingress)
block->egress_blocker_rule_count++;
else
block->ingress_blocker_rule_count++;
list_add_tail(&mall_entry->list, &block->mall_list);
return 0;
rollback:
list_for_each_entry_continue_reverse(binding, &block->binding_list,
list)
mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
errout:
kfree(mall_entry);
return err;
}
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
struct tc_cls_matchall_offload *f)
{
struct mlxsw_sp_flow_block_binding *binding;
struct mlxsw_sp_mall_entry *mall_entry;
mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
if (!mall_entry) {
NL_SET_ERR_MSG(f->common.extack, "Entry not found");
return;
}
list_del(&mall_entry->list);
if (mall_entry->ingress)
block->egress_blocker_rule_count--;
else
block->ingress_blocker_rule_count--;
block->rule_count--;
list_for_each_entry(binding, &block->binding_list, list)
mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
}
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_mall_entry *mall_entry;
int err;
list_for_each_entry(mall_entry, &block->mall_list, list) {
err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
if (err)
goto rollback;
}
return 0;
rollback:
list_for_each_entry_continue_reverse(mall_entry, &block->mall_list,
list)
mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
return err;
}
void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_mall_entry *mall_entry;
list_for_each_entry(mall_entry, &block->mall_list, list)
mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
}
...@@ -2,7 +2,8 @@
# SPDX-License-Identifier: GPL-2.0
ALL_TESTS="gact_drop_and_ok_test mirred_egress_redirect_test \
-	mirred_egress_mirror_test gact_trap_test"
+	mirred_egress_mirror_test matchall_mirred_egress_mirror_test \
+	gact_trap_test"
NUM_NETIFS=4
source tc_common.sh
source lib.sh
...@@ -50,6 +51,9 @@ switch_destroy()
mirred_egress_test()
{
	local action=$1
+	local protocol=$2
+	local classifier=$3
+	local classifier_args=$4
	RET=0
...@@ -62,9 +66,9 @@ mirred_egress_test()
	tc_check_packets "dev $h2 ingress" 101 1
	check_fail $? "Matched without redirect rule inserted"
-	tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 flower \
-		$tcflags dst_ip 192.0.2.2 action mirred egress $action \
-		dev $swp2
+	tc filter add dev $swp1 ingress protocol $protocol pref 1 handle 101 \
+		$classifier $tcflags $classifier_args \
+		action mirred egress $action dev $swp2
	$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
		-t ip -q
...@@ -72,10 +76,11 @@ mirred_egress_test()
	tc_check_packets "dev $h2 ingress" 101 1
	check_err $? "Did not match incoming $action packet"
-	tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower
+	tc filter del dev $swp1 ingress protocol $protocol pref 1 handle 101 \
+		$classifier
	tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
-	log_test "mirred egress $action ($tcflags)"
+	log_test "mirred egress $classifier $action ($tcflags)"
}
gact_drop_and_ok_test()
...@@ -187,12 +192,17 @@ cleanup()
mirred_egress_redirect_test()
{
-	mirred_egress_test "redirect"
+	mirred_egress_test "redirect" "ip" "flower" "dst_ip 192.0.2.2"
}
mirred_egress_mirror_test()
{
-	mirred_egress_test "mirror"
+	mirred_egress_test "mirror" "ip" "flower" "dst_ip 192.0.2.2"
+}
+
+matchall_mirred_egress_mirror_test()
+{
+	mirred_egress_test "mirror" "all" "matchall" ""
}
trap cleanup EXIT
......