Commit d52238eb authored by Jiri Pirko, committed by David S. Miller

mlxsw: spectrum: Push flow_block related functions into a separate file

The flow_block code is currently mixed into spectrum_acl.c.
However, as it does not relate only to the ACL part,
push the bits into a separate file.
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3bc3ffb6
Makefile
@@ -21,6 +21,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
 				   spectrum_acl_atcam.o spectrum_acl_erp.o \
 				   spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
 				   spectrum_acl_bloom_filter.o spectrum_acl.o \
+				   spectrum_flow.o \
 				   spectrum_flower.o spectrum_cnt.o \
 				   spectrum_fid.o spectrum_ipip.o \
 				   spectrum_acl_flex_actions.o \
spectrum.h
@@ -654,15 +654,7 @@ struct mlxsw_sp_acl_rule_info {
 	unsigned int counter_index;
 };
 
-struct mlxsw_sp_flow_block;
-struct mlxsw_sp_acl_ruleset;
-
-/* spectrum_acl.c */
-enum mlxsw_sp_acl_profile {
-	MLXSW_SP_ACL_PROFILE_FLOWER,
-	MLXSW_SP_ACL_PROFILE_MR,
-};
-
+/* spectrum_flow.c */
 struct mlxsw_sp_flow_block {
 	struct list_head binding_list;
 	struct mlxsw_sp_acl_ruleset *ruleset_zero;
@@ -676,7 +668,12 @@ struct mlxsw_sp_flow_block {
 	struct net *net;
 };
 
-struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+struct mlxsw_sp_flow_block_binding {
+	struct list_head list;
+	struct net_device *dev;
+	struct mlxsw_sp_port *mlxsw_sp_port;
+	bool ingress;
+};
 
 static inline struct mlxsw_sp *
 mlxsw_sp_flow_block_mlxsw_sp(struct mlxsw_sp_flow_block *block)
@@ -740,6 +737,23 @@ int mlxsw_sp_flow_block_unbind(struct mlxsw_sp *mlxsw_sp,
 			       struct mlxsw_sp_flow_block *block,
 			       struct mlxsw_sp_port *mlxsw_sp_port,
 			       bool ingress);
 
+/* spectrum_acl.c */
+struct mlxsw_sp_acl_ruleset;
+
+enum mlxsw_sp_acl_profile {
+	MLXSW_SP_ACL_PROFILE_FLOWER,
+	MLXSW_SP_ACL_PROFILE_MR,
+};
+
+struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+
+int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+			      struct mlxsw_sp_flow_block *block,
+			      struct mlxsw_sp_flow_block_binding *binding);
+void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+				 struct mlxsw_sp_flow_block *block,
+				 struct mlxsw_sp_flow_block_binding *binding);
+
 struct mlxsw_sp_acl_ruleset *
 mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
 			    struct mlxsw_sp_flow_block *block, u32 chain_index,
spectrum_acl.c
@@ -40,13 +40,6 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
 	return acl->afk;
 }
 
-struct mlxsw_sp_flow_block_binding {
-	struct list_head list;
-	struct net_device *dev;
-	struct mlxsw_sp_port *mlxsw_sp_port;
-	bool ingress;
-};
-
 struct mlxsw_sp_acl_ruleset_ht_key {
 	struct mlxsw_sp_flow_block *block;
 	u32 chain_index;
@@ -101,8 +94,7 @@ mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
 	return ruleset->ref_count == 2;
 }
 
-static int
-mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
 			      struct mlxsw_sp_flow_block *block,
 			      struct mlxsw_sp_flow_block_binding *binding)
 {
@@ -113,8 +105,7 @@ mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
 				  binding->mlxsw_sp_port, binding->ingress);
 }
 
-static void
-mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
 				 struct mlxsw_sp_flow_block *block,
 				 struct mlxsw_sp_flow_block_binding *binding)
 {
@@ -125,12 +116,6 @@ mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
 				    binding->mlxsw_sp_port, binding->ingress);
 }
 
-static bool
-mlxsw_sp_acl_ruleset_block_bound(const struct mlxsw_sp_flow_block *block)
-{
-	return block->ruleset_zero;
-}
-
 static int
 mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
 				struct mlxsw_sp_acl_ruleset *ruleset,
@@ -168,110 +153,6 @@ mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
 	block->ruleset_zero = NULL;
 }
 
-struct mlxsw_sp_flow_block *
-mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp, struct net *net)
-{
-	struct mlxsw_sp_flow_block *block;
-
-	block = kzalloc(sizeof(*block), GFP_KERNEL);
-	if (!block)
-		return NULL;
-	INIT_LIST_HEAD(&block->binding_list);
-	block->mlxsw_sp = mlxsw_sp;
-	block->net = net;
-	return block;
-}
-
-void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block)
-{
-	WARN_ON(!list_empty(&block->binding_list));
-	kfree(block);
-}
-
-static struct mlxsw_sp_flow_block_binding *
-mlxsw_sp_flow_block_lookup(struct mlxsw_sp_flow_block *block,
-			   struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
-{
-	struct mlxsw_sp_flow_block_binding *binding;
-
-	list_for_each_entry(binding, &block->binding_list, list)
-		if (binding->mlxsw_sp_port == mlxsw_sp_port &&
-		    binding->ingress == ingress)
-			return binding;
-	return NULL;
-}
-
-int mlxsw_sp_flow_block_bind(struct mlxsw_sp *mlxsw_sp,
-			     struct mlxsw_sp_flow_block *block,
-			     struct mlxsw_sp_port *mlxsw_sp_port,
-			     bool ingress,
-			     struct netlink_ext_ack *extack)
-{
-	struct mlxsw_sp_flow_block_binding *binding;
-	int err;
-
-	if (WARN_ON(mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress)))
-		return -EEXIST;
-
-	if (ingress && block->ingress_blocker_rule_count) {
-		NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
-		return -EOPNOTSUPP;
-	}
-
-	if (!ingress && block->egress_blocker_rule_count) {
-		NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
-		return -EOPNOTSUPP;
-	}
-
-	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
-	if (!binding)
-		return -ENOMEM;
-	binding->mlxsw_sp_port = mlxsw_sp_port;
-	binding->ingress = ingress;
-
-	if (mlxsw_sp_acl_ruleset_block_bound(block)) {
-		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
-		if (err)
-			goto err_ruleset_bind;
-	}
-
-	if (ingress)
-		block->ingress_binding_count++;
-	else
-		block->egress_binding_count++;
-	list_add(&binding->list, &block->binding_list);
-	return 0;
-
-err_ruleset_bind:
-	kfree(binding);
-	return err;
-}
-
-int mlxsw_sp_flow_block_unbind(struct mlxsw_sp *mlxsw_sp,
-			       struct mlxsw_sp_flow_block *block,
-			       struct mlxsw_sp_port *mlxsw_sp_port,
-			       bool ingress)
-{
-	struct mlxsw_sp_flow_block_binding *binding;
-
-	binding = mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress);
-	if (!binding)
-		return -ENOENT;
-
-	list_del(&binding->list);
-
-	if (ingress)
-		block->ingress_binding_count--;
-	else
-		block->egress_binding_count--;
-
-	if (mlxsw_sp_acl_ruleset_block_bound(block))
-		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
-
-	kfree(binding);
-	return 0;
-}
-
 static struct mlxsw_sp_acl_ruleset *
 mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
 			    struct mlxsw_sp_flow_block *block, u32 chain_index,
spectrum_flow.c (new file)
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <net/net_namespace.h>

#include "spectrum.h"

struct mlxsw_sp_flow_block *
mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp, struct net *net)
{
	struct mlxsw_sp_flow_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;
	INIT_LIST_HEAD(&block->binding_list);
	block->mlxsw_sp = mlxsw_sp;
	block->net = net;
	return block;
}

void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block)
{
	WARN_ON(!list_empty(&block->binding_list));
	kfree(block);
}

static struct mlxsw_sp_flow_block_binding *
mlxsw_sp_flow_block_lookup(struct mlxsw_sp_flow_block *block,
			   struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
{
	struct mlxsw_sp_flow_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		if (binding->mlxsw_sp_port == mlxsw_sp_port &&
		    binding->ingress == ingress)
			return binding;
	return NULL;
}

static bool
mlxsw_sp_flow_block_ruleset_bound(const struct mlxsw_sp_flow_block *block)
{
	return block->ruleset_zero;
}

int mlxsw_sp_flow_block_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress,
			     struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	int err;

	if (WARN_ON(mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress)))
		return -EEXIST;

	if (ingress && block->ingress_blocker_rule_count) {
		NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
		return -EOPNOTSUPP;
	}

	if (!ingress && block->egress_blocker_rule_count) {
		NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
		return -EOPNOTSUPP;
	}

	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
	if (!binding)
		return -ENOMEM;
	binding->mlxsw_sp_port = mlxsw_sp_port;
	binding->ingress = ingress;

	if (mlxsw_sp_flow_block_ruleset_bound(block)) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto err_ruleset_bind;
	}

	if (ingress)
		block->ingress_binding_count++;
	else
		block->egress_binding_count++;
	list_add(&binding->list, &block->binding_list);
	return 0;

err_ruleset_bind:
	kfree(binding);
	return err;
}

int mlxsw_sp_flow_block_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_flow_block *block,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	struct mlxsw_sp_flow_block_binding *binding;

	binding = mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress);
	if (!binding)
		return -ENOENT;

	list_del(&binding->list);

	if (ingress)
		block->ingress_binding_count--;
	else
		block->egress_binding_count--;

	if (mlxsw_sp_flow_block_ruleset_bound(block))
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);

	kfree(binding);
	return 0;
}