Commit ce87a957 authored by Jakub Kicinski

Merge branch 'mlxsw-add-spectrum-1-ip6gre-support'

Petr Machata says:

====================
mlxsw: Add Spectrum-1 ip6gre support

Ido Schimmel writes:

Currently, mlxsw only supports ip6gre offload on Spectrum-2 and newer
ASICs. Spectrum-1 can also offload ip6gre tunnels, but it needs double
entry router interfaces (RIFs) for the RIFs representing these tunnels.
In addition, the RIF index needs to be even. This is handled in
patches #1-#3.
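
For reference, that allocation scheme boils down to the minimal sketch
below. It uses the kernel genalloc API with the first-fit-order-align
algorithm, so a two-entry allocation always lands on an even index;
everything except the gen_pool_*() helpers is illustrative and
simplified relative to the actual driver code.

#include <linux/types.h>
#include <linux/genalloc.h>

/* Illustrative stand-in for MLXSW_SP_ROUTER_GENALLOC_OFFSET:
 * gen_pool_alloc() returns 0 on failure, so allocate from a
 * non-zero base and subtract it again afterwards.
 */
#define RIF_GENALLOC_OFFSET 0x100

static struct gen_pool *rifs_table;

static int rifs_table_init(u64 max_rifs)
{
	int err;

	/* Order 0: single-entry granularity. */
	rifs_table = gen_pool_create(0, -1);
	if (!rifs_table)
		return -ENOMEM;
	/* Align each allocation to its (power-of-two) size. */
	gen_pool_set_algo(rifs_table, gen_pool_first_fit_order_align, NULL);
	err = gen_pool_add(rifs_table, RIF_GENALLOC_OFFSET, max_rifs, -1);
	if (err)
		gen_pool_destroy(rifs_table);
	return err;
}

static int rif_index_alloc(u16 *p_rif_index, u8 rif_entries)
{
	unsigned long index = gen_pool_alloc(rifs_table, rif_entries);

	if (!index)
		return -ENOBUFS;
	/* Even whenever rif_entries == 2, since the base offset is even. */
	*p_rif_index = index - RIF_GENALLOC_OFFSET;
	return 0;
}

static void rif_index_free(u16 rif_index, u8 rif_entries)
{
	gen_pool_free(rifs_table, RIF_GENALLOC_OFFSET + rif_index, rif_entries);
}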

The implementation can otherwise be shared between all Spectrum
generations. This is handled in patches #4-#5.

Patch #6 moves a mlxsw ip6gre selftest to a shared directory, as ip6gre
is no longer only supported on Spectrum-2 and newer ASICs.

This work is motivated by users that require multiple GRE tunnels that
all share the same underlay VRF. Currently, mlxsw only supports
decapsulation based on the underlay destination IP (i.e., not taking the
GRE key into account), so users need to configure these tunnels with
different source IPs, and IPv6 addresses are easier to spare than IPv4
ones.

Tested using existing ip6gre forwarding selftests.
====================

Link: https://lore.kernel.org/r/cover.1670414573.git.petrm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 837e8ac8 db401875
@@ -363,93 +363,7 @@ static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
};
static struct mlxsw_sp_ipip_parms
mlxsw_sp1_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
{
struct mlxsw_sp_ipip_parms parms = {0};
WARN_ON_ONCE(1);
return parms;
}
static int
mlxsw_sp1_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_ipip_entry *ipip_entry,
bool force, char *ratr_pl)
{
WARN_ON_ONCE(1);
return -EINVAL;
}
static int
mlxsw_sp1_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
u32 tunnel_index)
{
WARN_ON_ONCE(1);
return -EINVAL;
}
static bool mlxsw_sp1_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
return false;
}
static struct mlxsw_sp_rif_ipip_lb_config
mlxsw_sp1_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
struct mlxsw_sp_rif_ipip_lb_config config = {0};
WARN_ON_ONCE(1);
return config;
}
static int
mlxsw_sp1_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct netlink_ext_ack *extack)
{
WARN_ON_ONCE(1);
return -EINVAL;
}
static int
mlxsw_sp1_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
WARN_ON_ONCE(1);
return -EINVAL;
}
static void
mlxsw_sp1_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_entry *ipip_entry)
{
WARN_ON_ONCE(1);
}
static const struct mlxsw_sp_ipip_ops mlxsw_sp1_ipip_gre6_ops = {
.dev_type = ARPHRD_IP6GRE,
.ul_proto = MLXSW_SP_L3_PROTO_IPV6,
.inc_parsing_depth = true,
.parms_init = mlxsw_sp1_ipip_netdev_parms_init_gre6,
.nexthop_update = mlxsw_sp1_ipip_nexthop_update_gre6,
.decap_config = mlxsw_sp1_ipip_decap_config_gre6,
.can_offload = mlxsw_sp1_ipip_can_offload_gre6,
.ol_loopback_config = mlxsw_sp1_ipip_ol_loopback_config_gre6,
.ol_netdev_change = mlxsw_sp1_ipip_ol_netdev_change_gre6,
.rem_ip_addr_set = mlxsw_sp1_ipip_rem_addr_set_gre6,
.rem_ip_addr_unset = mlxsw_sp1_ipip_rem_addr_unset_gre6,
};
const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[] = {
[MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
[MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp1_ipip_gre6_ops,
};
static struct mlxsw_sp_ipip_parms
mlxsw_sp2_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
mlxsw_sp_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
{
struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev);
@@ -464,9 +378,9 @@ mlxsw_sp2_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
}
static int
mlxsw_sp2_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_ipip_entry *ipip_entry,
bool force, char *ratr_pl)
mlxsw_sp_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_ipip_entry *ipip_entry,
bool force, char *ratr_pl)
{
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
enum mlxsw_reg_ratr_op op;
@@ -482,9 +396,9 @@ mlxsw_sp2_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
}
static int
mlxsw_sp2_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
u32 tunnel_index)
mlxsw_sp_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
u32 tunnel_index)
{
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
@@ -519,8 +433,8 @@ mlxsw_sp2_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
}
static bool mlxsw_sp2_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
static bool mlxsw_sp_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev);
bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
@@ -534,8 +448,8 @@ static bool mlxsw_sp2_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_rif_ipip_lb_config
mlxsw_sp2_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
mlxsw_sp_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev);
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
@@ -553,20 +467,20 @@ mlxsw_sp2_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
}
static int
mlxsw_sp2_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct netlink_ext_ack *extack)
mlxsw_sp_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_ipip_parms new_parms;
new_parms = mlxsw_sp2_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev);
new_parms = mlxsw_sp_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev);
return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
&new_parms, extack);
}
static int
mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
mlxsw_sp_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp,
&ipip_entry->parms.daddr.addr6,
@@ -574,24 +488,44 @@ mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
}
static void
mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_entry *ipip_entry)
mlxsw_sp_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_entry *ipip_entry)
{
mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipip_entry->parms.daddr.addr6);
}
static const struct mlxsw_sp_ipip_ops mlxsw_sp1_ipip_gre6_ops = {
.dev_type = ARPHRD_IP6GRE,
.ul_proto = MLXSW_SP_L3_PROTO_IPV6,
.inc_parsing_depth = true,
.double_rif_entry = true,
.parms_init = mlxsw_sp_ipip_netdev_parms_init_gre6,
.nexthop_update = mlxsw_sp_ipip_nexthop_update_gre6,
.decap_config = mlxsw_sp_ipip_decap_config_gre6,
.can_offload = mlxsw_sp_ipip_can_offload_gre6,
.ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre6,
.ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre6,
.rem_ip_addr_set = mlxsw_sp_ipip_rem_addr_set_gre6,
.rem_ip_addr_unset = mlxsw_sp_ipip_rem_addr_unset_gre6,
};
const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[] = {
[MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
[MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp1_ipip_gre6_ops,
};
static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
.dev_type = ARPHRD_IP6GRE,
.ul_proto = MLXSW_SP_L3_PROTO_IPV6,
.inc_parsing_depth = true,
.parms_init = mlxsw_sp2_ipip_netdev_parms_init_gre6,
.nexthop_update = mlxsw_sp2_ipip_nexthop_update_gre6,
.decap_config = mlxsw_sp2_ipip_decap_config_gre6,
.can_offload = mlxsw_sp2_ipip_can_offload_gre6,
.ol_loopback_config = mlxsw_sp2_ipip_ol_loopback_config_gre6,
.ol_netdev_change = mlxsw_sp2_ipip_ol_netdev_change_gre6,
.rem_ip_addr_set = mlxsw_sp2_ipip_rem_addr_set_gre6,
.rem_ip_addr_unset = mlxsw_sp2_ipip_rem_addr_unset_gre6,
.parms_init = mlxsw_sp_ipip_netdev_parms_init_gre6,
.nexthop_update = mlxsw_sp_ipip_nexthop_update_gre6,
.decap_config = mlxsw_sp_ipip_decap_config_gre6,
.can_offload = mlxsw_sp_ipip_can_offload_gre6,
.ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre6,
.ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre6,
.rem_ip_addr_set = mlxsw_sp_ipip_rem_addr_set_gre6,
.rem_ip_addr_unset = mlxsw_sp_ipip_rem_addr_unset_gre6,
};
const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[] = {
......
@@ -49,6 +49,7 @@ struct mlxsw_sp_ipip_ops {
int dev_type;
enum mlxsw_sp_l3proto ul_proto; /* Underlay. */
bool inc_parsing_depth;
bool double_rif_entry;
struct mlxsw_sp_ipip_parms
(*parms_init)(const struct net_device *ol_dev);
......
@@ -18,6 +18,7 @@
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <linux/genalloc.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -59,6 +60,7 @@ struct mlxsw_sp_rif {
int mtu;
u16 rif_index;
u8 mac_profile_id;
u8 rif_entries;
u16 vr_id;
const struct mlxsw_sp_rif_ops *ops;
struct mlxsw_sp *mlxsw_sp;
@@ -77,6 +79,7 @@ struct mlxsw_sp_rif_params {
};
u16 vid;
bool lag;
bool double_entry;
};
struct mlxsw_sp_rif_subport {
@@ -1068,6 +1071,7 @@ mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
.common.dev = ol_dev,
.common.lag = false,
.common.double_entry = ipip_ops->double_rif_entry,
.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
};
@@ -7826,18 +7830,26 @@ mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
}
static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index,
u8 rif_entries)
{
int i;
*p_rif_index = gen_pool_alloc(mlxsw_sp->router->rifs_table,
rif_entries);
if (*p_rif_index == 0)
return -ENOBUFS;
*p_rif_index -= MLXSW_SP_ROUTER_GENALLOC_OFFSET;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
if (!mlxsw_sp->router->rifs[i]) {
*p_rif_index = i;
return 0;
}
}
/* RIF indexes must be aligned to the allocation size. */
WARN_ON_ONCE(*p_rif_index % rif_entries);
return -ENOBUFS;
return 0;
}
static void mlxsw_sp_rif_index_free(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
u8 rif_entries)
{
gen_pool_free(mlxsw_sp->router->rifs_table,
MLXSW_SP_ROUTER_GENALLOC_OFFSET + rif_index, rif_entries);
}
static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
@@ -8081,6 +8093,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
struct netlink_ext_ack *extack)
{
u8 rif_entries = params->double_entry ? 2 : 1;
u32 tb_id = l3mdev_fib_table(params->dev);
const struct mlxsw_sp_rif_ops *ops;
struct mlxsw_sp_fid *fid = NULL;
@@ -8098,7 +8111,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
return ERR_CAST(vr);
vr->rif_count++;
err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
goto err_rif_index_alloc;
@@ -8113,6 +8126,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp->router->rifs[rif_index] = rif;
rif->mlxsw_sp = mlxsw_sp;
rif->ops = ops;
rif->rif_entries = rif_entries;
if (ops->fid_get) {
fid = ops->fid_get(rif, extack);
@@ -8146,7 +8160,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rif_counters_alloc(rif);
}
atomic_inc(&mlxsw_sp->router->rifs_count);
atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
return rif;
err_stats_enable:
@@ -8162,6 +8176,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
dev_put(rif->dev);
kfree(rif);
err_rif_alloc:
mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
err_rif_index_alloc:
vr->rif_count--;
mlxsw_sp_vr_put(mlxsw_sp, vr);
@@ -8173,10 +8188,12 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
const struct mlxsw_sp_rif_ops *ops = rif->ops;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
u8 rif_entries = rif->rif_entries;
u16 rif_index = rif->rif_index;
struct mlxsw_sp_vr *vr;
int i;
atomic_dec(&mlxsw_sp->router->rifs_count);
atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
@@ -8198,6 +8215,7 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp->router->rifs[rif->rif_index] = NULL;
dev_put(rif->dev);
kfree(rif);
mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
vr->rif_count--;
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
@@ -9771,42 +9789,51 @@ mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_rif *ul_rif;
u8 rif_entries = 1;
u16 rif_index;
int err;
err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
return ERR_PTR(err);
}
ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
if (!ul_rif)
return ERR_PTR(-ENOMEM);
if (!ul_rif) {
err = -ENOMEM;
goto err_rif_alloc;
}
mlxsw_sp->router->rifs[rif_index] = ul_rif;
ul_rif->mlxsw_sp = mlxsw_sp;
ul_rif->rif_entries = rif_entries;
err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
if (err)
goto ul_rif_op_err;
atomic_inc(&mlxsw_sp->router->rifs_count);
atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
return ul_rif;
ul_rif_op_err:
mlxsw_sp->router->rifs[rif_index] = NULL;
kfree(ul_rif);
err_rif_alloc:
mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
return ERR_PTR(err);
}
static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
u8 rif_entries = ul_rif->rif_entries;
u16 rif_index = ul_rif->rif_index;
atomic_dec(&mlxsw_sp->router->rifs_count);
atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
kfree(ul_rif);
mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
}
static struct mlxsw_sp_rif *
@@ -9940,11 +9967,43 @@ static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};
static int mlxsw_sp_rifs_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct gen_pool *rifs_table;
int err;
rifs_table = gen_pool_create(0, -1);
if (!rifs_table)
return -ENOMEM;
gen_pool_set_algo(rifs_table, gen_pool_first_fit_order_align,
NULL);
err = gen_pool_add(rifs_table, MLXSW_SP_ROUTER_GENALLOC_OFFSET,
MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS), -1);
if (err)
goto err_gen_pool_add;
mlxsw_sp->router->rifs_table = rifs_table;
return 0;
err_gen_pool_add:
gen_pool_destroy(rifs_table);
return err;
}
static void mlxsw_sp_rifs_table_fini(struct mlxsw_sp *mlxsw_sp)
{
gen_pool_destroy(mlxsw_sp->router->rifs_table);
}
static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_core *core = mlxsw_sp->core;
int err;
if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
return -EIO;
@@ -9957,6 +10016,10 @@ static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->router->rifs)
return -ENOMEM;
err = mlxsw_sp_rifs_table_init(mlxsw_sp);
if (err)
goto err_rifs_table_init;
idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
atomic_set(&mlxsw_sp->router->rifs_count, 0);
@@ -9970,6 +10033,10 @@ static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp);
return 0;
err_rifs_table_init:
kfree(mlxsw_sp->router->rifs);
return err;
}
static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
@@ -9986,6 +10053,7 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
mlxsw_sp_rifs_table_fini(mlxsw_sp);
kfree(mlxsw_sp->router->rifs);
}
......
@@ -15,8 +15,12 @@ struct mlxsw_sp_router_nve_decap {
u8 valid:1;
};
/* gen_pool_alloc() returns 0 when allocation fails, so use an offset */
#define MLXSW_SP_ROUTER_GENALLOC_OFFSET 0x100
struct mlxsw_sp_router {
struct mlxsw_sp *mlxsw_sp;
struct gen_pool *rifs_table;
struct mlxsw_sp_rif **rifs;
struct idr rif_mac_profiles_idr;
atomic_t rif_mac_profiles_count;
......
@@ -31,7 +31,7 @@
# | 2001:db8:10::2/64 |
# +-------------------------+
lib_dir=$(dirname $0)/../../../../net/forwarding
lib_dir=$(dirname $0)/../../../net/forwarding
ALL_TESTS="
decap_error_test
......