Commit a2e4a219 authored by David S. Miller

Merge branch 'mlxsw-Add-support-for-offloading-IPv4-multicast-routes'

Jiri Pirko says:

====================
mlxsw: Add support for offloading IPv4 multicast routes

Yotam says:

This patch-set introduces offloading of the kernel IPv4 multicast router
logic in the Spectrum driver.

The first patch makes the Spectrum driver ignore FIB notifications that are
not of address family IPv4 or IPv6. This is needed in order to prevent
crashes once the subsequent patches introduce the RTNL_FAMILY_IPMR FIB
notifications.

Patches 2-5 update ipmr to use the FIB notification chain for both MFC and
VIF notifications, and patches 8-12 update the Spectrum driver to register
to these notifications and offload the routes.

Similarly to IPv4 and IPv6, any failure will trigger the abort mechanism
which is updated in this patch-set to eject multicast route tables too.

At this stage, the following limitations apply:
 - A multicast MFC route will be offloaded by the driver if all of its
   output interfaces are Spectrum router interfaces (RIFs). In any other
   case (which includes the pimreg device, tunnel devices and management
   ports) the route will be trapped to the CPU and the packets will be
   forwarded in software.
 - ipmr proxy routes are not supported and will trigger the abort
   mechanism.
 - The MFC TTL values are currently treated as boolean (see the sketch
   after this list): if the value is different from 255, the traffic is
   forwarded to the interface, and if the value is 255 it is not
   forwarded. Dropping packets based on their TTL isn't currently
   supported.
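
For reference, the boolean interpretation amounts to the following check
(a simplified sketch; vif_is_egress is a hypothetical helper, the actual
logic is open-coded in spectrum_mr.c below):

	/* Sketch: a VIF is treated as an egress interface iff its MFC TTL
	 * threshold differs from 255; the threshold value itself is ignored.
	 */
	static bool vif_is_egress(const struct mfc_cache *mfc, vifi_t vifi)
	{
		return mfc->mfc_un.res.ttls[vifi] != 255;
	}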

To give users visibility into which routes are offloaded and which are not,
patch 6 introduces a per-route offload indication, similar to the one for
IPv4 and IPv6 unicast routes, which is sent to user space via the RTNetlink
interface.

The Spectrum driver multicast router offloading support, which is
introduced in patches 8 and 9, is divided into two parts:
 - The hardware logic which abstracts the Spectrum hardware and provides a
   simple API for the upper levels.
 - The offloading logic which gets the MFC and VIF notifications from the
   kernel and updates the hardware using the hardware logic part (see the
   example after this list).
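
The boundary between the two parts is the mlxsw_sp_mr_ops structure defined
in spectrum_mr.h below; the offloading logic only ever reaches the hardware
through these ops, e.g. when writing a new route to the hardware:

	err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
				       mr_route->route_priv,
				       &route_params);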

Finally, the last patch makes the Spectrum router logic not ignore the
multicast FIB notifications and call the corresponding functions in the
multicast router offloading logic.
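
Schematically, an MFC add notification then flows as follows (simplified
call chain; the router-side handler itself is not part of this excerpt):

	FIB notifier chain (RTNL_FAMILY_IPMR, MFC add)
	  -> Spectrum router notification handler
	    -> mlxsw_sp_mr_route4_add()          (spectrum_mr.c)
	      -> mr->mr_ops->route_create()      (spectrum_mr_tcam.c)
	        -> mlxsw_sp_mr_tcam_route_replace() / RMFT2 register write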

---
v2->v3:
 - Move the ipmr_rule_default function definition to be inside the already
   existing CONFIG_IP_MROUTE_MULTIPLE_TABLES ifdef block (patch 6)
 - Remove double =0 initialization in spectrum_mr.c (patch 7)
 - Fix route4 allocation size (patch 7)
v1->v2:
 - Add comments for struct fields in mroute.h
 - Take the mrt_lock while dumping VIFs in the fib_notifier dump callback
 - Update the MFC lastuse field too
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1ca94d79 664375e9
@@ -17,7 +17,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
 			       spectrum_kvdl.o spectrum_acl_tcam.o \
 			       spectrum_acl.o spectrum_flower.o \
 			       spectrum_cnt.o spectrum_fid.o \
-			       spectrum_ipip.o spectrum_acl_flex_actions.o
+			       spectrum_ipip.o spectrum_acl_flex_actions.o \
+			       spectrum_mr.o spectrum_mr_tcam.o
 mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
 mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
 obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
@@ -139,6 +139,7 @@ struct mlxsw_sp_port_mall_tc_entry {
 struct mlxsw_sp_sb;
 struct mlxsw_sp_bridge;
 struct mlxsw_sp_router;
+struct mlxsw_sp_mr;
 struct mlxsw_sp_acl;
 struct mlxsw_sp_counter_pool;
 struct mlxsw_sp_fid_core;
@@ -153,6 +154,7 @@ struct mlxsw_sp {
 	struct mlxsw_sp_sb *sb;
 	struct mlxsw_sp_bridge *bridge;
 	struct mlxsw_sp_router *router;
+	struct mlxsw_sp_mr *mr;
 	struct mlxsw_afa *afa;
 	struct mlxsw_sp_acl *acl;
 	struct mlxsw_sp_fid_core *fid_core;
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/rhashtable.h>
#include "spectrum_mr.h"
#include "spectrum_router.h"
struct mlxsw_sp_mr {
const struct mlxsw_sp_mr_ops *mr_ops;
void *catchall_route_priv;
struct delayed_work stats_update_dw;
struct list_head table_list;
#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
unsigned long priv[0];
/* priv has to be always the last item */
};
struct mlxsw_sp_mr_vif {
struct net_device *dev;
const struct mlxsw_sp_rif *rif;
unsigned long vif_flags;
/* A list of route_vif_entry structs that point to routes in which this
 * VIF instance is used as one of the egress VIFs
 */
struct list_head route_evif_list;
/* A list of route_vif_entry structs that point to routes in which this
 * VIF instance is used as the ingress VIF
 */
struct list_head route_ivif_list;
};
struct mlxsw_sp_mr_route_vif_entry {
struct list_head vif_node;
struct list_head route_node;
struct mlxsw_sp_mr_vif *mr_vif;
struct mlxsw_sp_mr_route *mr_route;
};
struct mlxsw_sp_mr_table {
struct list_head node;
enum mlxsw_sp_l3proto proto;
struct mlxsw_sp *mlxsw_sp;
u32 vr_id;
struct mlxsw_sp_mr_vif vifs[MAXVIFS];
struct list_head route_list;
struct rhashtable route_ht;
char catchall_route_priv[0];
/* catchall_route_priv has to be always the last item */
};
struct mlxsw_sp_mr_route {
struct list_head node;
struct rhash_head ht_node;
struct mlxsw_sp_mr_route_key key;
enum mlxsw_sp_mr_route_action route_action;
u16 min_mtu;
struct mfc_cache *mfc4;
void *route_priv;
const struct mlxsw_sp_mr_table *mr_table;
/* A list of route_vif_entry structs that point to the egress VIFs */
struct list_head evif_list;
/* A route_vif_entry struct that points to the ingress VIF */
struct mlxsw_sp_mr_route_vif_entry ivif;
};
static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
.key_len = sizeof(struct mlxsw_sp_mr_route_key),
.key_offset = offsetof(struct mlxsw_sp_mr_route, key),
.head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
.automatic_shrinking = true,
};
static bool mlxsw_sp_mr_vif_regular(const struct mlxsw_sp_mr_vif *vif)
{
return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
}
static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
{
return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif;
}
static bool mlxsw_sp_mr_vif_rif_invalid(const struct mlxsw_sp_mr_vif *vif)
{
return mlxsw_sp_mr_vif_regular(vif) && vif->dev && !vif->rif;
}
static bool
mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
{
vifi_t ivif;
switch (mr_route->mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
ivif = mr_route->mfc4->mfc_parent;
return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
default:
WARN_ON_ONCE(1);
}
return false;
}
static int
mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
{
struct mlxsw_sp_mr_route_vif_entry *rve;
int valid_evifs;
valid_evifs = 0;
list_for_each_entry(rve, &mr_route->evif_list, route_node)
if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
valid_evifs++;
return valid_evifs;
}
static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route)
{
switch (mr_route->mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
return mr_route->key.source_mask.addr4 == INADDR_ANY;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
default:
WARN_ON_ONCE(1);
}
return false;
}
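/* Compute the action for a route: FORWARD only if the route can be fully
 * offloaded, i.e. the iVIF and all eVIFs are regular netdevices with
 * matching Spectrum RIFs; otherwise the route is trapped to the CPU and
 * its packets are forwarded in software.
 */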
static enum mlxsw_sp_mr_route_action
mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
{
struct mlxsw_sp_mr_route_vif_entry *rve;
/* If the ingress port is not regular and resolved, trap the route */
if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
/* The kernel does not match a (*,G) route in which the ingress interface
 * is not one of the egress interfaces, so trap such routes.
 */
if (mlxsw_sp_mr_route_starg(mr_route) &&
!mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
/* If the route has no valid eVIFs, trap it. */
if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
/* If either one of the eVIFs is not regular (VIF of type pimreg or
* tunnel) or one of the VIFs has no matching RIF, trap the packet.
*/
list_for_each_entry(rve, &mr_route->evif_list, route_node) {
if (!mlxsw_sp_mr_vif_regular(rve->mr_vif) ||
mlxsw_sp_mr_vif_rif_invalid(rve->mr_vif))
return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
}
return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
}
static enum mlxsw_sp_mr_route_prio
mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
{
return mlxsw_sp_mr_route_starg(mr_route) ?
MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
}
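/* Build the route key from an IPv4 MFC entry. A (*,G) route is encoded
 * with a zero source mask, so it matches any source address.
 */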
static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
struct mlxsw_sp_mr_route_key *key,
const struct mfc_cache *mfc)
{
bool starg = (mfc->mfc_origin == INADDR_ANY);
memset(key, 0, sizeof(*key));
key->vrid = mr_table->vr_id;
key->proto = mr_table->proto;
key->group.addr4 = mfc->mfc_mcastgrp;
key->group_mask.addr4 = 0xffffffff;
key->source.addr4 = mfc->mfc_origin;
key->source_mask.addr4 = starg ? 0 : 0xffffffff;
}
static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
struct mlxsw_sp_mr_vif *mr_vif)
{
struct mlxsw_sp_mr_route_vif_entry *rve;
rve = kzalloc(sizeof(*rve), GFP_KERNEL);
if (!rve)
return -ENOMEM;
rve->mr_route = mr_route;
rve->mr_vif = mr_vif;
list_add_tail(&rve->route_node, &mr_route->evif_list);
list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
return 0;
}
static void
mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
{
list_del(&rve->route_node);
list_del(&rve->vif_node);
kfree(rve);
}
static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
struct mlxsw_sp_mr_vif *mr_vif)
{
mr_route->ivif.mr_route = mr_route;
mr_route->ivif.mr_vif = mr_vif;
list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
}
static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
{
list_del(&mr_route->ivif.vif_node);
}
static int
mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
struct mlxsw_sp_mr_route *mr_route,
struct mlxsw_sp_mr_route_info *route_info)
{
struct mlxsw_sp_mr_route_vif_entry *rve;
u16 *erif_indices;
u16 irif_index;
u16 erif = 0;
erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
GFP_KERNEL);
if (!erif_indices)
return -ENOMEM;
list_for_each_entry(rve, &mr_route->evif_list, route_node) {
if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
erif_indices[erif++] = rifi;
}
}
if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
else
irif_index = 0;
route_info->irif_index = irif_index;
route_info->erif_indices = erif_indices;
route_info->min_mtu = mr_route->min_mtu;
route_info->route_action = mr_route->route_action;
route_info->erif_num = erif;
return 0;
}
static void
mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
{
kfree(route_info->erif_indices);
}
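/* Write a route to the hardware through the mr_ops interface: on first
 * write (!replace) a fresh route_priv is allocated and route_create() is
 * used, while on replace the existing hardware entry is updated in place.
 */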
static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
struct mlxsw_sp_mr_route *mr_route,
bool replace)
{
struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
struct mlxsw_sp_mr_route_info route_info;
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
int err;
err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
if (err)
return err;
if (!replace) {
struct mlxsw_sp_mr_route_params route_params;
mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
GFP_KERNEL);
if (!mr_route->route_priv) {
err = -ENOMEM;
goto out;
}
route_params.key = mr_route->key;
route_params.value = route_info;
route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
mr_route->route_priv,
&route_params);
if (err)
kfree(mr_route->route_priv);
} else {
err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
&route_info);
}
out:
mlxsw_sp_mr_route_info_destroy(&route_info);
return err;
}
static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
struct mlxsw_sp_mr_route *mr_route)
{
struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
kfree(mr_route->route_priv);
}
static struct mlxsw_sp_mr_route *
mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
struct mfc_cache *mfc)
{
struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
struct mlxsw_sp_mr_route *mr_route;
int err;
int i;
/* Allocate and init a new route and fill it with parameters */
mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
if (!mr_route)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&mr_route->evif_list);
mlxsw_sp_mr_route4_key(mr_table, &mr_route->key, mfc);
/* Find min_mtu and link iVIF and eVIFs */
mr_route->min_mtu = ETH_MAX_MTU;
ipmr_cache_hold(mfc);
mr_route->mfc4 = mfc;
mr_route->mr_table = mr_table;
for (i = 0; i < MAXVIFS; i++) {
if (mfc->mfc_un.res.ttls[i] != 255) {
err = mlxsw_sp_mr_route_evif_link(mr_route,
&mr_table->vifs[i]);
if (err)
goto err;
if (mr_table->vifs[i].dev &&
mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
}
}
mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]);
mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
return mr_route;
err:
ipmr_cache_put(mfc);
list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
mlxsw_sp_mr_route_evif_unlink(rve);
kfree(mr_route);
return ERR_PTR(err);
}
static void mlxsw_sp_mr_route4_destroy(struct mlxsw_sp_mr_table *mr_table,
struct mlxsw_sp_mr_route *mr_route)
{
struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
mlxsw_sp_mr_route_ivif_unlink(mr_route);
ipmr_cache_put(mr_route->mfc4);
list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
mlxsw_sp_mr_route_evif_unlink(rve);
kfree(mr_route);
}
static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
struct mlxsw_sp_mr_route *mr_route)
{
switch (mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
break;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
default:
WARN_ON_ONCE(1);
}
}
static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
bool offload)
{
switch (mr_route->mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
if (offload)
mr_route->mfc4->mfc_flags |= MFC_OFFLOAD;
else
mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD;
break;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
default:
WARN_ON_ONCE(1);
}
}
static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
{
bool offload;
offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
}
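/* Tear a route down: clear the kernel offload indication, remove the
 * hardware entry and unlink the route from the table data structures.
 */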
static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
struct mlxsw_sp_mr_route *mr_route)
{
mlxsw_sp_mr_mfc_offload_set(mr_route, false);
mlxsw_sp_mr_route_erase(mr_table, mr_route);
rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
mlxsw_sp_mr_route_ht_params);
list_del(&mr_route->node);
mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}
int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
struct mfc_cache *mfc, bool replace)
{
struct mlxsw_sp_mr_route *mr_orig_route = NULL;
struct mlxsw_sp_mr_route *mr_route;
int err;
/* If the route is a (*,*) route, abort, as this kind of route is used
 * for proxy routes.
 */
if (mfc->mfc_origin == INADDR_ANY && mfc->mfc_mcastgrp == INADDR_ANY) {
dev_warn(mr_table->mlxsw_sp->bus_info->dev,
"Offloading proxy routes is not supported.\n");
return -EINVAL;
}
/* Create a new route */
mr_route = mlxsw_sp_mr_route4_create(mr_table, mfc);
if (IS_ERR(mr_route))
return PTR_ERR(mr_route);
/* Find any route with a matching key */
mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
&mr_route->key,
mlxsw_sp_mr_route_ht_params);
if (replace) {
/* In the replace case, make the new route point to the original
 * route's route_priv.
 */
if (WARN_ON(!mr_orig_route)) {
err = -ENOENT;
goto err_no_orig_route;
}
mr_route->route_priv = mr_orig_route->route_priv;
} else if (mr_orig_route) {
/* In the non-replace case, if another route with the same key
 * was found, abort, as duplicate routes are used for proxy
 * routes.
 */
dev_warn(mr_table->mlxsw_sp->bus_info->dev,
"Offloading proxy routes is not supported.\n");
err = -EINVAL;
goto err_duplicate_route;
}
/* Put it in the table data-structures */
list_add_tail(&mr_route->node, &mr_table->route_list);
err = rhashtable_insert_fast(&mr_table->route_ht,
&mr_route->ht_node,
mlxsw_sp_mr_route_ht_params);
if (err)
goto err_rhashtable_insert;
/* Write the route to the hardware */
err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
if (err)
goto err_mr_route_write;
/* Destroy the original route */
if (replace) {
rhashtable_remove_fast(&mr_table->route_ht,
&mr_orig_route->ht_node,
mlxsw_sp_mr_route_ht_params);
list_del(&mr_orig_route->node);
mlxsw_sp_mr_route4_destroy(mr_table, mr_orig_route);
}
mlxsw_sp_mr_mfc_offload_update(mr_route);
return 0;
err_mr_route_write:
rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
mlxsw_sp_mr_route_ht_params);
err_rhashtable_insert:
list_del(&mr_route->node);
err_no_orig_route:
err_duplicate_route:
mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
return err;
}
void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
struct mfc_cache *mfc)
{
struct mlxsw_sp_mr_route *mr_route;
struct mlxsw_sp_mr_route_key key;
mlxsw_sp_mr_route4_key(mr_table, &key, mfc);
mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
mlxsw_sp_mr_route_ht_params);
if (mr_route)
__mlxsw_sp_mr_route_del(mr_table, mr_route);
}
/* Should be called after the VIF struct is updated */
static int
mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
struct mlxsw_sp_mr_route_vif_entry *rve)
{
struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
enum mlxsw_sp_mr_route_action route_action;
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
u16 irif_index;
int err;
route_action = mlxsw_sp_mr_route_action(rve->mr_route);
if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
return 0;
/* rve->mr_vif->rif is guaranteed to be valid at this stage */
irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
irif_index);
if (err)
return err;
err = mr->mr_ops->route_action_update(mlxsw_sp,
rve->mr_route->route_priv,
route_action);
if (err)
/* No need to rollback here because the iRIF change only takes
* place after the action has been updated.
*/
return err;
rve->mr_route->route_action = route_action;
mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
return 0;
}
static void
mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
struct mlxsw_sp_mr_route_vif_entry *rve)
{
struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
MLXSW_SP_MR_ROUTE_ACTION_TRAP);
rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}
/* Should be called after the RIF struct is updated */
static int
mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
struct mlxsw_sp_mr_route_vif_entry *rve)
{
struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
enum mlxsw_sp_mr_route_action route_action;
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
u16 erif_index = 0;
int err;
/* Update the route action, as the new eVIF can be a tunnel or a pimreg
* device which will require updating the action.
*/
route_action = mlxsw_sp_mr_route_action(rve->mr_route);
if (route_action != rve->mr_route->route_action) {
err = mr->mr_ops->route_action_update(mlxsw_sp,
rve->mr_route->route_priv,
route_action);
if (err)
return err;
}
/* Add the eRIF */
if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
err = mr->mr_ops->route_erif_add(mlxsw_sp,
rve->mr_route->route_priv,
erif_index);
if (err)
goto err_route_erif_add;
}
/* Update the minimum MTU */
if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
rve->mr_route->route_priv,
rve->mr_route->min_mtu);
if (err)
goto err_route_min_mtu_update;
}
rve->mr_route->route_action = route_action;
mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
return 0;
err_route_min_mtu_update:
if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
erif_index);
err_route_erif_add:
if (route_action != rve->mr_route->route_action)
mr->mr_ops->route_action_update(mlxsw_sp,
rve->mr_route->route_priv,
rve->mr_route->route_action);
return err;
}
/* Should be called before the RIF struct is updated */
static void
mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
struct mlxsw_sp_mr_route_vif_entry *rve)
{
struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
enum mlxsw_sp_mr_route_action route_action;
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
u16 rifi;
/* If the unresolved RIF was not valid, no need to delete it */
if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
return;
/* Update the route action: if there is only one valid eVIF in the
 * route, set the action to trap, as the VIF deletion will lead to zero
 * valid eVIFs. In any other case, use mlxsw_sp_mr_route_action() to
 * determine the route action.
 */
if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
else
route_action = mlxsw_sp_mr_route_action(rve->mr_route);
if (route_action != rve->mr_route->route_action)
mr->mr_ops->route_action_update(mlxsw_sp,
rve->mr_route->route_priv,
route_action);
/* Delete the erif from the route */
rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
rve->mr_route->route_action = route_action;
mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}
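/* Called when a VIF becomes resolved, i.e. both its netdevice and a
 * matching RIF are known. All routes that use the VIF are re-evaluated
 * and possibly moved from trap to forward; on failure, the routes that
 * were already resolved are rolled back in reverse order.
 */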
static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
struct net_device *dev,
struct mlxsw_sp_mr_vif *mr_vif,
unsigned long vif_flags,
const struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
int err;
/* Update the VIF */
mr_vif->dev = dev;
mr_vif->rif = rif;
mr_vif->vif_flags = vif_flags;
/* Update all routes where this VIF is used as an unresolved iRIF */
list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
if (err)
goto err_irif_unresolve;
}
/* Update all routes where this VIF is used as an unresolved eRIF */
list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
if (err)
goto err_erif_unresolve;
}
return 0;
err_erif_unresolve:
list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
vif_node)
mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
err_irif_unresolve:
list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
vif_node)
mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
mr_vif->rif = NULL;
return err;
}
static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
struct net_device *dev,
struct mlxsw_sp_mr_vif *mr_vif)
{
struct mlxsw_sp_mr_route_vif_entry *rve;
/* Update all routes that use this VIF as an eRIF */
list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);
/* Update all routes that use this VIF as an iRIF */
list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);
/* Update the VIF */
mr_vif->dev = dev;
mr_vif->rif = NULL;
}
int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
struct net_device *dev, vifi_t vif_index,
unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
if (WARN_ON(vif_index >= MAXVIFS))
return -EINVAL;
if (mr_vif->dev)
return -EEXIST;
return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
}
void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
{
struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
if (WARN_ON(vif_index >= MAXVIFS))
return;
if (WARN_ON(!mr_vif->dev))
return;
mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
}
struct mlxsw_sp_mr_vif *
mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
const struct net_device *dev)
{
vifi_t vif_index;
for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
if (mr_table->vifs[vif_index].dev == dev)
return &mr_table->vifs[vif_index];
return NULL;
}
int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
const struct mlxsw_sp_rif *rif)
{
const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
struct mlxsw_sp_mr_vif *mr_vif;
if (!rif_dev)
return 0;
mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
if (!mr_vif)
return 0;
return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
mr_vif->vif_flags, rif);
}
void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
const struct mlxsw_sp_rif *rif)
{
const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
struct mlxsw_sp_mr_vif *mr_vif;
if (!rif_dev)
return;
mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
if (!mr_vif)
return;
mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
}
void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
const struct mlxsw_sp_rif *rif, int mtu)
{
const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
struct mlxsw_sp_mr_route_vif_entry *rve;
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
struct mlxsw_sp_mr_vif *mr_vif;
if (!rif_dev)
return;
/* Search for a VIF that uses that RIF */
mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
if (!mr_vif)
return;
/* Update all the routes that use that VIF as an eVIF */
list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
if (mtu < rve->mr_route->min_mtu) {
rve->mr_route->min_mtu = mtu;
mr->mr_ops->route_min_mtu_update(mlxsw_sp,
rve->mr_route->route_priv,
mtu);
}
}
}
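/* Create a multicast routing table for one virtual router. A lowest
 * priority catch-all route is installed so that packets which do not
 * match any offloaded MFC route are trapped to the CPU.
 */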
struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
u32 vr_id,
enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_mr_route_params catchall_route_params = {
.prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
.key = {
.vrid = vr_id,
},
.value = {
.route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
}
};
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
struct mlxsw_sp_mr_table *mr_table;
int err;
int i;
mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
GFP_KERNEL);
if (!mr_table)
return ERR_PTR(-ENOMEM);
mr_table->vr_id = vr_id;
mr_table->mlxsw_sp = mlxsw_sp;
mr_table->proto = proto;
INIT_LIST_HEAD(&mr_table->route_list);
err = rhashtable_init(&mr_table->route_ht,
&mlxsw_sp_mr_route_ht_params);
if (err)
goto err_route_rhashtable_init;
for (i = 0; i < MAXVIFS; i++) {
INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
}
err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
mr_table->catchall_route_priv,
&catchall_route_params);
if (err)
goto err_ops_route_create;
list_add_tail(&mr_table->node, &mr->table_list);
return mr_table;
err_ops_route_create:
rhashtable_destroy(&mr_table->route_ht);
err_route_rhashtable_init:
kfree(mr_table);
return ERR_PTR(err);
}
void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
{
struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
list_del(&mr_table->node);
mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
&mr_table->catchall_route_priv);
rhashtable_destroy(&mr_table->route_ht);
kfree(mr_table);
}
void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
{
struct mlxsw_sp_mr_route *mr_route, *tmp;
int i;
list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
__mlxsw_sp_mr_route_del(mr_table, mr_route);
for (i = 0; i < MAXVIFS; i++) {
mr_table->vifs[i].dev = NULL;
mr_table->vifs[i].rif = NULL;
}
}
bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
{
int i;
for (i = 0; i < MAXVIFS; i++)
if (mr_table->vifs[i].dev)
return false;
return list_empty(&mr_table->route_list);
}
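/* Copy the hardware counters of an offloaded route into the kernel MFC
 * entry, bumping lastuse whenever new packets were seen. Trapped routes
 * are skipped, as the kernel counts their packets itself.
 */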
static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_route *mr_route)
{
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
u64 packets, bytes;
if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
return;
mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
&bytes);
switch (mr_route->mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
if (mr_route->mfc4->mfc_un.res.pkt != packets)
mr_route->mfc4->mfc_un.res.lastuse = jiffies;
mr_route->mfc4->mfc_un.res.pkt = packets;
mr_route->mfc4->mfc_un.res.bytes = bytes;
break;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
default:
WARN_ON_ONCE(1);
}
}
static void mlxsw_sp_mr_stats_update(struct work_struct *work)
{
struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
stats_update_dw.work);
struct mlxsw_sp_mr_table *mr_table;
struct mlxsw_sp_mr_route *mr_route;
unsigned long interval;
rtnl_lock();
list_for_each_entry(mr_table, &mr->table_list, node)
list_for_each_entry(mr_route, &mr_table->route_list, node)
mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
mr_route);
rtnl_unlock();
interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
}
int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_mr_ops *mr_ops)
{
struct mlxsw_sp_mr *mr;
unsigned long interval;
int err;
mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
if (!mr)
return -ENOMEM;
mr->mr_ops = mr_ops;
mlxsw_sp->mr = mr;
INIT_LIST_HEAD(&mr->table_list);
err = mr_ops->init(mlxsw_sp, mr->priv);
if (err)
goto err;
/* Create the delayed work for counter updates */
INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
return 0;
err:
kfree(mr);
return err;
}
void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
cancel_delayed_work_sync(&mr->stats_update_dw);
mr->mr_ops->fini(mr->priv);
kfree(mr);
}
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MLXSW_SPECTRUM_MCROUTER_H
#define _MLXSW_SPECTRUM_MCROUTER_H
#include <linux/mroute.h>
#include "spectrum_router.h"
#include "spectrum.h"
enum mlxsw_sp_mr_route_action {
MLXSW_SP_MR_ROUTE_ACTION_FORWARD,
MLXSW_SP_MR_ROUTE_ACTION_TRAP,
};
enum mlxsw_sp_mr_route_prio {
MLXSW_SP_MR_ROUTE_PRIO_SG,
MLXSW_SP_MR_ROUTE_PRIO_STARG,
MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
__MLXSW_SP_MR_ROUTE_PRIO_MAX
};
#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
struct mlxsw_sp_mr_route_key {
int vrid;
enum mlxsw_sp_l3proto proto;
union mlxsw_sp_l3addr group;
union mlxsw_sp_l3addr group_mask;
union mlxsw_sp_l3addr source;
union mlxsw_sp_l3addr source_mask;
};
struct mlxsw_sp_mr_route_info {
enum mlxsw_sp_mr_route_action route_action;
u16 irif_index;
u16 *erif_indices;
size_t erif_num;
u16 min_mtu;
};
struct mlxsw_sp_mr_route_params {
struct mlxsw_sp_mr_route_key key;
struct mlxsw_sp_mr_route_info value;
enum mlxsw_sp_mr_route_prio prio;
};
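/* Hardware abstraction used by the offloading logic in spectrum_mr.c.
 * The TCAM-based implementation (mlxsw_sp_mr_tcam_ops) lives in
 * spectrum_mr_tcam.c. priv and route_priv are implementation-private
 * areas whose sizes are advertised via priv_size and route_priv_size.
 */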
struct mlxsw_sp_mr_ops {
int priv_size;
int route_priv_size;
int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
struct mlxsw_sp_mr_route_params *route_params);
int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
struct mlxsw_sp_mr_route_info *route_info);
int (*route_stats)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
u64 *packets, u64 *bytes);
int (*route_action_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
enum mlxsw_sp_mr_route_action route_action);
int (*route_min_mtu_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
u16 min_mtu);
int (*route_irif_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
u16 irif_index);
int (*route_erif_add)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
u16 erif_index);
int (*route_erif_del)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
u16 erif_index);
void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv);
void (*fini)(void *priv);
};
struct mlxsw_sp_mr;
struct mlxsw_sp_mr_table;
int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_mr_ops *mr_ops);
void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
struct mfc_cache *mfc, bool replace);
void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
struct mfc_cache *mfc);
int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
struct net_device *dev, vifi_t vif_index,
unsigned long vif_flags,
const struct mlxsw_sp_rif *rif);
void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index);
int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
const struct mlxsw_sp_rif *rif);
void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
const struct mlxsw_sp_rif *rif);
void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
const struct mlxsw_sp_rif *rif, int mtu);
struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
u32 tb_id,
enum mlxsw_sp_l3proto proto);
void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table);
void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table);
bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table);
#endif
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/parman.h>
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_actions.h"
#include "spectrum_mr.h"
struct mlxsw_sp_mr_tcam_region {
struct mlxsw_sp *mlxsw_sp;
enum mlxsw_reg_rtar_key_type rtar_key_type;
struct parman *parman;
struct parman_prio *parman_prios;
};
struct mlxsw_sp_mr_tcam {
struct mlxsw_sp_mr_tcam_region ipv4_tcam_region;
};
/* This struct maps to one RIGR2 register entry */
struct mlxsw_sp_mr_erif_sublist {
struct list_head list;
u32 rigr2_kvdl_index;
int num_erifs;
u16 erif_indices[MLXSW_REG_RIGR2_MAX_ERIFS];
bool synced;
};
struct mlxsw_sp_mr_tcam_erif_list {
struct list_head erif_sublists;
u32 kvdl_index;
};
static bool
mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
int erif_list_entries = MLXSW_CORE_RES_GET(mlxsw_sp->core,
MC_ERIF_LIST_ENTRIES);
return erif_sublist->num_erifs == erif_list_entries;
}
static void
mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
INIT_LIST_HEAD(&erif_list->erif_sublists);
}
#define MLXSW_SP_KVDL_RIGR2_SIZE 1
static struct mlxsw_sp_mr_erif_sublist *
mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
struct mlxsw_sp_mr_erif_sublist *erif_sublist;
int err;
erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
if (!erif_sublist)
return ERR_PTR(-ENOMEM);
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
&erif_sublist->rigr2_kvdl_index);
if (err) {
kfree(erif_sublist);
return ERR_PTR(err);
}
list_add_tail(&erif_sublist->list, &erif_list->erif_sublists);
return erif_sublist;
}
static void
mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
list_del(&erif_sublist->list);
mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
kfree(erif_sublist);
}
static int
mlxsw_sp_mr_erif_list_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list,
u16 erif_index)
{
struct mlxsw_sp_mr_erif_sublist *sublist;
/* If there is no eRIF sublist yet, or the last one is full, allocate
 * a new one.
 */
if (list_empty(&erif_list->erif_sublists)) {
sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, erif_list);
if (IS_ERR(sublist))
return PTR_ERR(sublist);
erif_list->kvdl_index = sublist->rigr2_kvdl_index;
} else {
sublist = list_last_entry(&erif_list->erif_sublists,
struct mlxsw_sp_mr_erif_sublist,
list);
sublist->synced = false;
if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp, sublist)) {
sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp,
erif_list);
if (IS_ERR(sublist))
return PTR_ERR(sublist);
}
}
/* Add the eRIF at the last sublist's next free index */
sublist->erif_indices[sublist->num_erifs++] = erif_index;
return 0;
}
static void
mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
struct mlxsw_sp_mr_erif_sublist *erif_sublist, *tmp;
list_for_each_entry_safe(erif_sublist, tmp, &erif_list->erif_sublists,
list)
mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp, erif_sublist);
}
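/* Write all not-yet-synced eRIF sublists to the device. Each sublist maps
 * to one RIGR2 register entry; entries are chained by packing the KVDL
 * index of the next sublist into the current one.
 */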
static int
mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
struct mlxsw_sp_mr_erif_sublist *curr_sublist;
char rigr2_pl[MLXSW_REG_RIGR2_LEN];
int err;
int i;
list_for_each_entry(curr_sublist, &erif_list->erif_sublists, list) {
if (curr_sublist->synced)
continue;
/* If the sublist is not the last one, pack the next index */
if (list_is_last(&curr_sublist->list,
&erif_list->erif_sublists)) {
mlxsw_reg_rigr2_pack(rigr2_pl,
curr_sublist->rigr2_kvdl_index,
false, 0);
} else {
struct mlxsw_sp_mr_erif_sublist *next_sublist;
next_sublist = list_next_entry(curr_sublist, list);
mlxsw_reg_rigr2_pack(rigr2_pl,
curr_sublist->rigr2_kvdl_index,
true,
next_sublist->rigr2_kvdl_index);
}
/* Pack all the erifs */
for (i = 0; i < curr_sublist->num_erifs; i++) {
u16 erif_index = curr_sublist->erif_indices[i];
mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true,
erif_index);
}
/* Write the entry */
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rigr2),
rigr2_pl);
if (err)
/* No need for a rollback here because this
 * hardware entry should not be pointed to yet.
 */
return err;
curr_sublist->synced = true;
}
return 0;
}
static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to,
struct mlxsw_sp_mr_tcam_erif_list *from)
{
list_splice(&from->erif_sublists, &to->erif_sublists);
to->kvdl_index = from->kvdl_index;
}
struct mlxsw_sp_mr_tcam_route {
struct mlxsw_sp_mr_tcam_erif_list erif_list;
struct mlxsw_afa_block *afa_block;
u32 counter_index;
struct parman_item parman_item;
struct parman_prio *parman_prio;
enum mlxsw_sp_mr_route_action action;
struct mlxsw_sp_mr_route_key key;
u16 irif_index;
u16 min_mtu;
};
static struct mlxsw_afa_block *
mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_mr_route_action route_action,
u16 irif_index, u32 counter_index,
u16 min_mtu,
struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
struct mlxsw_afa_block *afa_block;
int err;
afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
if (IS_ERR(afa_block))
return afa_block;
err = mlxsw_afa_block_append_counter(afa_block, counter_index);
if (err)
goto err;
switch (route_action) {
case MLXSW_SP_MR_ROUTE_ACTION_TRAP:
err = mlxsw_afa_block_append_trap(afa_block,
MLXSW_TRAP_ID_ACL1);
if (err)
goto err;
break;
case MLXSW_SP_MR_ROUTE_ACTION_FORWARD:
/* If we are about to append a multicast router action, commit
* the erif_list.
*/
err = mlxsw_sp_mr_erif_list_commit(mlxsw_sp, erif_list);
if (err)
goto err;
err = mlxsw_afa_block_append_mcrouter(afa_block, irif_index,
min_mtu, false,
erif_list->kvdl_index);
if (err)
goto err;
break;
default:
err = -EINVAL;
goto err;
}
err = mlxsw_afa_block_commit(afa_block);
if (err)
goto err;
return afa_block;
err:
mlxsw_afa_block_destroy(afa_block);
return ERR_PTR(err);
}
static void
mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
{
mlxsw_afa_block_destroy(afa_block);
}
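/* Insert or update a route in the multicast TCAM using the RMFT2
 * register. The write replaces whatever entry occupies the given parman
 * index, so the same helper serves both first write and update.
 */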
static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
struct parman_item *parman_item,
struct mlxsw_sp_mr_route_key *key,
struct mlxsw_afa_block *afa_block)
{
char rmft2_pl[MLXSW_REG_RMFT2_LEN];
switch (key->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
key->vrid,
MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
ntohl(key->group.addr4),
ntohl(key->group_mask.addr4),
ntohl(key->source.addr4),
ntohl(key->source_mask.addr4),
mlxsw_afa_block_first_set(afa_block));
break;
case MLXSW_SP_L3_PROTO_IPV6:
default:
WARN_ON_ONCE(1);
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}
static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
struct parman_item *parman_item)
{
char rmft2_pl[MLXSW_REG_RMFT2_LEN];
mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, vrid,
0, 0, 0, 0, 0, 0, NULL);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}
static int
mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list,
struct mlxsw_sp_mr_route_info *route_info)
{
int err;
int i;
for (i = 0; i < route_info->erif_num; i++) {
u16 erif_index = route_info->erif_indices[i];
err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, erif_list,
erif_index);
if (err)
return err;
}
return 0;
}
static int
mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
struct mlxsw_sp_mr_tcam_route *route,
enum mlxsw_sp_mr_route_prio prio)
{
struct parman_prio *parman_prio = NULL;
int err;
switch (route->key.proto) {
case MLXSW_SP_L3_PROTO_IPV4:
parman_prio = &mr_tcam->ipv4_tcam_region.parman_prios[prio];
err = parman_item_add(mr_tcam->ipv4_tcam_region.parman,
parman_prio, &route->parman_item);
if (err)
return err;
break;
case MLXSW_SP_L3_PROTO_IPV6:
default:
WARN_ON_ONCE(1);
}
route->parman_prio = parman_prio;
return 0;
}
static void
mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
struct mlxsw_sp_mr_tcam_route *route)
{
switch (route->key.proto) {
case MLXSW_SP_L3_PROTO_IPV4:
parman_item_remove(mr_tcam->ipv4_tcam_region.parman,
route->parman_prio, &route->parman_item);
break;
case MLXSW_SP_L3_PROTO_IPV6:
default:
WARN_ON_ONCE(1);
}
}
static int
mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
struct mlxsw_sp_mr_route_params *route_params)
{
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
int err;
route->key = route_params->key;
route->irif_index = route_params->value.irif_index;
route->min_mtu = route_params->value.min_mtu;
route->action = route_params->value.route_action;
/* Create the egress RIFs list */
mlxsw_sp_mr_erif_list_init(&route->erif_list);
err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list,
&route_params->value);
if (err)
goto err_erif_populate;
/* Create the flow counter */
err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index);
if (err)
goto err_counter_alloc;
/* Create the flexible action block */
route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
route->action,
route->irif_index,
route->counter_index,
route->min_mtu,
&route->erif_list);
if (IS_ERR(route->afa_block)) {
err = PTR_ERR(route->afa_block);
goto err_afa_block_create;
}
/* Allocate place in the TCAM */
err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
route_params->prio);
if (err)
goto err_parman_item_add;
/* Write the route to the TCAM */
err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
&route->key, route->afa_block);
if (err)
goto err_route_replace;
return 0;
err_route_replace:
mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
err_parman_item_add:
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
err_afa_block_create:
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
err_erif_populate:
err_counter_alloc:
mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
return err;
}
static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
void *priv, void *route_priv)
{
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
&route->parman_item);
mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
}
static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u64 *packets,
u64 *bytes)
{
struct mlxsw_sp_mr_tcam_route *route = route_priv;
return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
packets, bytes);
}
static int
mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv,
enum mlxsw_sp_mr_route_action route_action)
{
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_afa_block *afa_block;
int err;
/* Create a new flexible action block */
afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route_action,
route->irif_index,
route->counter_index,
route->min_mtu,
&route->erif_list);
if (IS_ERR(afa_block))
return PTR_ERR(afa_block);
/* Update the TCAM route entry */
err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
&route->key, afa_block);
if (err)
goto err;
/* Delete the old one */
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
route->afa_block = afa_block;
route->action = route_action;
return 0;
err:
mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
return err;
}
static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 min_mtu)
{
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_afa_block *afa_block;
int err;
/* Create a new flexible action block */
afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
route->action,
route->irif_index,
route->counter_index,
min_mtu,
&route->erif_list);
if (IS_ERR(afa_block))
return PTR_ERR(afa_block);
/* Update the TCAM route entry */
err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
&route->key, afa_block);
if (err)
goto err;
/* Delete the old one */
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
route->afa_block = afa_block;
route->min_mtu = min_mtu;
return 0;
err:
mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
return err;
}
static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 irif_index)
{
struct mlxsw_sp_mr_tcam_route *route = route_priv;
if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
return -EINVAL;
route->irif_index = irif_index;
return 0;
}
static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 erif_index)
{
struct mlxsw_sp_mr_tcam_route *route = route_priv;
int err;
err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list,
erif_index);
if (err)
return err;
/* Commit the action only if the route action is not TRAP */
if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
return mlxsw_sp_mr_erif_list_commit(mlxsw_sp,
&route->erif_list);
return 0;
}
static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 erif_index)
{
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_erif_sublist *erif_sublist;
struct mlxsw_sp_mr_tcam_erif_list erif_list;
struct mlxsw_afa_block *afa_block;
int err;
int i;
/* Create a copy of the original erif_list without the deleted entry */
mlxsw_sp_mr_erif_list_init(&erif_list);
list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists, list) {
for (i = 0; i < erif_sublist->num_erifs; i++) {
u16 curr_erif = erif_sublist->erif_indices[i];
if (curr_erif == erif_index)
continue;
err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list,
curr_erif);
if (err)
goto err_erif_list_add;
}
}
/* Create the flexible action block pointing to the new erif_list */
afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action,
route->irif_index,
route->counter_index,
route->min_mtu,
&erif_list);
if (IS_ERR(afa_block)) {
err = PTR_ERR(afa_block);
goto err_afa_block_create;
}
/* Update the TCAM route entry */
err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
&route->key, afa_block);
if (err)
goto err_route_write;
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
route->afa_block = afa_block;
mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
return 0;
err_route_write:
mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_list_add:
mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
return err;
}
static int
mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
struct mlxsw_sp_mr_route_info *route_info)
{
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam_erif_list erif_list;
struct mlxsw_afa_block *afa_block;
int err;
/* Create a new erif_list */
mlxsw_sp_mr_erif_list_init(&erif_list);
err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &erif_list, route_info);
if (err)
goto err_erif_populate;
/* Create the flexible action block pointing to the new erif_list */
afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
route_info->route_action,
route_info->irif_index,
route->counter_index,
route_info->min_mtu,
&erif_list);
if (IS_ERR(afa_block)) {
err = PTR_ERR(afa_block);
goto err_afa_block_create;
}
/* Update the TCAM route entry */
err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
&route->key, afa_block);
if (err)
goto err_route_write;
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
route->afa_block = afa_block;
mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
route->action = route_info->route_action;
route->irif_index = route_info->irif_index;
route->min_mtu = route_info->min_mtu;
return 0;
err_route_write:
mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_populate:
mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
return err;
}
#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16
static int
mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
char rtar_pl[MLXSW_REG_RTAR_LEN];
mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
mr_tcam_region->rtar_key_type,
MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}
static void
mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
char rtar_pl[MLXSW_REG_RTAR_LEN];
mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
mr_tcam_region->rtar_key_type, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}
static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
unsigned long new_count)
{
struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
char rtar_pl[MLXSW_REG_RTAR_LEN];
u64 max_tcam_rules;
max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
if (new_count > max_tcam_rules)
return -EINVAL;
mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
mr_tcam_region->rtar_key_type, new_count);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}
static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
unsigned long from_index,
unsigned long to_index,
unsigned long count)
{
struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
char rrcr_pl[MLXSW_REG_RRCR_LEN];
mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
from_index, count,
mr_tcam_region->rtar_key_type, to_index);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
}
static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
.base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
.resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
.resize = mlxsw_sp_mr_tcam_region_parman_resize,
.move = mlxsw_sp_mr_tcam_region_parman_move,
.algo = PARMAN_ALGO_TYPE_LSORT,
};
static int
mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
enum mlxsw_reg_rtar_key_type rtar_key_type)
{
struct parman_prio *parman_prios;
struct parman *parman;
int err;
int i;
mr_tcam_region->rtar_key_type = rtar_key_type;
mr_tcam_region->mlxsw_sp = mlxsw_sp;
err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
if (err)
return err;
parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
mr_tcam_region);
if (!parman) {
err = -ENOMEM;
goto err_parman_create;
}
mr_tcam_region->parman = parman;
parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
sizeof(*parman_prios), GFP_KERNEL);
if (!parman_prios) {
err = -ENOMEM;
goto err_parman_prios_alloc;
}
mr_tcam_region->parman_prios = parman_prios;
for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
parman_prio_init(mr_tcam_region->parman,
&mr_tcam_region->parman_prios[i], i);
return 0;
err_parman_prios_alloc:
parman_destroy(parman);
err_parman_create:
mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
return err;
}
static void
mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
int i;
for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
parman_prio_fini(&mr_tcam_region->parman_prios[i]);
kfree(mr_tcam_region->parman_prios);
parman_destroy(mr_tcam_region->parman);
mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
}
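/* For orientation, a hedged sketch (illustrative, not part of this patch) of
 * how a route entry is placed into the region: parman hands out a free TCAM
 * index for a (priority, item) pair, invoking the resize/move callbacks from
 * mlxsw_sp_mr_tcam_region_parman_ops above as needed. The example_ name and
 * the plain int prio parameter are assumptions made for the sketch.
 */
static int
example_mr_tcam_route_insert(struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
			     struct parman_item *parman_item, int prio)
{
	/* prio indexes the per-priority bands initialized in
	 * mlxsw_sp_mr_tcam_region_init() above.
	 */
	return parman_item_add(mr_tcam_region->parman,
			       &mr_tcam_region->parman_prios[prio],
			       parman_item);
}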
static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
return -EIO;
return mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
&mr_tcam->ipv4_tcam_region,
MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST);
}
static void mlxsw_sp_mr_tcam_fini(void *priv)
{
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
mlxsw_sp_mr_tcam_region_fini(&mr_tcam->ipv4_tcam_region);
}
const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
.priv_size = sizeof(struct mlxsw_sp_mr_tcam),
.route_priv_size = sizeof(struct mlxsw_sp_mr_tcam_route),
.init = mlxsw_sp_mr_tcam_init,
.route_create = mlxsw_sp_mr_tcam_route_create,
.route_update = mlxsw_sp_mr_tcam_route_update,
.route_stats = mlxsw_sp_mr_tcam_route_stats,
.route_action_update = mlxsw_sp_mr_tcam_route_action_update,
.route_min_mtu_update = mlxsw_sp_mr_tcam_route_min_mtu_update,
.route_irif_update = mlxsw_sp_mr_tcam_route_irif_update,
.route_erif_add = mlxsw_sp_mr_tcam_route_erif_add,
.route_erif_del = mlxsw_sp_mr_tcam_route_erif_del,
.route_destroy = mlxsw_sp_mr_tcam_route_destroy,
.fini = mlxsw_sp_mr_tcam_fini,
};
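/* A hedged sketch of how spectrum_mr.c is expected to drive this ops table:
 * the protocol-level code allocates ops->priv_size bytes of backend state and
 * dispatches through the function pointers, never touching TCAM details
 * directly. example_mr_backend_start() is illustrative only; the real
 * mlxsw_sp_mr_init() embeds the private area in its own state instead of a
 * separate allocation.
 */
static int example_mr_backend_start(struct mlxsw_sp *mlxsw_sp,
				    const struct mlxsw_sp_mr_ops *ops)
{
	void *priv = kzalloc(ops->priv_size, GFP_KERNEL);
	int err;

	if (!priv)
		return -ENOMEM;
	err = ops->init(mlxsw_sp, priv);	/* mlxsw_sp_mr_tcam_init() */
	if (err)
		kfree(priv);
	/* on success, priv would be kept in the caller's state */
	return err;
}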
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MLXSW_SPECTRUM_MCROUTER_TCAM_H
#define _MLXSW_SPECTRUM_MCROUTER_TCAM_H
#include "spectrum.h"
#include "spectrum_mr.h"
extern const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops;
#endif
@@ -65,6 +65,8 @@
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
struct mlxsw_sp_vr;
@@ -459,6 +461,7 @@ struct mlxsw_sp_vr {
unsigned int rif_count;
struct mlxsw_sp_fib *fib4;
struct mlxsw_sp_fib *fib6;
struct mlxsw_sp_mr_table *mr4_table;
};
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
@@ -653,7 +656,7 @@ static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
-return !!vr->fib4 || !!vr->fib6;
+return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
@@ -693,8 +696,8 @@ static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
-/* For our purpose, squash main and local table into one */
+/* For our purpose, squash main, default and local tables into one */
-if (tb_id == RT_TABLE_LOCAL)
+if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
tb_id = RT_TABLE_MAIN;
return tb_id;
}
@@ -744,9 +747,18 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
err = PTR_ERR(vr->fib6);
goto err_fib6_create;
}
vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr->mr4_table)) {
err = PTR_ERR(vr->mr4_table);
goto err_mr_table_create;
}
vr->tb_id = tb_id;
return vr;
err_mr_table_create:
mlxsw_sp_fib_destroy(vr->fib6);
vr->fib6 = NULL;
err_fib6_create:
mlxsw_sp_fib_destroy(vr->fib4);
vr->fib4 = NULL;
@@ -755,6 +767,8 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
mlxsw_sp_mr_table_destroy(vr->mr4_table);
vr->mr4_table = NULL;
mlxsw_sp_fib_destroy(vr->fib6);
vr->fib6 = NULL;
mlxsw_sp_fib_destroy(vr->fib4);
@@ -775,7 +789,8 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
-list_empty(&vr->fib6->node_list))
+list_empty(&vr->fib6->node_list) &&
mlxsw_sp_mr_table_empty(vr->mr4_table))
mlxsw_sp_vr_destroy(vr);
}
@@ -4731,6 +4746,75 @@ static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
return 0;
}
static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
struct mfc_entry_notifier_info *men_info,
bool replace)
{
struct mlxsw_sp_vr *vr;
if (mlxsw_sp->router->aborted)
return 0;
vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id);
if (IS_ERR(vr))
return PTR_ERR(vr);
return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
}
static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
struct mfc_entry_notifier_info *men_info)
{
struct mlxsw_sp_vr *vr;
if (mlxsw_sp->router->aborted)
return;
vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
if (WARN_ON(!vr))
return;
mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
mlxsw_sp_vr_put(vr);
}
static int
mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
struct vif_entry_notifier_info *ven_info)
{
struct mlxsw_sp_rif *rif;
struct mlxsw_sp_vr *vr;
if (mlxsw_sp->router->aborted)
return 0;
vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id);
if (IS_ERR(vr))
return PTR_ERR(vr);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
ven_info->vif_index,
ven_info->vif_flags, rif);
}
static void
mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
struct vif_entry_notifier_info *ven_info)
{
struct mlxsw_sp_vr *vr;
if (mlxsw_sp->router->aborted)
return;
vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
if (WARN_ON(!vr))
return;
mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
mlxsw_sp_vr_put(vr);
}
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
@@ -4741,6 +4825,10 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
/* The multicast router code does not need an abort trap as by default,
* packets that don't match any routes are trapped to the CPU.
*/
proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
MLXSW_SP_LPM_TREE_MIN + 1);
@@ -4822,6 +4910,8 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp_vr_is_used(vr))
continue;
mlxsw_sp_mr_table_flush(vr->mr4_table);
mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
/* If virtual router was only used for IPv4, then it's no
@@ -4854,6 +4944,8 @@ struct mlxsw_sp_fib_event_work {
struct fib_entry_notifier_info fen_info;
struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
struct mfc_entry_notifier_info men_info;
struct vif_entry_notifier_info ven_info;
};
struct mlxsw_sp *mlxsw_sp;
unsigned long event;
@@ -4940,6 +5032,55 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
kfree(fib_work);
}
static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
{
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
struct fib_rule *rule;
bool replace;
int err;
rtnl_lock();
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_ADD:
replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
replace);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
ipmr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_ENTRY_DEL:
mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
ipmr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
&fib_work->ven_info);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
dev_put(fib_work->ven_info.dev);
break;
case FIB_EVENT_VIF_DEL:
mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
&fib_work->ven_info);
dev_put(fib_work->ven_info.dev);
break;
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
rule = fib_work->fr_info.rule;
if (!ipmr_rule_default(rule) && !rule->l3mdev)
mlxsw_sp_router_fib_abort(mlxsw_sp);
fib_rule_put(rule);
break;
}
rtnl_unlock();
kfree(fib_work);
}
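/* Note on the split: the notifier callback below runs under rcu_read_lock()
 * and may not sleep, so it only snapshots the notification payload and takes
 * references (ipmr_cache_hold(), dev_hold(), fib_rule_get()); the work items
 * above then program the hardware under RTNL and drop those references.
 */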
static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
@@ -4985,6 +5126,30 @@ static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
}
}
static void
mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
ipmr_cache_hold(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD: /* fall through */
case FIB_EVENT_VIF_DEL:
memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
dev_hold(fib_work->ven_info.dev);
break;
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info));
fib_rule_get(fib_work->fr_info.rule);
break;
}
}
/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
@@ -4994,7 +5159,8 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
struct mlxsw_sp_router *router;
if (!net_eq(info->net, &init_net) ||
-(info->family != AF_INET && info->family != AF_INET6))
+(info->family != AF_INET && info->family != AF_INET6 &&
info->family != RTNL_FAMILY_IPMR))
return NOTIFY_DONE;
fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
@@ -5014,6 +5180,10 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
mlxsw_sp_router_fib6_event(fib_work, info);
break;
case RTNL_FAMILY_IPMR:
INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
mlxsw_sp_router_fibmr_event(fib_work, info);
break;
}
mlxsw_core_schedule_work(&fib_work->work);
@@ -5227,12 +5397,18 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_configure;
err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
if (err)
goto err_mr_rif_add;
mlxsw_sp_rif_counters_alloc(rif);
mlxsw_sp->router->rifs[rif_index] = rif;
vr->rif_count++;
return rif;
err_mr_rif_add:
ops->deconfigure(rif);
err_configure:
if (fid)
mlxsw_sp_fid_put(fid);
@@ -5257,6 +5433,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
vr->rif_count--;
mlxsw_sp->router->rifs[rif->rif_index] = NULL;
mlxsw_sp_rif_counters_free(rif);
mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
ops->deconfigure(rif);
if (fid)
/* Loopback RIFs are not associated with a FID. */
@@ -5597,6 +5774,17 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
if (err)
goto err_rif_fdb_op;
if (rif->mtu != dev->mtu) {
struct mlxsw_sp_vr *vr;
/* The RIF is relevant only to its mr_table instance, as unlike
* unicast routing, in multicast routing a RIF cannot be shared
* between several multicast routing tables.
*/
vr = &mlxsw_sp->router->vrs[rif->vr_id];
mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
}
ether_addr_copy(rif->addr, dev->dev_addr);
rif->mtu = dev->mtu;
@@ -6120,6 +6308,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_lpm_init;
err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
if (err)
goto err_mr_init;
err = mlxsw_sp_vrs_init(mlxsw_sp);
if (err)
goto err_vrs_init;
@@ -6141,6 +6333,8 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
err_neigh_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
@@ -6162,6 +6356,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
mlxsw_sp_mr_fini(mlxsw_sp);
mlxsw_sp_lpm_fini(mlxsw_sp);
rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
...
@@ -5,6 +5,8 @@
#include <linux/pim.h>
#include <linux/rhashtable.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/fib_notifier.h>
#include <uapi/linux/mroute.h>
#ifdef CONFIG_IP_MROUTE
@@ -18,6 +20,7 @@ int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
int ip_mr_init(void);
bool ipmr_rule_default(const struct fib_rule *rule);
#else
static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
char __user *optval, unsigned int optlen)
@@ -45,6 +48,11 @@ static inline int ip_mroute_opt(int opt)
{
return 0;
}
static inline bool ipmr_rule_default(const struct fib_rule *rule)
{
return true;
}
#endif
struct vif_device {
@@ -58,6 +66,14 @@ struct vif_device {
int link; /* Physical interface index */
};
struct vif_entry_notifier_info {
struct fib_notifier_info info;
struct net_device *dev;
vifi_t vif_index;
unsigned short vif_flags;
u32 tb_id;
};
#define VIFF_STATIC 0x8000
#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
@@ -81,9 +97,11 @@ struct mr_table {
/* mfc_flags:
* MFC_STATIC - the entry was added statically (not by a routing daemon)
* MFC_OFFLOAD - the entry was offloaded to the hardware
*/
enum {
MFC_STATIC = BIT(0),
MFC_OFFLOAD = BIT(1),
};
struct mfc_cache_cmp_arg {
@@ -109,6 +127,7 @@ struct mfc_cache_cmp_arg {
* @wrong_if: number of wrong source interface hits
* @lastuse: time of last use of the group (traffic or update)
* @ttls: OIF TTL threshold array
* @refcount: reference count for this entry
* @list: global entry list
* @rcu: used for entry destruction
*/
@@ -138,14 +157,40 @@ struct mfc_cache {
unsigned long wrong_if;
unsigned long lastuse;
unsigned char ttls[MAXVIFS];
refcount_t refcount;
} res;
} mfc_un;
struct list_head list;
struct rcu_head rcu;
};
struct mfc_entry_notifier_info {
struct fib_notifier_info info;
struct mfc_cache *mfc;
u32 tb_id;
};
struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
struct rtmsg *rtm, u32 portid);
#ifdef CONFIG_IP_MROUTE
void ipmr_cache_free(struct mfc_cache *mfc_cache);
#else
static inline void ipmr_cache_free(struct mfc_cache *mfc_cache)
{
}
#endif
static inline void ipmr_cache_put(struct mfc_cache *c)
{
if (refcount_dec_and_test(&c->mfc_un.res.refcount))
ipmr_cache_free(c);
}
static inline void ipmr_cache_hold(struct mfc_cache *c)
{
refcount_inc(&c->mfc_un.res.refcount);
}
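/* A minimal usage sketch (mirroring the mlxsw notifier above): a driver that
 * defers MFC processing to a work item takes a reference in the atomic
 * notifier callback and drops it once the deferred work has consumed the
 * entry. example_mfc_defer() is illustrative only.
 */
static inline void example_mfc_defer(struct mfc_cache *c)
{
	ipmr_cache_hold(c);	/* keep the entry alive across the deferral */
	/* ... schedule work that ends with ipmr_cache_put(c) ... */
}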
#endif
@@ -20,6 +20,8 @@ enum fib_event_type {
FIB_EVENT_RULE_DEL,
FIB_EVENT_NH_ADD,
FIB_EVENT_NH_DEL,
FIB_EVENT_VIF_ADD,
FIB_EVENT_VIF_DEL,
};
struct fib_notifier_ops {
...
@@ -163,6 +163,9 @@ struct netns_ipv4 {
struct fib_notifier_ops *notifier_ops;
unsigned int fib_seq; /* protected by rtnl_mutex */
struct fib_notifier_ops *ipmr_notifier_ops;
unsigned int ipmr_seq; /* protected by rtnl_mutex */
atomic_t rt_genid;
};
#endif
@@ -264,6 +264,22 @@ static void __net_exit ipmr_rules_exit(struct net *net)
fib_rules_unregister(net->ipv4.mr_rules_ops);
rtnl_unlock();
}
static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
{
return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR);
}
static unsigned int ipmr_rules_seq_read(struct net *net)
{
return fib_rules_seq_read(net, RTNL_FAMILY_IPMR);
}
bool ipmr_rule_default(const struct fib_rule *rule)
{
return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT;
}
EXPORT_SYMBOL(ipmr_rule_default);
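/* A note on the check above: fib_rule_matchall() is true only when the rule
 * carries no match selectors, so this helper singles out the catch-all rule
 * that ipmr_rules_init() installs toward RT_TABLE_DEFAULT. Any other rule
 * implies policy that an offloading driver cannot reproduce, which is why
 * mlxsw aborts on such rules.
 */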
#else
#define ipmr_for_each_table(mrt, net) \
for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
@@ -298,6 +314,22 @@ static void __net_exit ipmr_rules_exit(struct net *net)
net->ipv4.mrt = NULL;
rtnl_unlock();
}
static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
{
return 0;
}
static unsigned int ipmr_rules_seq_read(struct net *net)
{
return 0;
}
bool ipmr_rule_default(const struct fib_rule *rule)
{
return true;
}
EXPORT_SYMBOL(ipmr_rule_default);
#endif
static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
@@ -587,6 +619,82 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
}
#endif
static int call_ipmr_vif_entry_notifier(struct notifier_block *nb,
struct net *net,
enum fib_event_type event_type,
struct vif_device *vif,
vifi_t vif_index, u32 tb_id)
{
struct vif_entry_notifier_info info = {
.info = {
.family = RTNL_FAMILY_IPMR,
.net = net,
},
.dev = vif->dev,
.vif_index = vif_index,
.vif_flags = vif->flags,
.tb_id = tb_id,
};
return call_fib_notifier(nb, net, event_type, &info.info);
}
static int call_ipmr_vif_entry_notifiers(struct net *net,
enum fib_event_type event_type,
struct vif_device *vif,
vifi_t vif_index, u32 tb_id)
{
struct vif_entry_notifier_info info = {
.info = {
.family = RTNL_FAMILY_IPMR,
.net = net,
},
.dev = vif->dev,
.vif_index = vif_index,
.vif_flags = vif->flags,
.tb_id = tb_id,
};
ASSERT_RTNL();
net->ipv4.ipmr_seq++;
return call_fib_notifiers(net, event_type, &info.info);
}
static int call_ipmr_mfc_entry_notifier(struct notifier_block *nb,
struct net *net,
enum fib_event_type event_type,
struct mfc_cache *mfc, u32 tb_id)
{
struct mfc_entry_notifier_info info = {
.info = {
.family = RTNL_FAMILY_IPMR,
.net = net,
},
.mfc = mfc,
.tb_id = tb_id
};
return call_fib_notifier(nb, net, event_type, &info.info);
}
static int call_ipmr_mfc_entry_notifiers(struct net *net,
enum fib_event_type event_type,
struct mfc_cache *mfc, u32 tb_id)
{
struct mfc_entry_notifier_info info = {
.info = {
.family = RTNL_FAMILY_IPMR,
.net = net,
},
.mfc = mfc,
.tb_id = tb_id
};
ASSERT_RTNL();
net->ipv4.ipmr_seq++;
return call_fib_notifiers(net, event_type, &info.info);
}
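/* Only the call_*_notifiers() variants above bump net->ipv4.ipmr_seq: they
 * publish live changes under RTNL. The single-notifier variants are used by
 * ipmr_dump() below to replay existing state to one listener and must leave
 * the sequence count untouched.
 */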
/**
* vif_delete - Delete a VIF entry
* @notify: Set to 1, if the caller is a notifier_call
@@ -594,6 +702,7 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
struct list_head *head)
{
struct net *net = read_pnet(&mrt->net);
struct vif_device *v;
struct net_device *dev;
struct in_device *in_dev;
@@ -603,6 +712,10 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
v = &mrt->vif_table[vifi];
if (VIF_EXISTS(mrt, vifi))
call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_DEL, v, vifi,
mrt->id);
write_lock_bh(&mrt_lock);
dev = v->dev;
v->dev = NULL;
@@ -652,10 +765,11 @@ static void ipmr_cache_free_rcu(struct rcu_head *head)
kmem_cache_free(mrt_cachep, c);
}
-static inline void ipmr_cache_free(struct mfc_cache *c)
+void ipmr_cache_free(struct mfc_cache *c)
{
call_rcu(&c->rcu, ipmr_cache_free_rcu);
}
EXPORT_SYMBOL(ipmr_cache_free);
/* Destroy an unresolved cache entry, killing queued skbs
* and reporting error to netlink readers.
@@ -851,6 +965,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
if (vifi+1 > mrt->maxvif)
mrt->maxvif = vifi+1;
write_unlock_bh(&mrt_lock);
call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, vifi, mrt->id);
return 0;
}
@@ -949,6 +1064,7 @@ static struct mfc_cache *ipmr_cache_alloc(void)
if (c) {
c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->mfc_un.res.minvif = MAXVIFS;
refcount_set(&c->mfc_un.res.refcount, 1);
}
return c;
}
@@ -1150,6 +1266,7 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
struct net *net = read_pnet(&mrt->net);
struct mfc_cache *c;
/* The entries are added/deleted only under RTNL */
@@ -1161,8 +1278,9 @@ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
return -ENOENT;
rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
list_del_rcu(&c->list);
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
mroute_netlink_event(mrt, c, RTM_DELROUTE);
-ipmr_cache_free(c);
+ipmr_cache_put(c);
return 0;
}
@@ -1189,6 +1307,8 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
if (!mrtsock)
c->mfc_flags |= MFC_STATIC;
write_unlock_bh(&mrt_lock);
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
mrt->id);
mroute_netlink_event(mrt, c, RTM_NEWROUTE);
return 0;
}
@@ -1238,6 +1358,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
ipmr_cache_resolve(net, mrt, uc, c);
ipmr_cache_free(uc);
}
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id);
mroute_netlink_event(mrt, c, RTM_NEWROUTE);
return 0;
}
@@ -1245,6 +1366,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
struct net *net = read_pnet(&mrt->net);
struct mfc_cache *c, *tmp;
LIST_HEAD(list);
int i;
@@ -1263,8 +1385,10 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
continue;
rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
list_del_rcu(&c->list);
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c,
mrt->id);
mroute_netlink_event(mrt, c, RTM_DELROUTE);
-ipmr_cache_free(c);
+ipmr_cache_put(c);
}
if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
@@ -2156,6 +2280,9 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
return -EMSGSIZE;
if (c->mfc_flags & MFC_OFFLOAD)
rtm->rtm_flags |= RTNH_F_OFFLOAD;
if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
return -EMSGSIZE;
@@ -3048,14 +3175,87 @@ static const struct net_protocol pim_protocol = {
};
#endif
static unsigned int ipmr_seq_read(struct net *net)
{
ASSERT_RTNL();
return net->ipv4.ipmr_seq + ipmr_rules_seq_read(net);
}
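/* This sum is what the fib notifier core samples before and after replaying
 * state through ipmr_dump(); if it moved, a change raced with the dump and
 * the listener registration is retried (see register_fib_notifier()).
 */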
static int ipmr_dump(struct net *net, struct notifier_block *nb)
{
struct mr_table *mrt;
int err;
err = ipmr_rules_dump(net, nb);
if (err)
return err;
ipmr_for_each_table(mrt, net) {
struct vif_device *v = &mrt->vif_table[0];
struct mfc_cache *mfc;
int vifi;
/* Notify on table VIF entries */
read_lock(&mrt_lock);
for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
if (!v->dev)
continue;
call_ipmr_vif_entry_notifier(nb, net, FIB_EVENT_VIF_ADD,
v, vifi, mrt->id);
}
read_unlock(&mrt_lock);
/* Notify on table MFC entries */
list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
call_ipmr_mfc_entry_notifier(nb, net,
FIB_EVENT_ENTRY_ADD, mfc,
mrt->id);
}
return 0;
}
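/* The dump replays every VIF and MFC entry as an ADD event, which is how a
 * driver that registers after multicast routes already exist converges to
 * the kernel's current state.
 */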
static const struct fib_notifier_ops ipmr_notifier_ops_template = {
.family = RTNL_FAMILY_IPMR,
.fib_seq_read = ipmr_seq_read,
.fib_dump = ipmr_dump,
.owner = THIS_MODULE,
};
int __net_init ipmr_notifier_init(struct net *net)
{
struct fib_notifier_ops *ops;
net->ipv4.ipmr_seq = 0;
ops = fib_notifier_ops_register(&ipmr_notifier_ops_template, net);
if (IS_ERR(ops))
return PTR_ERR(ops);
net->ipv4.ipmr_notifier_ops = ops;
return 0;
}
static void __net_exit ipmr_notifier_exit(struct net *net)
{
fib_notifier_ops_unregister(net->ipv4.ipmr_notifier_ops);
net->ipv4.ipmr_notifier_ops = NULL;
}
/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
int err;
err = ipmr_notifier_init(net);
if (err)
goto ipmr_notifier_fail;
err = ipmr_rules_init(net);
if (err < 0)
-goto fail;
+goto ipmr_rules_fail;
#ifdef CONFIG_PROC_FS
err = -ENOMEM;
@@ -3072,7 +3272,9 @@ static int __net_init ipmr_net_init(struct net *net)
proc_vif_fail:
ipmr_rules_exit(net);
#endif
-fail:
+ipmr_rules_fail:
ipmr_notifier_exit(net);
ipmr_notifier_fail:
return err;
}
@@ -3082,6 +3284,7 @@ static void __net_exit ipmr_net_exit(struct net *net)
remove_proc_entry("ip_mr_cache", net->proc_net);
remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
ipmr_notifier_exit(net);
ipmr_rules_exit(net);
}
...