Commit d521bc0a authored by Paolo Abeni

Merge branch 'mlxsw-unified-bridge-conversion-part-4-6'

Ido Schimmel says:

====================
mlxsw: Unified bridge conversion - part 4/6

This is the fourth part of the conversion of mlxsw to the unified bridge
model.

Unlike previous parts that prepared mlxsw for the conversion, this part
actually starts the conversion. It focuses on flooding configuration and
converts mlxsw to the more "raw" APIs of the unified bridge model.

The patches configure the different stages of the flooding pipeline in
Spectrum, which at a high level looks as follows:

         +------------+                +----------+           +-------+
  {FID,  |            | {Packet type,  |          |           |       |  MID
   DMAC} | FDB lookup |  Bridge type}  |   SFGC   | MID base  |       | Index
+-------->   (miss)   +----------------> register +-----------> Adder +------->
         |            |                |          |           |       |
         |            |                |          |           |       |
         +------------+                +----+-----+           +---^---+
                                            |                     |
                                    Table   |                     |
                                     type   |                     | Offset
                                            |      +-------+      |
                                            |      |       |      |
                                            |      |       |      |
                                            +----->+  Mux  +------+
                                                   |       |
                                                   |       |
                                                   +-^---^-+
                                                     |   |
                                                  FID|   |FID
                                                     |   |offset
                                                     +   +

The multicast identifier (MID) index is used as an index to the port
group table (PGT) that contains a bitmap of ports via which a packet
needs to be replicated.
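
Purely as an illustration of the adder / mux stage in the diagram above
(this is not code from the patches; the enum and helper names are
invented for the example), the MID index derivation can be sketched as:

  /* Illustrative sketch only -- not driver code. */
  enum ex_flood_table_type {
          EX_TABLE_TYPE_FID,              /* mux selects the FID itself */
          EX_TABLE_TYPE_FID_OFFSET,       /* mux selects the FID offset */
  };

  static u16 ex_mid_index(u16 mid_base, enum ex_flood_table_type table_type,
                          u16 fid, u16 fid_offset)
  {
          u16 offset = table_type == EX_TABLE_TYPE_FID ? fid : fid_offset;

          return mid_base + offset;       /* index into the PGT */
  }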

From the PGT table, the packet continues to the multicast port egress
(MPE) table that determines the packet's egress VLAN. This is a
two-dimensional table that is indexed by port and switch multicast port
to egress (SMPE) index. The latter can be thought of as a FID. Without
it, all the packets replicated via a certain port would get the same
VLAN, regardless of the bridge domain (FID).

Logically, these two steps look as follows:

                     PGT table                           MPE table
             +-----------------------+               +---------------+
             |                       | {Local port,  |               | Egress
  MID index  | Local ports bitmap #1 |  SMPE index}  |               |  VID
+------------>        ...            +--------------->               +-------->
             | Local ports bitmap #N |               |               |
             |                       |          SMPE |               |
             +-----------------------+               +---------------+
                                                        Local port

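Again purely as an illustration (the tables, sizes and names below are
invented for the example and do not exist in the driver), the two
lookups can be sketched as:

  #define EX_MAX_PORTS    64
  #define EX_MAX_SMPE     16

  struct ex_pgt_entry {
          unsigned long port_bitmap[BITS_TO_LONGS(EX_MAX_PORTS)];
          u16 smpe_index;                         /* row selector for the MPE table */
  };

  static struct ex_pgt_entry ex_pgt[256];         /* indexed by MID */
  static u16 ex_mpe[EX_MAX_PORTS][EX_MAX_SMPE];   /* [local port][SMPE] -> egress VID */

  static void ex_flood(u16 mid_index)
  {
          struct ex_pgt_entry *e = &ex_pgt[mid_index];    /* step 1: PGT lookup */
          unsigned int local_port;

          for_each_set_bit(local_port, e->port_bitmap, EX_MAX_PORTS) {
                  u16 egress_vid = ex_mpe[local_port][e->smpe_index]; /* step 2: MPE */

                  /* transmit one copy via 'local_port' tagged with 'egress_vid' */
          }
  }
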
Patchset overview:

Patch #1 adds a variable to guard against mixed model configuration. It
will be removed in part 6, when mlxsw is fully converted to the unified
model.

Patches #2-#5 introduce two new FID attributes required for flooding
configuration in the new model:

1. 'flood_rsp': Instructs the firmware to handle flooding configuration
for this FID. Only set for router FIDs (rFIDs), which are used to
connect a {Port, VLAN} to the router block.

2. 'bridge_type': Allows the device to determine the flood table (i.e.,
base index to the PGT table) for the FID. The first type will be used
for FIDs in a VLAN-aware bridge and the second for FIDs representing
VLAN-unaware bridges.
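
Both attributes end up in the SFMR register. For illustration only, a
hypothetical caller of the extended SFMR pack helper (the helper change
is in the reg.h hunk below) could pass them like this; the wrapper
function and the chosen values are made up for the example:

  static int ex_fid_edit(struct mlxsw_sp *mlxsw_sp, u16 fid_index, u16 fid_offset)
  {
          char sfmr_pl[MLXSW_REG_SFMR_LEN];

          /* A regular bridge FID: flooding is handled by the driver rather
           * than the firmware (flood_rsp = false), and the flood table is
           * selected via bridge type 0 (FIDs of a VLAN-aware bridge).
           */
          mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid_index,
                              fid_offset, false, MLXSW_REG_BRIDGE_TYPE_0);
          return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
  }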

Patch #6 configures the MPE table, which determines the egress VLAN of
packets forwarded according to L2 multicast / flood.

Patches #7-#11 add the PGT table and related APIs to allocate entries
and set / clear ports in them.
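
A hedged usage sketch of the new PGT API (the declarations are visible
in the spectrum.h hunk below); the wrapper function itself is invented
for the example:

  static int ex_pgt_usage(struct mlxsw_sp *mlxsw_sp, u16 smpe, u16 local_port)
  {
          u16 mid;
          int err;

          err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mid);   /* reserve a MID index */
          if (err)
                  return err;

          /* Mark the port as a member of the PGT entry; passing 'false'
           * clears the membership again.
           */
          err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mid, smpe, local_port, true);
          if (err) {
                  mlxsw_sp_pgt_mid_free(mlxsw_sp, mid);
                  return err;
          }

          mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mid, smpe, local_port, false);
          mlxsw_sp_pgt_mid_free(mlxsw_sp, mid);
          return 0;
  }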

Patches #12-#13 convert the flooding configuration to use the new PGT
APIs.
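
For context, packing a single SFGC entry with the new 'mid_base' field
(the extended helper is in the reg.h hunk below) might look roughly as
follows; the traffic type, table type and flood table index here are
only examples:

  static int ex_sfgc_write(struct mlxsw_sp *mlxsw_sp, u16 mid_base)
  {
          char sfgc_pl[MLXSW_REG_SFGC_LEN];

          mlxsw_reg_sfgc_pack(sfgc_pl, MLXSW_REG_SFGC_TYPE_BROADCAST,
                              MLXSW_REG_BRIDGE_TYPE_0,
                              MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
                              0, mid_base);
          return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
  }
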
====================

Link: https://lore.kernel.org/r/20220627070621.648499-1-idosch@nvidia.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 778964f2 fe94df6d
@@ -28,7 +28,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_qdisc.o spectrum_span.o \
spectrum_nve.o spectrum_nve_vxlan.o \
spectrum_dpipe.o spectrum_trap.o \
spectrum_ethtool.o spectrum_policer.o
spectrum_ethtool.o spectrum_policer.o \
spectrum_pgt.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
@@ -1054,9 +1054,10 @@ enum mlxsw_reg_sfgc_type {
*/
MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
enum mlxsw_reg_sfgc_bridge_type {
MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
/* bridge_type is used in SFGC and SFMR. */
enum mlxsw_reg_bridge_type {
MLXSW_REG_BRIDGE_TYPE_0 = 0, /* Used for .1q FIDs. */
MLXSW_REG_BRIDGE_TYPE_1 = 1, /* Used for .1d FIDs. */
};
/* reg_sfgc_bridge_type
@@ -1111,15 +1112,16 @@ MLXSW_ITEM32(reg, sfgc, mid_base, 0x10, 0, 16);
static inline void
mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
enum mlxsw_reg_sfgc_bridge_type bridge_type,
enum mlxsw_reg_bridge_type bridge_type,
enum mlxsw_flood_table_type table_type,
unsigned int flood_table)
unsigned int flood_table, u16 mid_base)
{
MLXSW_REG_ZERO(sfgc, payload);
mlxsw_reg_sfgc_type_set(payload, type);
mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
mlxsw_reg_sfgc_table_type_set(payload, table_type);
mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
mlxsw_reg_sfgc_mid_base_set(payload, mid_base);
}
/* SFDF - Switch Filtering DB Flush
@@ -1960,7 +1962,8 @@ MLXSW_ITEM32(reg, sfmr, smpe, 0x28, 0, 16);
static inline void mlxsw_reg_sfmr_pack(char *payload,
enum mlxsw_reg_sfmr_op op, u16 fid,
u16 fid_offset)
u16 fid_offset, bool flood_rsp,
enum mlxsw_reg_bridge_type bridge_type)
{
MLXSW_REG_ZERO(sfmr, payload);
mlxsw_reg_sfmr_op_set(payload, op);
@@ -1968,6 +1971,8 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
mlxsw_reg_sfmr_vtfp_set(payload, false);
mlxsw_reg_sfmr_vv_set(payload, false);
mlxsw_reg_sfmr_flood_rsp_set(payload, flood_rsp);
mlxsw_reg_sfmr_flood_bridge_type_set(payload, bridge_type);
}
/* SPVMLR - Switch Port VLAN MAC Learning Register
@@ -11,6 +11,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SIZE,
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
MLXSW_RES_ID_PGT_SIZE,
MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
MLXSW_RES_ID_MAX_KVD_ACTION_SETS,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
@@ -69,6 +70,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SIZE] = 0x1001,
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
[MLXSW_RES_ID_PGT_SIZE] = 0x1004,
[MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005,
[MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
@@ -3010,6 +3010,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return err;
}
err = mlxsw_sp_pgt_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
goto err_pgt_init;
}
err = mlxsw_sp_fids_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
@@ -3155,6 +3161,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_ports_create;
}
mlxsw_sp->ubridge = false;
return 0;
err_ports_create:
@@ -3201,6 +3208,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
err_policers_init:
mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
mlxsw_sp_kvdl_fini(mlxsw_sp);
mlxsw_sp_parsing_fini(mlxsw_sp);
return err;
@@ -3234,6 +3243,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
mlxsw_sp->pgt_smpe_index_valid = true;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3267,6 +3277,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3300,6 +3311,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3333,6 +3345,7 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3365,6 +3378,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_policers_fini(mlxsw_sp);
mlxsw_sp_fids_fini(mlxsw_sp);
mlxsw_sp_pgt_fini(mlxsw_sp);
mlxsw_sp_kvdl_fini(mlxsw_sp);
mlxsw_sp_parsing_fini(mlxsw_sp);
}
@@ -143,6 +143,7 @@ struct mlxsw_sp_ptp_ops;
struct mlxsw_sp_span_ops;
struct mlxsw_sp_qdisc_state;
struct mlxsw_sp_mall_entry;
struct mlxsw_sp_pgt;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -216,6 +217,9 @@ struct mlxsw_sp {
u32 lowest_shaper_bs;
struct rhashtable ipv6_addr_ht;
struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */
bool ubridge;
struct mlxsw_sp_pgt *pgt;
bool pgt_smpe_index_valid;
};
struct mlxsw_sp_ptp_ops {
@@ -391,6 +395,31 @@ struct mlxsw_sp_port_type_speed_ops {
u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
};
struct mlxsw_sp_ports_bitmap {
unsigned long *bitmap;
unsigned int nbits;
};
static inline int
mlxsw_sp_port_bitmap_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ports_bitmap *ports_bm)
{
unsigned int nbits = mlxsw_core_max_ports(mlxsw_sp->core);
ports_bm->nbits = nbits;
ports_bm->bitmap = bitmap_zalloc(nbits, GFP_KERNEL);
if (!ports_bm->bitmap)
return -ENOMEM;
return 0;
}
static inline void
mlxsw_sp_port_bitmap_fini(struct mlxsw_sp_ports_bitmap *ports_bm)
{
bitmap_free(ports_bm->bitmap);
}
static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
bool *trap_en)
{
@@ -1447,4 +1476,16 @@ int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core);
/* spectrum_pgt.c */
int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid);
void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base);
int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
u16 count);
void mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
u16 count);
int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
u16 smpe, u16 local_port, bool member);
int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp);
#endif
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include <linux/refcount.h>
#include <linux/idr.h>
#include "spectrum.h"
#include "reg.h"
struct mlxsw_sp_pgt {
struct idr pgt_idr;
u16 end_index; /* Exclusive. */
struct mutex lock; /* Protects PGT. */
bool smpe_index_valid;
};
struct mlxsw_sp_pgt_entry {
struct list_head ports_list;
u16 index;
u16 smpe_index;
};
struct mlxsw_sp_pgt_entry_port {
struct list_head list; /* Member of 'ports_list'. */
u16 local_port;
};
int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid)
{
int index, err = 0;
mutex_lock(&mlxsw_sp->pgt->lock);
index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0,
mlxsw_sp->pgt->end_index, GFP_KERNEL);
if (index < 0) {
err = index;
goto err_idr_alloc;
}
*p_mid = index;
mutex_unlock(&mlxsw_sp->pgt->lock);
return 0;
err_idr_alloc:
mutex_unlock(&mlxsw_sp->pgt->lock);
return err;
}
void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base)
{
mutex_lock(&mlxsw_sp->pgt->lock);
WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base));
mutex_unlock(&mlxsw_sp->pgt->lock);
}
int
mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
{
unsigned int idr_cursor;
int i, err;
mutex_lock(&mlxsw_sp->pgt->lock);
/* This function is supposed to be called several times as part of
* driver init, in specific order. Verify that the mid_index is the
* first free index in the idr, to be able to free the indexes in case
* of error.
*/
idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);
if (WARN_ON(idr_cursor != mid_base)) {
err = -EINVAL;
goto err_idr_cursor;
}
for (i = 0; i < count; i++) {
err = idr_alloc_cyclic(&mlxsw_sp->pgt->pgt_idr, NULL,
mid_base, mid_base + count, GFP_KERNEL);
if (err < 0)
goto err_idr_alloc_cyclic;
}
mutex_unlock(&mlxsw_sp->pgt->lock);
return 0;
err_idr_alloc_cyclic:
for (i--; i >= 0; i--)
idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base + i);
err_idr_cursor:
mutex_unlock(&mlxsw_sp->pgt->lock);
return err;
}
void
mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
{
struct idr *pgt_idr = &mlxsw_sp->pgt->pgt_idr;
int i;
mutex_lock(&mlxsw_sp->pgt->lock);
for (i = 0; i < count; i++)
WARN_ON_ONCE(idr_remove(pgt_idr, mid_base + i));
mutex_unlock(&mlxsw_sp->pgt->lock);
}
static struct mlxsw_sp_pgt_entry_port *
mlxsw_sp_pgt_entry_port_lookup(struct mlxsw_sp_pgt_entry *pgt_entry,
u16 local_port)
{
struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
list_for_each_entry(pgt_entry_port, &pgt_entry->ports_list, list) {
if (pgt_entry_port->local_port == local_port)
return pgt_entry_port;
}
return NULL;
}
static struct mlxsw_sp_pgt_entry *
mlxsw_sp_pgt_entry_create(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
{
struct mlxsw_sp_pgt_entry *pgt_entry;
void *ret;
int err;
pgt_entry = kzalloc(sizeof(*pgt_entry), GFP_KERNEL);
if (!pgt_entry)
return ERR_PTR(-ENOMEM);
ret = idr_replace(&pgt->pgt_idr, pgt_entry, mid);
if (IS_ERR(ret)) {
err = PTR_ERR(ret);
goto err_idr_replace;
}
INIT_LIST_HEAD(&pgt_entry->ports_list);
pgt_entry->index = mid;
pgt_entry->smpe_index = smpe;
return pgt_entry;
err_idr_replace:
kfree(pgt_entry);
return ERR_PTR(err);
}
static void mlxsw_sp_pgt_entry_destroy(struct mlxsw_sp_pgt *pgt,
struct mlxsw_sp_pgt_entry *pgt_entry)
{
WARN_ON(!list_empty(&pgt_entry->ports_list));
pgt_entry = idr_replace(&pgt->pgt_idr, NULL, pgt_entry->index);
if (WARN_ON(IS_ERR(pgt_entry)))
return;
kfree(pgt_entry);
}
static struct mlxsw_sp_pgt_entry *
mlxsw_sp_pgt_entry_get(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
{
struct mlxsw_sp_pgt_entry *pgt_entry;
pgt_entry = idr_find(&pgt->pgt_idr, mid);
if (pgt_entry)
return pgt_entry;
return mlxsw_sp_pgt_entry_create(pgt, mid, smpe);
}
static void mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid)
{
struct mlxsw_sp_pgt_entry *pgt_entry;
pgt_entry = idr_find(&pgt->pgt_idr, mid);
if (WARN_ON(!pgt_entry))
return;
if (list_empty(&pgt_entry->ports_list))
mlxsw_sp_pgt_entry_destroy(pgt, pgt_entry);
}
static void mlxsw_sp_pgt_smid2_port_set(char *smid2_pl, u16 local_port,
bool member)
{
mlxsw_reg_smid2_port_set(smid2_pl, local_port, member);
mlxsw_reg_smid2_port_mask_set(smid2_pl, local_port, 1);
}
static int
mlxsw_sp_pgt_entry_port_write(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_pgt_entry *pgt_entry,
u16 local_port, bool member)
{
bool smpe_index_valid;
char *smid2_pl;
u16 smpe;
int err;
smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
if (!smid2_pl)
return -ENOMEM;
smpe_index_valid = mlxsw_sp->ubridge ? mlxsw_sp->pgt->smpe_index_valid :
false;
smpe = mlxsw_sp->ubridge ? pgt_entry->smpe_index : 0;
mlxsw_reg_smid2_pack(smid2_pl, pgt_entry->index, 0, 0, smpe_index_valid,
smpe);
mlxsw_sp_pgt_smid2_port_set(smid2_pl, local_port, member);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
kfree(smid2_pl);
return err;
}
static struct mlxsw_sp_pgt_entry_port *
mlxsw_sp_pgt_entry_port_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_pgt_entry *pgt_entry,
u16 local_port)
{
struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
int err;
pgt_entry_port = kzalloc(sizeof(*pgt_entry_port), GFP_KERNEL);
if (!pgt_entry_port)
return ERR_PTR(-ENOMEM);
err = mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry, local_port,
true);
if (err)
goto err_pgt_entry_port_write;
pgt_entry_port->local_port = local_port;
list_add(&pgt_entry_port->list, &pgt_entry->ports_list);
return pgt_entry_port;
err_pgt_entry_port_write:
kfree(pgt_entry_port);
return ERR_PTR(err);
}
static void
mlxsw_sp_pgt_entry_port_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_pgt_entry *pgt_entry,
struct mlxsw_sp_pgt_entry_port *pgt_entry_port)
{
list_del(&pgt_entry_port->list);
mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry,
pgt_entry_port->local_port, false);
kfree(pgt_entry_port);
}
static int mlxsw_sp_pgt_entry_port_add(struct mlxsw_sp *mlxsw_sp, u16 mid,
u16 smpe, u16 local_port)
{
struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
struct mlxsw_sp_pgt_entry *pgt_entry;
int err;
mutex_lock(&mlxsw_sp->pgt->lock);
pgt_entry = mlxsw_sp_pgt_entry_get(mlxsw_sp->pgt, mid, smpe);
if (IS_ERR(pgt_entry)) {
err = PTR_ERR(pgt_entry);
goto err_pgt_entry_get;
}
pgt_entry_port = mlxsw_sp_pgt_entry_port_create(mlxsw_sp, pgt_entry,
local_port);
if (IS_ERR(pgt_entry_port)) {
err = PTR_ERR(pgt_entry_port);
goto err_pgt_entry_port_get;
}
mutex_unlock(&mlxsw_sp->pgt->lock);
return 0;
err_pgt_entry_port_get:
mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
err_pgt_entry_get:
mutex_unlock(&mlxsw_sp->pgt->lock);
return err;
}
static void mlxsw_sp_pgt_entry_port_del(struct mlxsw_sp *mlxsw_sp,
u16 mid, u16 smpe, u16 local_port)
{
struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
struct mlxsw_sp_pgt_entry *pgt_entry;
mutex_lock(&mlxsw_sp->pgt->lock);
pgt_entry = idr_find(&mlxsw_sp->pgt->pgt_idr, mid);
if (!pgt_entry)
goto out;
pgt_entry_port = mlxsw_sp_pgt_entry_port_lookup(pgt_entry, local_port);
if (!pgt_entry_port)
goto out;
mlxsw_sp_pgt_entry_port_destroy(mlxsw_sp, pgt_entry, pgt_entry_port);
mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
out:
mutex_unlock(&mlxsw_sp->pgt->lock);
}
int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
u16 smpe, u16 local_port, bool member)
{
if (member)
return mlxsw_sp_pgt_entry_port_add(mlxsw_sp, mid, smpe,
local_port);
mlxsw_sp_pgt_entry_port_del(mlxsw_sp, mid, smpe, local_port);
return 0;
}
int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_pgt *pgt;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, PGT_SIZE))
return -EIO;
pgt = kzalloc(sizeof(*mlxsw_sp->pgt), GFP_KERNEL);
if (!pgt)
return -ENOMEM;
idr_init(&pgt->pgt_idr);
pgt->end_index = MLXSW_CORE_RES_GET(mlxsw_sp->core, PGT_SIZE);
mutex_init(&pgt->lock);
pgt->smpe_index_valid = mlxsw_sp->pgt_smpe_index_valid;
mlxsw_sp->pgt = pgt;
return 0;
}
void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp)
{
mutex_destroy(&mlxsw_sp->pgt->lock);
WARN_ON(!idr_is_empty(&mlxsw_sp->pgt->pgt_idr));
idr_destroy(&mlxsw_sp->pgt->pgt_idr);
kfree(mlxsw_sp->pgt);
}
@@ -1646,9 +1646,10 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
return err;
}
static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
long *ports_bitmap,
bool set_router_port)
static int
mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
const struct mlxsw_sp_ports_bitmap *ports_bm,
bool set_router_port)
{
char *smid2_pl;
int err, i;
@@ -1666,7 +1667,7 @@ static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
mlxsw_reg_smid2_port_mask_set(smid2_pl,
mlxsw_sp_router_port(mlxsw_sp), 1);
for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits)
mlxsw_reg_smid2_port_set(smid2_pl, i, 1);
mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp),
@@ -1712,14 +1713,14 @@ mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
static void
mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
unsigned long *ports_bitmap)
struct mlxsw_sp_ports_bitmap *ports_bm)
{
struct mlxsw_sp_port *mlxsw_sp_port;
u64 max_lag_members, i;
int lag_id;
if (!bridge_port->lagged) {
set_bit(bridge_port->system_port, ports_bitmap);
set_bit(bridge_port->system_port, ports_bm->bitmap);
} else {
max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
MAX_LAG_MEMBERS);
@@ -1729,13 +1730,13 @@ mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
lag_id, i);
if (mlxsw_sp_port)
set_bit(mlxsw_sp_port->local_port,
ports_bitmap);
ports_bm->bitmap);
}
}
}
static void
mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm,
struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp *mlxsw_sp)
{
@@ -1745,7 +1746,7 @@ mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
if (bridge_port->mrouter) {
mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
bridge_port,
flood_bitmap);
flood_bm);
}
}
}
@@ -1755,8 +1756,7 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mid *mid,
struct mlxsw_sp_bridge_device *bridge_device)
{
long *flood_bitmap;
int num_of_ports;
struct mlxsw_sp_ports_bitmap flood_bitmap;
u16 mid_idx;
int err;
@@ -1765,18 +1765,17 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
if (mid_idx == MLXSW_SP_MID_MAX)
return -ENOBUFS;
num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
flood_bitmap = bitmap_alloc(num_of_ports, GFP_KERNEL);
if (!flood_bitmap)
return -ENOMEM;
err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &flood_bitmap);
if (err)
return err;
bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
bitmap_copy(flood_bitmap.bitmap, mid->ports_in_mid, flood_bitmap.nbits);
mlxsw_sp_mc_get_mrouters_bitmap(&flood_bitmap, bridge_device, mlxsw_sp);
mid->mid = mid_idx;
err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, &flood_bitmap,
bridge_device->mrouter);
bitmap_free(flood_bitmap);
mlxsw_sp_port_bitmap_fini(&flood_bitmap);
if (err)
return err;