Commit 5ee38975 authored by David S. Miller

Merge branch 'mlxsw-fib-offload'

Jiri Pirko says:

====================
mlxsw: Implement basic FIB offload and router interfaces

Introduce LPM tree management, including virtual router management, for the
HW. Implement basic FIB offloading using switchdev FIB objects. For now, only
local routes and directly connected routes are supported (next-hop support
will be introduced in a follow-up patchset).

Patches 10-14 introduce router interfaces.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f48cc6b2 99f44bb3
@@ -3,7 +3,7 @@
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
* Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
* Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -3454,6 +3454,436 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
}
/* RALTA - Router Algorithmic LPM Tree Allocation Register
* -------------------------------------------------------
* RALTA is used to allocate the LPM trees of the SHSPM method.
*/
#define MLXSW_REG_RALTA_ID 0x8010
#define MLXSW_REG_RALTA_LEN 0x04
static const struct mlxsw_reg_info mlxsw_reg_ralta = {
.id = MLXSW_REG_RALTA_ID,
.len = MLXSW_REG_RALTA_LEN,
};
/* reg_ralta_op
* opcode (valid for Write, must be 0 on Read)
* 0 - allocate a tree
* 1 - deallocate a tree
* Access: OP
*/
MLXSW_ITEM32(reg, ralta, op, 0x00, 28, 2);
enum mlxsw_reg_ralxx_protocol {
MLXSW_REG_RALXX_PROTOCOL_IPV4,
MLXSW_REG_RALXX_PROTOCOL_IPV6,
};
/* reg_ralta_protocol
* Protocol.
* Deallocation opcode: Reserved.
* Access: RW
*/
MLXSW_ITEM32(reg, ralta, protocol, 0x00, 24, 4);
/* reg_ralta_tree_id
* Tree identifier, managed by software and numbered from
* 1..cap_shspm_max_trees-1.
* Note that tree_id 0 is allocated for a default-route tree.
* Access: Index
*/
MLXSW_ITEM32(reg, ralta, tree_id, 0x00, 0, 8);
static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc,
enum mlxsw_reg_ralxx_protocol protocol,
u8 tree_id)
{
MLXSW_REG_ZERO(ralta, payload);
mlxsw_reg_ralta_op_set(payload, !alloc);
mlxsw_reg_ralta_protocol_set(payload, protocol);
mlxsw_reg_ralta_tree_id_set(payload, tree_id);
}
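For illustration, a minimal sketch of a driver-side caller for this helper (the wrapper name is hypothetical and not part of this patch; the mlxsw_reg_write()/MLXSW_REG() pattern is the driver's usual register access):

/* Hypothetical wrapper: allocate LPM tree 'tree_id' for IPv4. */
static int example_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp, u8 tree_id)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}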
/* RALST - Router Algorithmic LPM Structure Tree Register
* ------------------------------------------------------
* RALST is used to set and query the structure of an LPM tree.
* The tree structure must be a sorted binary tree, where each node is a bin
* tagged with the length of the prefixes that a lookup in it will match.
* Therefore, bin X refers to a set of entries with prefixes of X bits to
* match with the destination address. Bin 0 indicates the default action,
* taken when no prefix matches.
*/
#define MLXSW_REG_RALST_ID 0x8011
#define MLXSW_REG_RALST_LEN 0x104
static const struct mlxsw_reg_info mlxsw_reg_ralst = {
.id = MLXSW_REG_RALST_ID,
.len = MLXSW_REG_RALST_LEN,
};
/* reg_ralst_root_bin
* The bin number of the root bin.
* 0 < root_bin <= (length of IP address)
* For a default-route tree configure 0xff
* Access: RW
*/
MLXSW_ITEM32(reg, ralst, root_bin, 0x00, 16, 8);
/* reg_ralst_tree_id
* Tree identifier numbered from 1..(cap_shspm_max_trees-1).
* Access: Index
*/
MLXSW_ITEM32(reg, ralst, tree_id, 0x00, 0, 8);
#define MLXSW_REG_RALST_BIN_NO_CHILD 0xff
#define MLXSW_REG_RALST_BIN_OFFSET 0x04
#define MLXSW_REG_RALST_BIN_COUNT 128
/* reg_ralst_left_child_bin
* Holding the children of the bin according to the stored tree's structure.
* For trees composed of less than 4 blocks, the bins in excess are reserved.
* Note that tree_id 0 is allocated for a default-route tree, bins are 0xff
* Access: RW
*/
MLXSW_ITEM16_INDEXED(reg, ralst, left_child_bin, 0x04, 8, 8, 0x02, 0x00, false);
/* reg_ralst_right_child_bin
* Holding the children of the bin according to the stored tree's structure.
* For trees composed of less than 4 blocks, the bins in excess are reserved.
* Note that tree_id 0 is allocated for a default-route tree, bins are 0xff
* Access: RW
*/
MLXSW_ITEM16_INDEXED(reg, ralst, right_child_bin, 0x04, 0, 8, 0x02, 0x00,
false);
static inline void mlxsw_reg_ralst_pack(char *payload, u8 root_bin, u8 tree_id)
{
MLXSW_REG_ZERO(ralst, payload);
/* Initialize all bins to have no left or right child */
memset(payload + MLXSW_REG_RALST_BIN_OFFSET,
MLXSW_REG_RALST_BIN_NO_CHILD, MLXSW_REG_RALST_BIN_COUNT * 2);
mlxsw_reg_ralst_root_bin_set(payload, root_bin);
mlxsw_reg_ralst_tree_id_set(payload, tree_id);
}
static inline void mlxsw_reg_ralst_bin_pack(char *payload, u8 bin_number,
u8 left_child_bin,
u8 right_child_bin)
{
int bin_index = bin_number - 1;
mlxsw_reg_ralst_left_child_bin_set(payload, bin_index, left_child_bin);
mlxsw_reg_ralst_right_child_bin_set(payload, bin_index,
right_child_bin);
}
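To make the bin chaining concrete, a hedged sketch (hypothetical wrapper, not part of this patch) of describing a tree that holds only /24 and /32 prefixes: the root is the longest used prefix length, and each bin points to the next shorter one through its left child:

/* Tree with bins 32 and 24: lookups try /32 first, then fall through to
 * /24. Bins left at MLXSW_REG_RALST_BIN_NO_CHILD (the mlxsw_reg_ralst_pack
 * default) terminate the walk.
 */
static int example_lpm_tree_struct_set(struct mlxsw_sp *mlxsw_sp, u8 tree_id)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];

	mlxsw_reg_ralst_pack(ralst_pl, 32, tree_id);
	mlxsw_reg_ralst_bin_pack(ralst_pl, 32, 24,
				 MLXSW_REG_RALST_BIN_NO_CHILD);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}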
/* RALTB - Router Algorithmic LPM Tree Binding Register
* ----------------------------------------------------
* RALTB is used to bind virtual router and protocol to an allocated LPM tree.
*/
#define MLXSW_REG_RALTB_ID 0x8012
#define MLXSW_REG_RALTB_LEN 0x04
static const struct mlxsw_reg_info mlxsw_reg_raltb = {
.id = MLXSW_REG_RALTB_ID,
.len = MLXSW_REG_RALTB_LEN,
};
/* reg_raltb_virtual_router
* Virtual Router ID
* Range is 0..cap_max_virtual_routers-1
* Access: Index
*/
MLXSW_ITEM32(reg, raltb, virtual_router, 0x00, 16, 16);
/* reg_raltb_protocol
* Protocol.
* Access: Index
*/
MLXSW_ITEM32(reg, raltb, protocol, 0x00, 12, 4);
/* reg_raltb_tree_id
* Tree to be used for the {virtual_router, protocol}
* Tree identifier numbered from 1..(cap_shspm_max_trees-1).
* By default, all Unicast IPv4 and IPv6 are bound to tree_id 0.
* Access: RW
*/
MLXSW_ITEM32(reg, raltb, tree_id, 0x00, 0, 8);
static inline void mlxsw_reg_raltb_pack(char *payload, u16 virtual_router,
enum mlxsw_reg_ralxx_protocol protocol,
u8 tree_id)
{
MLXSW_REG_ZERO(raltb, payload);
mlxsw_reg_raltb_virtual_router_set(payload, virtual_router);
mlxsw_reg_raltb_protocol_set(payload, protocol);
mlxsw_reg_raltb_tree_id_set(payload, tree_id);
}
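A corresponding hedged sketch of binding a virtual router to an allocated tree (again a hypothetical wrapper; per the tree_id field above, unicast lookups stay bound to tree 0 until rebound):

static int example_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
				 u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, vr_id, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}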
/* RALUE - Router Algorithmic LPM Unicast Entry Register
* -----------------------------------------------------
* RALUE is used to configure and query LPM entries that serve
* the Unicast protocols.
*/
#define MLXSW_REG_RALUE_ID 0x8013
#define MLXSW_REG_RALUE_LEN 0x38
static const struct mlxsw_reg_info mlxsw_reg_ralue = {
.id = MLXSW_REG_RALUE_ID,
.len = MLXSW_REG_RALUE_LEN,
};
/* reg_ralue_protocol
* Protocol.
* Access: Index
*/
MLXSW_ITEM32(reg, ralue, protocol, 0x00, 24, 4);
enum mlxsw_reg_ralue_op {
/* Read operation. If entry doesn't exist, the operation fails. */
MLXSW_REG_RALUE_OP_QUERY_READ = 0,
/* Clear on read operation. Used to read entry and
* clear Activity bit.
*/
MLXSW_REG_RALUE_OP_QUERY_CLEAR = 1,
/* Write operation. Used to write a new entry to the table. All RW
* fields are written for new entry. Activity bit is set
* for new entries.
*/
MLXSW_REG_RALUE_OP_WRITE_WRITE = 0,
/* Update operation. Used to update an existing route entry and
* only update the RW fields that are detailed in the field
* op_u_mask. If entry doesn't exist, the operation fails.
*/
MLXSW_REG_RALUE_OP_WRITE_UPDATE = 1,
/* Clear activity. The Activity bit (the field a) is cleared
* for the entry.
*/
MLXSW_REG_RALUE_OP_WRITE_CLEAR = 2,
/* Delete operation. Used to delete an existing entry. If entry
* doesn't exist, the operation fails.
*/
MLXSW_REG_RALUE_OP_WRITE_DELETE = 3,
};
/* reg_ralue_op
* Operation.
* Access: OP
*/
MLXSW_ITEM32(reg, ralue, op, 0x00, 20, 3);
/* reg_ralue_a
* Activity. Set for new entries. Set if a packet lookup has hit on the
* specific entry, only if the entry is a route. To clear the a bit, use
* "clear activity" op.
* Enabled by activity_dis in RGCR
* Access: RO
*/
MLXSW_ITEM32(reg, ralue, a, 0x00, 16, 1);
/* reg_ralue_virtual_router
* Virtual Router ID
* Range is 0..cap_max_virtual_routers-1
* Access: Index
*/
MLXSW_ITEM32(reg, ralue, virtual_router, 0x04, 16, 16);
#define MLXSW_REG_RALUE_OP_U_MASK_ENTRY_TYPE BIT(0)
#define MLXSW_REG_RALUE_OP_U_MASK_BMP_LEN BIT(1)
#define MLXSW_REG_RALUE_OP_U_MASK_ACTION BIT(2)
/* reg_ralue_op_u_mask
* Opcode update mask.
* Reserved on read operations and on all write opcodes other than update;
* for the update opcode it is a bitmask of the RW fields to be updated.
* Access: WO
*/
MLXSW_ITEM32(reg, ralue, op_u_mask, 0x04, 8, 3);
/* reg_ralue_prefix_len
* Number of bits in the prefix of the LPM route.
* Note that for IPv6 prefixes, if prefix_len>64 the entry consumes
* two entries in the physical HW table.
* Access: Index
*/
MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8);
/* reg_ralue_dip*
* The prefix of the route or of the marker that the object of the LPM
* is compared with. The most significant bits of the dip are the prefix.
* The least significant bits must be '0' if the prefix_len is smaller
* than 128 for IPv6 or smaller than 32 for IPv4.
* IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved.
* Access: Index
*/
MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32);
enum mlxsw_reg_ralue_entry_type {
MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1,
MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY = 2,
MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_AND_ROUTE_ENTRY = 3,
};
/* reg_ralue_entry_type
* Entry type.
* Note - for Marker entries, the action_type and action fields are reserved.
* Access: RW
*/
MLXSW_ITEM32(reg, ralue, entry_type, 0x1C, 30, 2);
/* reg_ralue_bmp_len
* The best match prefix length in the case that there is no match for
* longer prefixes.
* If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len
* Note for any update operation with entry_type modification this
* field must be set.
* Access: RW
*/
MLXSW_ITEM32(reg, ralue, bmp_len, 0x1C, 16, 8);
enum mlxsw_reg_ralue_action_type {
MLXSW_REG_RALUE_ACTION_TYPE_REMOTE,
MLXSW_REG_RALUE_ACTION_TYPE_LOCAL,
MLXSW_REG_RALUE_ACTION_TYPE_IP2ME,
};
/* reg_ralue_action_type
* Action Type
* Indicates how the IP address is connected.
* It can be connected to a local subnet through local_erif or can be
* on a remote subnet connected through a next-hop router,
* or transmitted to the CPU.
* Reserved when entry_type = MARKER_ENTRY
* Access: RW
*/
MLXSW_ITEM32(reg, ralue, action_type, 0x1C, 0, 2);
enum mlxsw_reg_ralue_trap_action {
MLXSW_REG_RALUE_TRAP_ACTION_NOP,
MLXSW_REG_RALUE_TRAP_ACTION_TRAP,
MLXSW_REG_RALUE_TRAP_ACTION_MIRROR_TO_CPU,
MLXSW_REG_RALUE_TRAP_ACTION_MIRROR,
MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR,
};
/* reg_ralue_trap_action
* Trap action.
* For IP2ME action, only NOP and MIRROR are possible.
* Access: RW
*/
MLXSW_ITEM32(reg, ralue, trap_action, 0x20, 28, 4);
/* reg_ralue_trap_id
* Trap ID to be reported to CPU.
* Trap ID is RTR_INGRESS0 or RTR_INGRESS1.
* For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved.
* Access: RW
*/
MLXSW_ITEM32(reg, ralue, trap_id, 0x20, 0, 9);
/* reg_ralue_adjacency_index
* Points to the first entry of the group-based ECMP.
* Only relevant in case of REMOTE action.
* Access: RW
*/
MLXSW_ITEM32(reg, ralue, adjacency_index, 0x24, 0, 24);
/* reg_ralue_ecmp_size
* Number of sequential entries starting from the adjacency_index
* (the size of the ECMP group).
* The valid range is 1-64, 512, 1024, 2048 and 4096.
* Reserved when trap_action is TRAP or DISCARD_ERROR.
* Only relevant in case of REMOTE action.
* Access: RW
*/
MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13);
/* reg_ralue_local_erif
* Egress Router Interface.
* Only relevant in case of LOCAL action.
* Access: RW
*/
MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16);
/* reg_ralue_v
* Valid bit for the tunnel_ptr field.
* If valid = 0 then trap to CPU as IP2ME trap ID.
* If valid = 1 and the packet format allows NVE or IPinIP tunnel
* decapsulation then tunnel decapsulation is done.
* If valid = 1 and packet format does not allow NVE or IPinIP tunnel
* decapsulation then trap as IP2ME trap ID.
* Only relevant in case of IP2ME action.
* Access: RW
*/
MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1);
/* reg_ralue_tunnel_ptr
* Tunnel Pointer for NVE or IPinIP tunnel decapsulation.
* For Spectrum, pointer to KVD Linear.
* Only relevant in case of IP2ME action.
* Access: RW
*/
MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24);
static inline void mlxsw_reg_ralue_pack(char *payload,
enum mlxsw_reg_ralxx_protocol protocol,
enum mlxsw_reg_ralue_op op,
u16 virtual_router, u8 prefix_len)
{
MLXSW_REG_ZERO(ralue, payload);
mlxsw_reg_ralue_protocol_set(payload, protocol);
mlxsw_reg_ralue_op_set(payload, op);
mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
mlxsw_reg_ralue_entry_type_set(payload,
MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY);
mlxsw_reg_ralue_bmp_len_set(payload, prefix_len);
}
static inline void mlxsw_reg_ralue_pack4(char *payload,
enum mlxsw_reg_ralxx_protocol protocol,
enum mlxsw_reg_ralue_op op,
u16 virtual_router, u8 prefix_len,
u32 dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
mlxsw_reg_ralue_dip4_set(payload, dip);
}
static inline void
mlxsw_reg_ralue_act_remote_pack(char *payload,
enum mlxsw_reg_ralue_trap_action trap_action,
u16 trap_id, u32 adjacency_index, u16 ecmp_size)
{
mlxsw_reg_ralue_action_type_set(payload,
MLXSW_REG_RALUE_ACTION_TYPE_REMOTE);
mlxsw_reg_ralue_trap_action_set(payload, trap_action);
mlxsw_reg_ralue_trap_id_set(payload, trap_id);
mlxsw_reg_ralue_adjacency_index_set(payload, adjacency_index);
mlxsw_reg_ralue_ecmp_size_set(payload, ecmp_size);
}
static inline void
mlxsw_reg_ralue_act_local_pack(char *payload,
enum mlxsw_reg_ralue_trap_action trap_action,
u16 trap_id, u16 local_erif)
{
mlxsw_reg_ralue_action_type_set(payload,
MLXSW_REG_RALUE_ACTION_TYPE_LOCAL);
mlxsw_reg_ralue_trap_action_set(payload, trap_action);
mlxsw_reg_ralue_trap_id_set(payload, trap_id);
mlxsw_reg_ralue_local_erif_set(payload, local_erif);
}
static inline void
mlxsw_reg_ralue_act_ip2me_pack(char *payload)
{
mlxsw_reg_ralue_action_type_set(payload,
MLXSW_REG_RALUE_ACTION_TYPE_IP2ME);
}
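Putting the RALUE helpers together, a hedged sketch (hypothetical wrapper, not part of this patch) of writing an IPv4 route with the LOCAL action, as offloading a directly connected route would:

static int example_local_route_write(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
				     u32 dip, u8 prefix_len, u16 erif)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	/* WRITE op creates the entry; dip bits below prefix_len must be 0 */
	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr_id,
			      prefix_len, dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl,
				       MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
				       erif);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}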
/* MFCR - Management Fan Control Register
* --------------------------------------
* This register controls the settings of the Fan Speed PWM mechanism.
@@ -4196,6 +4626,14 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "RGCR";
case MLXSW_REG_RITR_ID:
return "RITR";
case MLXSW_REG_RALTA_ID:
return "RALTA";
case MLXSW_REG_RALST_ID:
return "RALST";
case MLXSW_REG_RALTB_ID:
return "RALTB";
case MLXSW_REG_RALUE_ID:
return "RALUE";
case MLXSW_REG_MFCR_ID:
return "MFCR";
case MLXSW_REG_MFSC_ID:
...
@@ -51,6 +51,7 @@
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
@@ -210,23 +211,6 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid, enum mlxsw_reg_spms_state state)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *spms_pl;
int err;
spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
if (!spms_pl)
return -ENOMEM;
mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
kfree(spms_pl);
return err;
}
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -637,87 +621,6 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
static struct mlxsw_sp_fid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
{
struct mlxsw_sp_fid *f;
list_for_each_entry(f, &mlxsw_sp->port_vfids.list, list) {
if (f->vid == vid)
return f;
}
return NULL;
}
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
MLXSW_SP_VFID_PORT_MAX);
}
static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
char sfmr_pl[MLXSW_REG_SFMR_LEN];
mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
u16 vid)
{
struct device *dev = mlxsw_sp->bus_info->dev;
struct mlxsw_sp_fid *f;
u16 vfid, fid;
int err;
vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
if (vfid == MLXSW_SP_VFID_PORT_MAX) {
dev_err(dev, "No available vFIDs\n");
return ERR_PTR(-ERANGE);
}
fid = mlxsw_sp_vfid_to_fid(vfid);
err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
if (err) {
dev_err(dev, "Failed to create FID=%d\n", fid);
return ERR_PTR(err);
}
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
goto err_allocate_vfid;
f->leave = mlxsw_sp_vport_vfid_leave;
f->fid = fid;
f->vid = vid;
list_add(&f->list, &mlxsw_sp->port_vfids.list);
set_bit(vfid, mlxsw_sp->port_vfids.mapped);
return f;
err_allocate_vfid:
mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
return ERR_PTR(-ENOMEM);
}
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *f)
{
u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
clear_bit(vfid, mlxsw_sp->port_vfids.mapped);
list_del(&f->list);
mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);
kfree(f);
}
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
@@ -750,67 +653,6 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
kfree(mlxsw_sp_vport);
}
static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
bool valid)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
vid);
}
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport)
{
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
struct mlxsw_sp_fid *f;
int err;
f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, vid);
if (!f) {
f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, vid);
if (IS_ERR(f))
return PTR_ERR(f);
}
if (!f->ref_count) {
err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
if (err)
goto err_vport_flood_set;
}
err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
if (err)
goto err_vport_fid_map;
mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
f->ref_count++;
return 0;
err_vport_fid_map:
if (!f->ref_count)
mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
if (!f->ref_count)
mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
return err;
}
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
if (--f->ref_count == 0) {
mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
}
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
u16 vid)
{
@@ -848,12 +690,6 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
}
}
err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
if (err) {
netdev_err(dev, "Failed to join vFID\n");
goto err_vport_vfid_join;
}
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
if (err) {
netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
@@ -867,22 +703,11 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
goto err_port_add_vid;
}
err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
MLXSW_REG_SPMS_STATE_FORWARDING);
if (err) {
netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
goto err_port_stp_state_set;
}
return 0;
err_port_stp_state_set:
mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
err_vport_vfid_join:
if (list_is_singular(&mlxsw_sp_port->vports_list))
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
@@ -910,13 +735,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
}
err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
MLXSW_REG_SPMS_STATE_DISCARDING);
if (err) {
netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
return err;
}
err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
if (err) {
netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
@@ -2417,8 +2235,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->core = mlxsw_core;
mlxsw_sp->bus_info = mlxsw_bus_info;
INIT_LIST_HEAD(&mlxsw_sp->fids);
INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
err = mlxsw_sp_base_mac_get(mlxsw_sp);
@@ -2503,6 +2320,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
WARN_ON(!list_empty(&mlxsw_sp->fids));
for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
WARN_ON_ONCE(mlxsw_sp->rifs[i]);
@@ -2567,6 +2385,559 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.profile = &mlxsw_sp_config_profile,
};
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
struct net_device *lower_dev;
struct list_head *iter;
if (mlxsw_sp_port_dev_check(dev))
return netdev_priv(dev);
netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
if (mlxsw_sp_port_dev_check(lower_dev))
return netdev_priv(lower_dev);
}
return NULL;
}
static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port;
mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}
static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
struct net_device *lower_dev;
struct list_head *iter;
if (mlxsw_sp_port_dev_check(dev))
return netdev_priv(dev);
netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
if (mlxsw_sp_port_dev_check(lower_dev))
return netdev_priv(lower_dev);
}
return NULL;
}
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port;
rcu_read_lock();
mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
if (mlxsw_sp_port)
dev_hold(mlxsw_sp_port->dev);
rcu_read_unlock();
return mlxsw_sp_port;
}
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
dev_put(mlxsw_sp_port->dev);
}
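A hedged sketch of the intended hold/put usage for these two exported helpers (the caller name is hypothetical):

static void example_use_lower_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
	if (!mlxsw_sp_port)
		return;
	/* ... safe to dereference mlxsw_sp_port here ... */
	mlxsw_sp_port_dev_put(mlxsw_sp_port);
}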
static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
unsigned long event)
{
switch (event) {
case NETDEV_UP:
if (!r)
return true;
r->ref_count++;
return false;
case NETDEV_DOWN:
if (r && --r->ref_count == 0)
return true;
/* It is possible we already removed the RIF ourselves
* if it was assigned to a netdev that is now a bridge
* or LAG slave.
*/
return false;
}
return false;
}
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
int i;
for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
if (!mlxsw_sp->rifs[i])
return i;
return MLXSW_SP_RIF_MAX;
}
static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
bool *p_lagged, u16 *p_system_port)
{
u8 local_port = mlxsw_sp_vport->local_port;
*p_lagged = mlxsw_sp_vport->lagged;
*p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
}
static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *l3_dev, u16 rif,
bool create)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
bool lagged = mlxsw_sp_vport->lagged;
char ritr_pl[MLXSW_REG_RITR_LEN];
u16 system_port;
mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
l3_dev->mtu, l3_dev->dev_addr);
mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
static struct mlxsw_sp_fid *
mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
{
struct mlxsw_sp_fid *f;
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
return NULL;
f->leave = mlxsw_sp_vport_rif_sp_leave;
f->ref_count = 0;
f->dev = l3_dev;
f->fid = fid;
return f;
}
static struct mlxsw_sp_rif *
mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
{
struct mlxsw_sp_rif *r;
r = kzalloc(sizeof(*r), GFP_KERNEL);
if (!r)
return NULL;
ether_addr_copy(r->addr, l3_dev->dev_addr);
r->mtu = l3_dev->mtu;
r->ref_count = 1;
r->dev = l3_dev;
r->rif = rif;
r->f = f;
return r;
}
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *l3_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
struct mlxsw_sp_fid *f;
struct mlxsw_sp_rif *r;
u16 fid, rif;
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
if (rif == MLXSW_SP_RIF_MAX)
return ERR_PTR(-ERANGE);
err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
if (err)
return ERR_PTR(err);
fid = mlxsw_sp_rif_sp_to_fid(rif);
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
if (err)
goto err_rif_fdb_op;
f = mlxsw_sp_rfid_alloc(fid, l3_dev);
if (!f) {
err = -ENOMEM;
goto err_rfid_alloc;
}
r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
if (!r) {
err = -ENOMEM;
goto err_rif_alloc;
}
f->r = r;
mlxsw_sp->rifs[rif] = r;
return r;
err_rif_alloc:
kfree(f);
err_rfid_alloc:
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
return ERR_PTR(err);
}
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
struct mlxsw_sp_rif *r)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
struct net_device *l3_dev = r->dev;
struct mlxsw_sp_fid *f = r->f;
u16 fid = f->fid;
u16 rif = r->rif;
mlxsw_sp->rifs[rif] = NULL;
f->r = NULL;
kfree(r);
kfree(f);
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
}
static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *l3_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
struct mlxsw_sp_rif *r;
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (!r) {
r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
if (IS_ERR(r))
return PTR_ERR(r);
}
mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
r->f->ref_count++;
netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
return 0;
}
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
if (--f->ref_count == 0)
mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}
static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
struct net_device *port_dev,
unsigned long event, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
struct mlxsw_sp_port *mlxsw_sp_vport;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_vport))
return -EINVAL;
switch (event) {
case NETDEV_UP:
return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
case NETDEV_DOWN:
mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
break;
}
return 0;
}
static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
unsigned long event)
{
if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
return 0;
return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
}
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
struct net_device *lag_dev,
unsigned long event, u16 vid)
{
struct net_device *port_dev;
struct list_head *iter;
int err;
netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
if (mlxsw_sp_port_dev_check(port_dev)) {
err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
event, vid);
if (err)
return err;
}
}
return 0;
}
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
unsigned long event)
{
if (netif_is_bridge_port(lag_dev))
return 0;
return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev)
{
u16 fid;
if (is_vlan_dev(l3_dev))
fid = vlan_dev_vlan_id(l3_dev);
else if (mlxsw_sp->master_bridge.dev == l3_dev)
fid = 1;
else
return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
return mlxsw_sp_fid_find(mlxsw_sp, fid);
}
static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
{
if (mlxsw_sp_fid_is_vfid(fid))
return MLXSW_REG_RITR_FID_IF;
else
return MLXSW_REG_RITR_VLAN_IF;
}
static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev,
u16 fid, u16 rif,
bool create)
{
enum mlxsw_reg_ritr_if_type rif_type;
char ritr_pl[MLXSW_REG_RITR_LEN];
rif_type = mlxsw_sp_rif_type_get(fid);
mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
l3_dev->dev_addr);
mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev,
struct mlxsw_sp_fid *f)
{
struct mlxsw_sp_rif *r;
u16 rif;
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
if (rif == MLXSW_SP_RIF_MAX)
return -ERANGE;
err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
if (err)
return err;
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
if (err)
goto err_rif_fdb_op;
r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
if (!r) {
err = -ENOMEM;
goto err_rif_alloc;
}
f->r = r;
mlxsw_sp->rifs[rif] = r;
netdev_dbg(l3_dev, "RIF=%d created\n", rif);
return 0;
err_rif_alloc:
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
return err;
}
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *r)
{
struct net_device *l3_dev = r->dev;
struct mlxsw_sp_fid *f = r->f;
u16 rif = r->rif;
mlxsw_sp->rifs[rif] = NULL;
f->r = NULL;
kfree(r);
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
struct net_device *br_dev,
unsigned long event)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
struct mlxsw_sp_fid *f;
/* FID can either be an actual FID if the L3 device is the
* VLAN-aware bridge or a VLAN device on top. Otherwise, the
* L3 device is a VLAN-unaware bridge and we get a vFID.
*/
f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
if (WARN_ON(!f))
return -EINVAL;
switch (event) {
case NETDEV_UP:
return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
case NETDEV_DOWN:
mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
break;
}
return 0;
}
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
unsigned long event)
{
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
if (mlxsw_sp_port_dev_check(real_dev))
return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
vid);
else if (netif_is_lag_master(real_dev))
return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
vid);
else if (netif_is_bridge_master(real_dev) &&
mlxsw_sp->master_bridge.dev == real_dev)
return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
event);
return 0;
}
static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
struct net_device *dev = ifa->ifa_dev->dev;
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *r;
int err = 0;
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
goto out;
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(r, event))
goto out;
if (mlxsw_sp_port_dev_check(dev))
err = mlxsw_sp_inetaddr_port_event(dev, event);
else if (netif_is_lag_master(dev))
err = mlxsw_sp_inetaddr_lag_event(dev, event);
else if (netif_is_bridge_master(dev))
err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
else if (is_vlan_dev(dev))
err = mlxsw_sp_inetaddr_vlan_event(dev, event);
out:
return notifier_from_errno(err);
}
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
const char *mac, int mtu)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
int err;
mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
if (err)
return err;
mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *r;
int err;
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
return 0;
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!r)
return 0;
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
if (err)
return err;
err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
if (err)
goto err_rif_edit;
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
if (err)
goto err_rif_fdb_op;
ether_addr_copy(r->addr, dev->dev_addr);
r->mtu = dev->mtu;
netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
return 0;
err_rif_fdb_op:
mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
return err;
}
static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
u16 fid)
{
@@ -2647,9 +3018,15 @@ int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}
static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_fid *f, *tmp;
list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
if (--f->ref_count == 0)
mlxsw_sp_fid_destroy(mlxsw_sp, f);
else
WARN_ON_ONCE(1);
}
static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
@@ -2668,8 +3045,15 @@ static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
if (--mlxsw_sp->master_bridge.ref_count == 0) {
mlxsw_sp->master_bridge.dev = NULL;
/* It's possible upper VLAN devices are still holding
* references to underlying FIDs. Drop the reference
* and release the resources if it was the last one.
* If it wasn't, then something bad happened.
*/
mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
}
}
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -3138,47 +3522,97 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
return 0;
}
static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
struct net_device *vlan_dev)
{
u16 fid = vlan_dev_vlan_id(vlan_dev);
struct mlxsw_sp_fid *f;
f = mlxsw_sp_fid_find(mlxsw_sp, fid);
if (!f) {
f = mlxsw_sp_fid_create(mlxsw_sp, fid);
if (IS_ERR(f))
return PTR_ERR(f);
}
f->ref_count++;
return 0;
}
static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
struct net_device *vlan_dev)
{
u16 fid = vlan_dev_vlan_id(vlan_dev);
struct mlxsw_sp_fid *f;
f = mlxsw_sp_fid_find(mlxsw_sp, fid);
if (f && f->r)
mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
if (f && --f->ref_count == 0)
mlxsw_sp_fid_destroy(mlxsw_sp, f);
}
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
unsigned long event, void *ptr)
{
struct netdev_notifier_changeupper_info *info;
struct net_device *upper_dev;
struct mlxsw_sp *mlxsw_sp;
int err;
mlxsw_sp = mlxsw_sp_lower_get(br_dev);
if (!mlxsw_sp)
return 0;
if (br_dev != mlxsw_sp->master_bridge.dev)
return 0;
info = ptr;
switch (event) {
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (!is_vlan_dev(upper_dev))
break;
if (info->linking) {
err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
upper_dev);
if (err)
return err;
} else {
mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
}
break;
}
return 0;
}
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
return find_first_zero_bit(mlxsw_sp->vfids.mapped,
MLXSW_SP_VFID_MAX);
}
static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
char sfmr_pl[MLXSW_REG_SFMR_LEN];
mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
struct net_device *br_dev)
{
struct device *dev = mlxsw_sp->bus_info->dev;
struct mlxsw_sp_fid *f;
u16 vfid, fid;
int err;
vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
if (vfid == MLXSW_SP_VFID_MAX) {
dev_err(dev, "No available vFIDs\n");
return ERR_PTR(-ERANGE);
@@ -3195,12 +3629,12 @@ static struct mlxsw_sp_fid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
if (!f)
goto err_allocate_vfid;
f->leave = mlxsw_sp_vport_vfid_leave;
f->fid = fid;
f->dev = br_dev;
list_add(&f->list, &mlxsw_sp->vfids.list);
set_bit(vfid, mlxsw_sp->vfids.mapped);
return f;
@@ -3209,29 +3643,42 @@ static struct mlxsw_sp_fid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
}
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *f)
{
u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
u16 fid = f->fid;
clear_bit(vfid, mlxsw_sp->vfids.mapped);
list_del(&f->list);
if (f->r)
mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
kfree(f);
mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
bool valid)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
vid);
}
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *br_dev)
{
struct mlxsw_sp_fid *f;
int err;
f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
if (!f) {
f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
if (IS_ERR(f))
return PTR_ERR(f);
}
@@ -3255,11 +3702,11 @@ static int mlxsw_sp_vport_br_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
if (!f->ref_count)
mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
return err;
}
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
@@ -3273,22 +3720,24 @@ static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
if (--f->ref_count == 0)
mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *br_dev)
{
struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
struct net_device *dev = mlxsw_sp_vport->dev;
int err;
if (f && !WARN_ON(!f->leave))
f->leave(mlxsw_sp_vport);
err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
if (err) {
netdev_err(dev, "Failed to join vFID\n");
return err;
}
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
@@ -3305,9 +3754,7 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
return 0;
err_port_vid_learning_set:
mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
return err;
}
@@ -3317,12 +3764,7 @@ static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
mlxsw_sp_vport->learning = 0;
mlxsw_sp_vport->learning_sync = 0;
@@ -3338,7 +3780,7 @@ mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
vport.list) {
struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
if (dev && dev == br_dev)
return false;
@@ -3432,10 +3874,14 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
int err = 0;
if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
err = mlxsw_sp_netdevice_router_port_event(dev);
else if (mlxsw_sp_port_dev_check(dev))
err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
else if (netif_is_lag_master(dev))
err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
else if (netif_is_bridge_master(dev))
err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
else if (is_vlan_dev(dev))
err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
@@ -3446,11 +3892,17 @@ static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
.notifier_call = mlxsw_sp_netdevice_event,
};
static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
.notifier_call = mlxsw_sp_inetaddr_event,
.priority = 10, /* Must be called before FIB notifier block */
};
static int __init mlxsw_sp_module_init(void)
{
int err;
register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
err = mlxsw_core_driver_register(&mlxsw_sp_driver);
if (err)
goto err_core_driver_register;
@@ -3464,6 +3916,7 @@ static int __init mlxsw_sp_module_init(void)
static void __exit mlxsw_sp_module_exit(void)
{
mlxsw_core_driver_unregister(&mlxsw_sp_driver);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
...
@@ -43,15 +43,17 @@
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <net/switchdev.h>
#include "port.h"
#include "core.h"
#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
#define MLXSW_SP_RFID_BASE 15360
#define MLXSW_SP_RIF_MAX 800
#define MLXSW_SP_LAG_MAX 64
#define MLXSW_SP_PORT_PER_LAG_MAX 16
@@ -60,6 +62,12 @@
#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
#define MLXSW_SP_LPM_TREE_MAX 22
#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
#define MLXSW_SP_BYTES_PER_CELL 96
@@ -74,8 +82,6 @@
#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
#define MLXSW_SP_RIF_MAX 800
static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
{
delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
@@ -94,12 +100,16 @@ struct mlxsw_sp_fid {
struct list_head list;
unsigned int ref_count;
struct net_device *dev;
struct mlxsw_sp_rif *r;
u16 fid;
};
struct mlxsw_sp_rif {
struct net_device *dev;
unsigned int ref_count;
struct mlxsw_sp_fid *f;
unsigned char addr[ETH_ALEN];
int mtu;
u16 rif;
};
@@ -123,7 +133,17 @@ static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
}
static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
{
return fid >= MLXSW_SP_RFID_BASE;
}
static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
{
return MLXSW_SP_RFID_BASE + rif;
} }
struct mlxsw_sp_sb_pr {
@@ -160,15 +180,45 @@ struct mlxsw_sp_sb {
} ports[MLXSW_PORT_MAX_PORTS];
};
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
struct mlxsw_sp_prefix_usage {
DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};
enum mlxsw_sp_l3proto {
MLXSW_SP_L3_PROTO_IPV4,
MLXSW_SP_L3_PROTO_IPV6,
};
struct mlxsw_sp_lpm_tree {
u8 id; /* tree ID */
unsigned int ref_count;
enum mlxsw_sp_l3proto proto;
struct mlxsw_sp_prefix_usage prefix_usage;
};
struct mlxsw_sp_fib;
struct mlxsw_sp_vr {
u16 id; /* virtual router ID */
bool used;
enum mlxsw_sp_l3proto proto;
u32 tb_id; /* kernel fib table id */
struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
};
struct mlxsw_sp_router {
struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
};
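As an aside, a plausible sketch (hypothetical helper names) of how the prefix_usage bitmap above could be maintained and compared; each set bit records that the FIB holds at least one route of that prefix length, which is exactly what the bound LPM tree's bins must cover:

static inline void
example_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			 unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static inline bool
example_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}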
struct mlxsw_sp {
struct {
struct list_head list;
DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
} vfids;
struct {
struct list_head list;
DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
@@ -192,6 +242,7 @@ struct mlxsw_sp {
struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
u8 port_to_module[MLXSW_PORT_MAX_PORTS];
struct mlxsw_sp_sb sb;
struct mlxsw_sp_router router;
};
static inline struct mlxsw_sp_upper *
@@ -250,6 +301,9 @@ struct mlxsw_sp_port {
struct list_head vports_list;
};
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -295,7 +349,7 @@ mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
}
static inline struct net_device *
mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
@@ -333,6 +387,31 @@ mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
return NULL;
}
static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
u16 fid)
{
struct mlxsw_sp_fid *f;
list_for_each_entry(f, &mlxsw_sp->fids, list)
if (f->fid == fid)
return f;
return NULL;
}
static inline struct mlxsw_sp_fid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev)
{
struct mlxsw_sp_fid *f;
list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
if (f->dev == br_dev)
return f;
return NULL;
}
static inline struct mlxsw_sp_rif * static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev) const struct net_device *dev)
...@@ -403,6 +482,12 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, ...@@ -403,6 +482,12 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid); int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *r);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight); bool dwrr, u8 dwrr_weight);
...@@ -434,5 +519,10 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) ...@@ -434,5 +519,10 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_ipv4_fib *fib4,
struct switchdev_trans *trans);
int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_ipv4_fib *fib4);
#endif #endif
...@@ -35,11 +35,515 @@ ...@@ -35,11 +35,515 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include "spectrum.h" #include "spectrum.h"
#include "core.h" #include "core.h"
#include "reg.h" #include "reg.h"
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
static bool
mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
struct mlxsw_sp_prefix_usage *prefix_usage2)
{
unsigned char prefix;
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
if (!test_bit(prefix, prefix_usage2->b))
return false;
}
return true;
}
static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
struct mlxsw_sp_prefix_usage *prefix_usage2)
{
return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}
static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}
static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
struct mlxsw_sp_prefix_usage *prefix_usage2)
{
memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}
static void
mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
{
memset(prefix_usage, 0, sizeof(*prefix_usage));
}
static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
unsigned char prefix_len)
{
set_bit(prefix_len, prefix_usage->b);
}
static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
unsigned char prefix_len)
{
clear_bit(prefix_len, prefix_usage->b);
}
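A minimal sketch of how these bitmap helpers compose on route addition (illustrative only; the real callers appear further below):
struct mlxsw_sp_prefix_usage req_prefix_usage;
mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
mlxsw_sp_prefix_usage_set(&req_prefix_usage, 24); /* a /24 route is needed */
mlxsw_sp_prefix_usage_set(&req_prefix_usage, 32); /* a /32 route is needed */
/* req_prefix_usage is now a subset of any usage whose bins include
* prefix lengths 24 and 32.
*/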
struct mlxsw_sp_fib_key {
unsigned char addr[sizeof(struct in6_addr)];
unsigned char prefix_len;
};
enum mlxsw_sp_fib_entry_type {
MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
};
struct mlxsw_sp_fib_entry {
struct rhash_head ht_node;
struct mlxsw_sp_fib_key key;
enum mlxsw_sp_fib_entry_type type;
u8 added:1;
u16 rif; /* used for action local */
struct mlxsw_sp_vr *vr;
};
struct mlxsw_sp_fib {
struct rhashtable ht;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
};
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
.key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
.head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
.key_len = sizeof(struct mlxsw_sp_fib_key),
.automatic_shrinking = true,
};
static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
struct mlxsw_sp_fib_entry *fib_entry)
{
unsigned char prefix_len = fib_entry->key.prefix_len;
int err;
err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
mlxsw_sp_fib_ht_params);
if (err)
return err;
if (fib->prefix_ref_count[prefix_len]++ == 0)
mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
return 0;
}
static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
struct mlxsw_sp_fib_entry *fib_entry)
{
unsigned char prefix_len = fib_entry->key.prefix_len;
if (--fib->prefix_ref_count[prefix_len] == 0)
mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
mlxsw_sp_fib_ht_params);
}
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
size_t addr_len, unsigned char prefix_len)
{
struct mlxsw_sp_fib_entry *fib_entry;
fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
if (!fib_entry)
return NULL;
memcpy(fib_entry->key.addr, addr, addr_len);
fib_entry->key.prefix_len = prefix_len;
return fib_entry;
}
static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
{
kfree(fib_entry);
}
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
size_t addr_len, unsigned char prefix_len)
{
struct mlxsw_sp_fib_key key = {{ 0 } };
memcpy(key.addr, addr, addr_len);
key.prefix_len = prefix_len;
return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
{
struct mlxsw_sp_fib *fib;
int err;
fib = kzalloc(sizeof(*fib), GFP_KERNEL);
if (!fib)
return ERR_PTR(-ENOMEM);
err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
if (err)
goto err_rhashtable_init;
return fib;
err_rhashtable_init:
kfree(fib);
return ERR_PTR(err);
}
static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
rhashtable_destroy(&fib->ht);
kfree(fib);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
lpm_tree = &mlxsw_sp->router.lpm_trees[i];
if (lpm_tree->ref_count == 0) {
if (one_reserved)
one_reserved = false;
else
return lpm_tree;
}
}
return NULL;
}
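The one_reserved flag deserves a note, since its effect is easy to miss:
/* With one_reserved set, the first unused tree found is skipped, so the
* allocation only succeeds when at least two trees are free and one is
* left in reserve. Virtual router creation passes true so that a later
* LPM tree replacement, during which the old and new trees briefly
* coexist, can still find a free tree.
*/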
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
char ralst_pl[MLXSW_REG_RALST_LEN];
u8 root_bin = 0;
u8 prefix;
u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
root_bin = prefix;
mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
if (prefix == 0)
continue;
mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
MLXSW_REG_RALST_BIN_NO_CHILD);
last_prefix = prefix;
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
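A worked example of the structure this function programs (derived from the two loops above):
/* The tree is packed as a left-leaning chain sorted by prefix length.
* With prefix_usage = {0, 24, 32}, root_bin ends up 32 (the last set
* bit), bin 24 is packed with no children, and bin 32 gets bin 24 as
* its left child. Bin 0, the default action, is skipped and never
* packed as a child.
*/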
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto, bool one_reserved)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
int err;
lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
if (err)
return ERR_PTR(err);
err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
lpm_tree);
if (err)
goto err_left_struct_set;
return lpm_tree;
err_left_struct_set:
mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
return ERR_PTR(err);
}
static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto, bool one_reserved)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
lpm_tree = &mlxsw_sp->router.lpm_trees[i];
if (lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
prefix_usage))
goto inc_ref_count;
}
lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
proto, one_reserved);
if (IS_ERR(lpm_tree))
return lpm_tree;
inc_ref_count:
lpm_tree->ref_count++;
return lpm_tree;
}
static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
if (--lpm_tree->ref_count == 0)
return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
return 0;
}
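How the get/put pair shares trees, in brief:
/* Trees are shared by usage pattern: _get() first looks for an existing
* tree with the same protocol and an identical prefix-usage bitmap and
* only creates a new one on a miss; _put() frees the hardware tree once
* the last reference is dropped.
*/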
static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
lpm_tree = &mlxsw_sp->router.lpm_trees[i];
lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
}
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_vr *vr;
int i;
for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (!vr->used)
return vr;
}
return NULL;
}
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr)
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr)
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0, which is the default tree */
mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
/* For our purposes, squash the main and local tables into one */
if (tb_id == RT_TABLE_LOCAL)
tb_id = RT_TABLE_MAIN;
return tb_id;
}
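For example, with the standard routing table IDs this means:
/* mlxsw_sp_fix_tb_id(RT_TABLE_LOCAL) == RT_TABLE_MAIN, i.e. local (255)
* and main (254) table routes are placed in the same virtual router.
*/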
static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
u32 tb_id,
enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_vr *vr;
int i;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
return vr;
}
return NULL;
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
unsigned char prefix_len,
u32 tb_id,
enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_prefix_usage req_prefix_usage;
struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_vr *vr;
int err;
vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
if (!vr)
return ERR_PTR(-EBUSY);
vr->fib = mlxsw_sp_fib_create();
if (IS_ERR(vr->fib))
return ERR_CAST(vr->fib);
vr->proto = proto;
vr->tb_id = tb_id;
mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
proto, true);
if (IS_ERR(lpm_tree)) {
err = PTR_ERR(lpm_tree);
goto err_tree_get;
}
vr->lpm_tree = lpm_tree;
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
if (err)
goto err_tree_bind;
vr->used = true;
return vr;
err_tree_bind:
mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
err_tree_get:
mlxsw_sp_fib_destroy(vr->fib);
return ERR_PTR(err);
}
static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr)
{
mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
mlxsw_sp_fib_destroy(vr->fib);
vr->used = false;
}
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
&vr->lpm_tree->prefix_usage))
return 0;
lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
vr->proto, false);
if (IS_ERR(lpm_tree)) {
/* We failed to get a tree according to the required
* prefix usage. However, the current tree might still be
* good for us if our requirement is a subset of the
* prefixes used in the tree.
*/
if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
&vr->lpm_tree->prefix_usage))
return 0;
return PTR_ERR(lpm_tree);
}
mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
vr->lpm_tree = lpm_tree;
return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
unsigned char prefix_len,
u32 tb_id,
enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_vr *vr;
int err;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
if (!vr) {
vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
if (IS_ERR(vr))
return vr;
} else {
struct mlxsw_sp_prefix_usage req_prefix_usage;
mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
&vr->fib->prefix_usage);
mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
/* Need to replace LPM tree in case new prefix is required. */
err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
&req_prefix_usage);
if (err)
return ERR_PTR(err);
}
return vr;
}
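A note on where this getter sits in the flow:
/* _get() is the single entry point on route addition: it either creates
* a VR for the (tb_id, proto) pair or, when one already exists, makes
* sure its LPM tree can also accommodate the new prefix length,
* replacing the tree if needed.
*/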
static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
/* Destroy the virtual router entity in case the associated FIB is
* empty, allowing it to be reused for other tables in the future.
* Otherwise, check whether some prefix usage disappeared and switch
* to a smaller tree if that is the case. Note that if a new, smaller
* tree cannot be allocated, the original one is kept in use.
*/
if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
mlxsw_sp_vr_destroy(mlxsw_sp, vr);
else
mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
&vr->fib->prefix_usage);
}
static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_vr *vr;
int i;
for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
vr = &mlxsw_sp->router.vrs[i];
vr->id = i;
}
}
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{ {
char rgcr_pl[MLXSW_REG_RGCR_LEN]; char rgcr_pl[MLXSW_REG_RGCR_LEN];
...@@ -59,10 +563,252 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) ...@@ -59,10 +563,252 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{ {
return __mlxsw_sp_router_init(mlxsw_sp); int err;
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
return err;
mlxsw_sp_lpm_init(mlxsw_sp);
mlxsw_sp_vrs_init(mlxsw_sp);
return 0;
} }
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{ {
__mlxsw_sp_router_fini(mlxsw_sp); __mlxsw_sp_router_fini(mlxsw_sp);
} }
static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
char ralue_pl[MLXSW_REG_RALUE_LEN];
u32 *p_dip = (u32 *) fib_entry->key.addr;
struct mlxsw_sp_vr *vr = fib_entry->vr;
mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_local_pack(ralue_pl,
MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
fib_entry->rif);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
char ralue_pl[MLXSW_REG_RALUE_LEN];
u32 *p_dip = (u32 *) fib_entry->key.addr;
struct mlxsw_sp_vr *vr = fib_entry->vr;
mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
return -EINVAL;
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
}
return -EINVAL;
}
static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
switch (fib_entry->vr->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
case MLXSW_SP_L3_PROTO_IPV6:
return -EINVAL;
}
return -EINVAL;
}
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
enum mlxsw_reg_ralue_op op;
op = !fib_entry->added ? MLXSW_REG_RALUE_OP_WRITE_WRITE :
MLXSW_REG_RALUE_OP_WRITE_UPDATE;
return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
}
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
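The RALUE operation selection, summarized:
/* RALUE writes are keyed by virtual router, prefix length and
* destination IP, and expressed as ops: WRITE for a new entry, UPDATE
* for an existing one (fib_entry->added tracks which), and DELETE on
* removal.
*/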
struct mlxsw_sp_router_fib4_add_info {
struct switchdev_trans_item tritem;
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_fib_entry *fib_entry;
};
static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
{
const struct mlxsw_sp_router_fib4_add_info *info = data;
struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
/* Release the VR before freeing the entry that references it. */
mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr);
mlxsw_sp_fib_entry_destroy(fib_entry);
kfree(info);
}
static int
mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
const struct switchdev_obj_ipv4_fib *fib4,
struct mlxsw_sp_fib_entry *fib_entry)
{
struct fib_info *fi = fib4->fi;
if (fib4->type == RTN_LOCAL || fib4->type == RTN_BROADCAST) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
return 0;
}
if (fib4->type != RTN_UNICAST)
return -EINVAL;
if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
struct mlxsw_sp_rif *r;
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fi->fib_dev);
if (!r)
return -EINVAL;
fib_entry->rif = r->rif;
return 0;
}
return -EINVAL;
}
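The route classification above, restated compactly:
/* Local and broadcast routes are trapped to the CPU; directly connected
* unicast routes (non-universe scope) are offloaded with a local action
* pointing at the egress RIF; gatewayed (universe-scope) unicast routes
* are not handled here and are rejected with -EINVAL.
*/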
static int
mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_ipv4_fib *fib4,
struct switchdev_trans *trans)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_router_fib4_add_info *info;
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_vr *vr;
int err;
vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr))
return PTR_ERR(vr);
fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
sizeof(fib4->dst), fib4->dst_len);
if (!fib_entry) {
err = -ENOMEM;
goto err_fib_entry_create;
}
fib_entry->vr = vr;
err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
if (err)
goto err_fib4_entry_init;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
err = -ENOMEM;
goto err_alloc_info;
}
info->mlxsw_sp = mlxsw_sp;
info->fib_entry = fib_entry;
switchdev_trans_item_enqueue(trans, info,
mlxsw_sp_router_fib4_add_info_destroy,
&info->tritem);
return 0;
err_alloc_info:
err_fib4_entry_init:
mlxsw_sp_fib_entry_destroy(fib_entry);
err_fib_entry_create:
mlxsw_sp_vr_put(mlxsw_sp, vr);
return err;
}
static int
mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_ipv4_fib *fib4,
struct switchdev_trans *trans)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_router_fib4_add_info *info;
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_vr *vr;
int err;
info = switchdev_trans_item_dequeue(trans);
fib_entry = info->fib_entry;
kfree(info);
vr = fib_entry->vr;
err = mlxsw_sp_fib_entry_insert(fib_entry->vr->fib, fib_entry);
if (err)
goto err_fib_entry_insert;
err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
goto err_fib_entry_add;
return 0;
err_fib_entry_add:
mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
err_fib_entry_insert:
mlxsw_sp_fib_entry_destroy(fib_entry);
mlxsw_sp_vr_put(mlxsw_sp, vr);
return err;
}
int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_ipv4_fib *fib4,
struct switchdev_trans *trans)
{
if (switchdev_trans_ph_prepare(trans))
return mlxsw_sp_router_fib4_add_prepare(mlxsw_sp_port,
fib4, trans);
return mlxsw_sp_router_fib4_add_commit(mlxsw_sp_port,
fib4, trans);
}
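The add path follows the usual switchdev two-phase model:
/* In the prepare phase everything that may fail due to allocation is
* done up front: the VR is taken, the entry is created and classified,
* and the result is queued on the transaction. The commit phase
* dequeues the prepared entry, inserts it into the FIB hash table and
* programs it into the device.
*/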
int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_ipv4_fib *fib4)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_vr *vr;
vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
if (!vr) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to find virtual router for FIB4 entry being removed.\n");
return -ENOENT;
}
fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
sizeof(fib4->dst), fib4->dst_len);
if (!fib_entry) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
return -ENOENT;
}
mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
mlxsw_sp_fib_entry_destroy(fib_entry);
mlxsw_sp_vr_put(mlxsw_sp, vr);
return 0;
}
...@@ -166,11 +166,6 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, ...@@ -166,11 +166,6 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state); return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
} }
static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
{
return vfid >= MLXSW_SP_VFID_PORT_MAX;
}
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 idx_begin, u16 idx_end, bool set, u16 idx_begin, u16 idx_end, bool set,
bool only_uc) bool only_uc)
...@@ -182,15 +177,10 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, ...@@ -182,15 +177,10 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
char *sftr_pl; char *sftr_pl;
int err; int err;
if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
if (mlxsw_sp_vfid_is_vport_br(idx_begin)) else
local_port = mlxsw_sp_port->local_port;
else
local_port = MLXSW_PORT_CPU_PORT;
} else {
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
}
sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
if (!sftr_pl) if (!sftr_pl)
...@@ -384,18 +374,6 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, ...@@ -384,18 +374,6 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
return err; return err;
} }
static struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
u16 fid)
{
struct mlxsw_sp_fid *f;
list_for_each_entry(f, &mlxsw_sp->fids, list)
if (f->fid == fid)
return f;
return NULL;
}
static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create) static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{ {
char sfmr_pl[MLXSW_REG_SFMR_LEN]; char sfmr_pl[MLXSW_REG_SFMR_LEN];
...@@ -426,8 +404,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid) ...@@ -426,8 +404,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
return f; return f;
} }
static struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
u16 fid)
{ {
struct mlxsw_sp_fid *f; struct mlxsw_sp_fid *f;
int err; int err;
...@@ -462,13 +439,15 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, ...@@ -462,13 +439,15 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(err); return ERR_PTR(err);
} }
static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
struct mlxsw_sp_fid *f)
{ {
u16 fid = f->fid; u16 fid = f->fid;
list_del(&f->list); list_del(&f->list);
if (f->r)
mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
kfree(f); kfree(f);
mlxsw_sp_fid_op(mlxsw_sp, fid, false); mlxsw_sp_fid_op(mlxsw_sp, fid, false);
...@@ -753,9 +732,10 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) ...@@ -753,9 +732,10 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
MLXSW_REG_SFD_OP_WRITE_REMOVE; MLXSW_REG_SFD_OP_WRITE_REMOVE;
} }
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
const char *mac, u16 fid, bool adding, const char *mac, u16 fid, bool adding,
bool dynamic) enum mlxsw_reg_sfd_rec_action action,
bool dynamic)
{ {
char *sfd_pl; char *sfd_pl;
int err; int err;
...@@ -766,14 +746,29 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, ...@@ -766,14 +746,29 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, mac, fid, action, local_port);
local_port);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
kfree(sfd_pl); kfree(sfd_pl);
return err; return err;
} }
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
const char *mac, u16 fid, bool adding,
bool dynamic)
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding)
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
false);
}
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
const char *mac, u16 fid, u16 lag_vid, const char *mac, u16 fid, u16 lag_vid,
bool adding, bool dynamic) bool adding, bool dynamic)
...@@ -978,6 +973,11 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, ...@@ -978,6 +973,11 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj), SWITCHDEV_OBJ_PORT_VLAN(obj),
trans); trans);
break; break;
case SWITCHDEV_OBJ_ID_IPV4_FIB:
err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
SWITCHDEV_OBJ_IPV4_FIB(obj),
trans);
break;
case SWITCHDEV_OBJ_ID_PORT_FDB: case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port, err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj), SWITCHDEV_OBJ_PORT_FDB(obj),
...@@ -1123,6 +1123,10 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, ...@@ -1123,6 +1123,10 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
err = mlxsw_sp_port_vlans_del(mlxsw_sp_port, err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj)); SWITCHDEV_OBJ_PORT_VLAN(obj));
break; break;
case SWITCHDEV_OBJ_ID_IPV4_FIB:
err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
SWITCHDEV_OBJ_IPV4_FIB(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_FDB: case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port, err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj)); SWITCHDEV_OBJ_PORT_FDB(obj));
......
...@@ -3804,12 +3804,30 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev, ...@@ -3804,12 +3804,30 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
void *netdev_lower_get_next(struct net_device *dev, void *netdev_lower_get_next(struct net_device *dev,
struct list_head **iter); struct list_head **iter);
#define netdev_for_each_lower_dev(dev, ldev, iter) \ #define netdev_for_each_lower_dev(dev, ldev, iter) \
for (iter = (dev)->adj_list.lower.next, \ for (iter = (dev)->adj_list.lower.next, \
ldev = netdev_lower_get_next(dev, &(iter)); \ ldev = netdev_lower_get_next(dev, &(iter)); \
ldev; \ ldev; \
ldev = netdev_lower_get_next(dev, &(iter))) ldev = netdev_lower_get_next(dev, &(iter)))
struct net_device *netdev_all_lower_get_next(struct net_device *dev,
struct list_head **iter);
struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
struct list_head **iter);
#define netdev_for_each_all_lower_dev(dev, ldev, iter) \
for (iter = (dev)->all_adj_list.lower.next, \
ldev = netdev_all_lower_get_next(dev, &(iter)); \
ldev; \
ldev = netdev_all_lower_get_next(dev, &(iter)))
#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
for (iter = (dev)->all_adj_list.lower.next, \
ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
ldev; \
ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
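A minimal usage sketch for the new iterators (hypothetical caller; upper_dev is assumed, and the RTNL lock must be held as the helper below documents):
struct net_device *lower;
struct list_head *iter;
ASSERT_RTNL();
netdev_for_each_all_lower_dev(upper_dev, lower, iter)
netdev_info(lower, "lower device of %s\n", upper_dev->name);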
void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev); void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev); struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
......
...@@ -5444,6 +5444,52 @@ void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) ...@@ -5444,6 +5444,52 @@ void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
} }
EXPORT_SYMBOL(netdev_lower_get_next); EXPORT_SYMBOL(netdev_lower_get_next);
/**
* netdev_all_lower_get_next - Get the next device from all lower neighbour list
* @dev: device
* @iter: list_head ** of the current position
*
* Gets the next netdev_adjacent from the dev's all lower neighbour
* list, starting from iter position. The caller must hold the RTNL lock or
* its own locking that guarantees that the all lower neighbour
* list will remain unchanged.
*/
struct net_device *netdev_all_lower_get_next(struct net_device *dev, struct list_head **iter)
{
struct netdev_adjacent *lower;
lower = list_entry(*iter, struct netdev_adjacent, list);
if (&lower->list == &dev->all_adj_list.lower)
return NULL;
*iter = lower->list.next;
return lower->dev;
}
EXPORT_SYMBOL(netdev_all_lower_get_next);
/**
* netdev_all_lower_get_next_rcu - Get the next device from all
* lower neighbour list, RCU variant
* @dev: device
* @iter: list_head ** of the current position
*
* Gets the next netdev_adjacent from the dev's all lower neighbour
* list, starting from iter position. The caller must hold RCU read lock.
*/
struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
struct list_head **iter)
{
struct netdev_adjacent *lower;
/* Advance from *iter like the non-RCU variant; always returning the
* first element would keep the iterator from ever making progress.
*/
lower = list_entry_rcu(*iter, struct netdev_adjacent, list);
if (&lower->list == &dev->all_adj_list.lower)
return NULL;
*iter = lower->list.next;
return lower->dev;
}
EXPORT_SYMBOL(netdev_all_lower_get_next_rcu);
/** /**
* netdev_lower_get_first_private_rcu - Get the first ->private from the * netdev_lower_get_first_private_rcu - Get the first ->private from the
* lower neighbour list, RCU * lower neighbour list, RCU
......