Commit 22881adf authored by Petr Machata, committed by David S. Miller

mlxsw: spectrum_buffers: Manage internal buffer in the hdroom code

Traffic mirroring modes that are implemented in-chip on egress need an
internal buffer to work. As the only client, the SPAN module has been
managing the buffer so far. Logically, however, it belongs to the buffers
module: e.g. buffer size validation needs to take the size of the internal
buffer into account.

Therefore move the related code from SPAN to spectrum_buffers. Move over
the callbacks that determine the minimum buffer size as a function of
maximum speed and MTU. Add a field describing the internal buffer to struct
mlxsw_sp_hdroom. Extend mlxsw_sp_hdroom_bufs_reset_sizes() to take care of
sizing the internal buffer as well. Change the SPAN module to invoke that
function and mlxsw_sp_hdroom_configure() like all the other hdroom clients.
Drop the now-unnecessary buffsize_get callback from mlxsw_sp_span_ops along
with its per-ASIC implementations; mlxsw_sp_span_port_buffer_enable() and
mlxsw_sp_span_port_buffer_disable() become thin wrappers around the new
hdroom interface.
Signed-off-by: Petr Machata <petrm@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a41b9626
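
For orientation, this is the pattern a hdroom client follows after this change,
restated from mlxsw_sp_span_port_buffer_update() in the diff below: copy the
currently applied headroom configuration, toggle the internal buffer, recompute
buffer sizes, and apply the result.

static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
        struct mlxsw_sp_hdroom hdroom;

        /* Start from the currently applied headroom configuration. */
        hdroom = *mlxsw_sp_port->hdroom;

        /* Request (or release) the internal buffer used by egress mirroring. */
        hdroom.int_buf.enable = enable;

        /* Recompute reserve/size for all buffers, including the internal one. */
        mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

        /* Validate and push the new configuration to the device. */
        return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}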
@@ -415,17 +415,6 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
 	return NULL;
 }
 
-static inline u32
-mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
-				 u32 size_cells)
-{
-	/* Ports with eight lanes use two headroom buffers between which the
-	 * configured headroom size is split. Therefore, multiply the calculated
-	 * headroom size by two.
-	 */
-	return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
-}
-
 enum mlxsw_sp_flood_type {
 	MLXSW_SP_FLOOD_TYPE_UC,
 	MLXSW_SP_FLOOD_TYPE_BC,
@@ -463,6 +452,17 @@ struct mlxsw_sp_hdroom {
 	struct {
 		struct mlxsw_sp_hdroom_buf buf[MLXSW_SP_PB_COUNT];
 	} bufs;
+	struct {
+		/* Size actually configured for the internal buffer. Equal to
+		 * reserve when internal buffer is enabled.
+		 */
+		u32 size_cells;
+		/* Space reserved in the headroom for the internal buffer. Port
+		 * buffers are not allowed to grow into this space.
+		 */
+		u32 reserve_cells;
+		bool enable;
+	} int_buf;
 	int delay_bytes;
 	int mtu;
 };
...
@@ -122,6 +122,7 @@ struct mlxsw_sp_sb_vals {
 };
 
 struct mlxsw_sp_sb_ops {
+	u32 (*int_buf_size_get)(int mtu, u32 speed);
 };
 
 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
@@ -134,6 +135,16 @@ u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
 	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
 }
 
+static u32 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
+					    u32 size_cells)
+{
+	/* Ports with eight lanes use two headroom buffers between which the
+	 * configured headroom size is split. Therefore, multiply the calculated
+	 * headroom size by two.
+	 */
+	return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
+}
+
 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
 						 u16 pool_index)
 {
@@ -343,6 +354,13 @@ static u16 mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp *mlxsw_sp,
 	return 2 * delay_cells + mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->mtu);
 }
 
+static u32 mlxsw_sp_hdroom_int_buf_size_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
+{
+	u32 buffsize = mlxsw_sp->sb_ops->int_buf_size_get(speed, mtu);
+
+	return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+}
+
 static bool mlxsw_sp_hdroom_buf_is_used(const struct mlxsw_sp_hdroom *hdroom, int buf)
 {
 	int prio;
@@ -358,8 +376,21 @@ void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
 				      struct mlxsw_sp_hdroom *hdroom)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u16 reserve_cells;
 	int i;
 
+	/* Internal buffer. */
+	reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_speed,
+							 mlxsw_sp_port->max_mtu);
+	reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
+	hdroom->int_buf.reserve_cells = reserve_cells;
+
+	if (hdroom->int_buf.enable)
+		hdroom->int_buf.size_cells = reserve_cells;
+	else
+		hdroom->int_buf.size_cells = 0;
+
+	/* PG buffers. */
 	for (i = 0; i < DCBX_MAX_BUFFERS; i++) {
 		struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
 		u16 thres_cells;
@@ -442,6 +473,26 @@ static int mlxsw_sp_hdroom_configure_priomap(struct mlxsw_sp_port *mlxsw_sp_port
 	return 0;
 }
 
+static int mlxsw_sp_hdroom_configure_int_buf(struct mlxsw_sp_port *mlxsw_sp_port,
+					     const struct mlxsw_sp_hdroom *hdroom, bool force)
+{
+	char sbib_pl[MLXSW_REG_SBIB_LEN];
+	bool dirty;
+	int err;
+
+	dirty = memcmp(&mlxsw_sp_port->hdroom->int_buf, &hdroom->int_buf, sizeof(hdroom->int_buf));
+	if (!dirty && !force)
+		return 0;
+
+	mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, hdroom->int_buf.size_cells);
+	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+	if (err)
+		return err;
+
+	mlxsw_sp_port->hdroom->int_buf = hdroom->int_buf;
+	return 0;
+}
+
 static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
 				     const struct mlxsw_sp_hdroom *hdroom)
 {
@@ -451,6 +502,7 @@ static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
 
 	for (i = 0; i < MLXSW_SP_PB_COUNT; i++)
 		taken_headroom_cells += hdroom->bufs.buf[i].size_cells;
+	taken_headroom_cells += hdroom->int_buf.reserve_cells;
 
 	return taken_headroom_cells <= mlxsw_sp->sb->max_headroom_cells;
 }
@@ -493,9 +545,15 @@ static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
 	if (err)
 		goto err_configure_buffers;
 
+	err = mlxsw_sp_hdroom_configure_int_buf(mlxsw_sp_port, hdroom, false);
+	if (err)
+		goto err_configure_int_buf;
+
 	*mlxsw_sp_port->hdroom = *hdroom;
 	return 0;
 
+err_configure_int_buf:
+	mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, false);
 err_configure_buffers:
 	mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, &tmp_hdroom, false);
 err_configure_priomap:
@@ -1104,13 +1162,44 @@ const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
 	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
 };
 
+static u32 mlxsw_sp1_pb_int_buf_size_get(int mtu, u32 speed)
+{
+	return mtu * 5 / 2;
+}
+
+static u32 __mlxsw_sp_pb_int_buf_size_get(int mtu, u32 speed, u32 buffer_factor)
+{
+	return 3 * mtu + buffer_factor * speed / 1000;
+}
+
+#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
+static u32 mlxsw_sp2_pb_int_buf_size_get(int mtu, u32 speed)
+{
+	int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+	return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
+}
+
+#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
+static u32 mlxsw_sp3_pb_int_buf_size_get(int mtu, u32 speed)
+{
+	int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+	return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
+}
+
 const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops = {
+	.int_buf_size_get = mlxsw_sp1_pb_int_buf_size_get,
 };
 
 const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops = {
+	.int_buf_size_get = mlxsw_sp2_pb_int_buf_size_get,
 };
 
 const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops = {
+	.int_buf_size_get = mlxsw_sp3_pb_int_buf_size_get,
 };
 
 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
...
@@ -968,35 +968,26 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
 	return 0;
 }
 
-static u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu,
-				      u32 speed)
+static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
 {
-	u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu);
+	struct mlxsw_sp_hdroom hdroom;
 
-	return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+	hdroom = *mlxsw_sp_port->hdroom;
+	hdroom.int_buf.enable = enable;
+	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+	return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
 }
 
 static int
 mlxsw_sp_span_port_buffer_enable(struct mlxsw_sp_port *mlxsw_sp_port)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	char sbib_pl[MLXSW_REG_SBIB_LEN];
-	u32 buffsize;
-
-	buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, mlxsw_sp_port->max_speed,
-					      mlxsw_sp_port->max_mtu);
-	buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
-	mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+	return mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, true);
 }
 
-static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp *mlxsw_sp,
-					      u8 local_port)
+static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_port)
 {
-	char sbib_pl[MLXSW_REG_SBIB_LEN];
-
-	mlxsw_reg_sbib_pack(sbib_pl, local_port, 0);
-	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+	mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, false);
 }
 
 static struct mlxsw_sp_span_analyzed_port *
@@ -1145,18 +1136,15 @@ mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
 }
 
 static void
-mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_span *span,
+mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
 				    struct mlxsw_sp_span_analyzed_port *
 				    analyzed_port)
 {
-	struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
-
 	/* Remove egress mirror buffer now that port is no longer analyzed
 	 * at egress.
 	 */
 	if (!analyzed_port->ingress)
-		mlxsw_sp_span_port_buffer_disable(mlxsw_sp,
-						  analyzed_port->local_port);
+		mlxsw_sp_span_port_buffer_disable(mlxsw_sp_port);
 
 	list_del(&analyzed_port->list);
 	kfree(analyzed_port);
@@ -1207,7 +1195,7 @@ void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
 	if (!refcount_dec_and_test(&analyzed_port->ref_count))
 		goto out_unlock;
 
-	mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp->span, analyzed_port);
+	mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp_port, analyzed_port);
 
 out_unlock:
 	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
@@ -1661,11 +1649,6 @@ static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
 	return 0;
 }
 
-static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
-{
-	return mtu * 5 / 2;
-}
-
 static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
 					      u16 policer_id_base)
 {
@@ -1674,7 +1657,6 @@ static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
 
 const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
 	.init = mlxsw_sp1_span_init,
-	.buffsize_get = mlxsw_sp1_span_buffsize_get,
 	.policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
 };
 
@@ -1699,18 +1681,6 @@ static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
 #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
 #define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
 
-static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
-{
-	return 3 * mtu + buffer_factor * speed / 1000;
-}
-
-static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
-{
-	int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
-
-	return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
-}
-
 static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
 					      u16 policer_id_base)
 {
@@ -1727,19 +1697,10 @@ static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
 
 const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
 	.init = mlxsw_sp2_span_init,
-	.buffsize_get = mlxsw_sp2_span_buffsize_get,
 	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
 };
 
-static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed)
-{
-	int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
-
-	return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
-}
-
 const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
 	.init = mlxsw_sp2_span_init,
-	.buffsize_get = mlxsw_sp3_span_buffsize_get,
 	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
 };
@@ -47,7 +47,6 @@ struct mlxsw_sp_span_entry_ops;
 
 struct mlxsw_sp_span_ops {
 	int (*init)(struct mlxsw_sp *mlxsw_sp);
-	u32 (*buffsize_get)(int mtu, u32 speed);
 	int (*policer_id_base_set)(struct mlxsw_sp *mlxsw_sp,
 				   u16 policer_id_base);
 };
...
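
As a rough worked example (assuming the driver expresses speed in Mb/s, and
ignoring cell rounding and the 8x-lane adjustment): under the Spectrum-2
formula added above, a hypothetical port with a 10000-byte maximum MTU running
at 100000 Mb/s would reserve about 3 * 10000 + 38 * 100000 / 1000 = 33800 bytes
for the internal buffer, whereas the Spectrum-1 formula reserves
10000 * 5 / 2 = 25000 bytes regardless of speed.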