Commit 914c4fc1 authored by Petr Machata, committed by David S. Miller

mlxsw: spectrum: Use guaranteed buffer size as pool size limit

There are two resources associated with shared buffer size:
cap_total_buffer_size and cap_guaranteed_shared_buffer. So far, mlxsw has
been using the former as the limit on how large a pool is allowed to be.
However, the total size also includes headrooms and reserved space, which
really cannot be used for shared buffer pools.

Therefore, convert mlxsw to use the latter resource as the limit. Adjust
the hard-coded pool sizes to be the guaranteed size minus 256000 bytes for
the CPU port pool. On Spectrum-1 that actually leads to an increase. A
follow-up patch will have this size calculated automatically.
Signed-off-by: Petr Machata <petrm@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 06cd9da5
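
For reference, a minimal sketch of the arithmetic behind the new hard-coded sizes, assuming per-ASIC guaranteed shared buffer totals of 14024608 bytes (Spectrum-1) and 34340800 bytes (Spectrum-2); these two totals, and the macro names used below, are inferred for illustration and are not defined by the patch itself:

/* Sketch only: relation between the new pool-size constants and the
 * guaranteed shared buffer size. The *_GUARANTEED_SIZE values are
 * assumptions inferred from the patch, not values it defines.
 */
#define MLXSW_SP_SB_CPU_SIZE		(256 * 1000)

#define MLXSW_SP1_SB_GUARANTEED_SIZE	14024608	/* assumed */
#define MLXSW_SP1_SB_PR_SIZE \
	(MLXSW_SP1_SB_GUARANTEED_SIZE - MLXSW_SP_SB_CPU_SIZE)	/* 13768608 */

#define MLXSW_SP2_SB_GUARANTEED_SIZE	34340800	/* assumed */
#define MLXSW_SP2_SB_PR_SIZE \
	(MLXSW_SP2_SB_GUARANTEED_SIZE - MLXSW_SP_SB_CPU_SIZE)	/* 34084800 */

The follow-up patch mentioned in the commit message would presumably derive these sizes from the GUARANTEED_SHARED_BUFFER resource at init time instead of hard-coding them.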
@@ -26,7 +26,7 @@ enum mlxsw_res_id {
 	MLXSW_RES_ID_MAX_LAG_MEMBERS,
 	MLXSW_RES_ID_LOCAL_PORTS_IN_1X,
 	MLXSW_RES_ID_LOCAL_PORTS_IN_2X,
-	MLXSW_RES_ID_MAX_BUFFER_SIZE,
+	MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
 	MLXSW_RES_ID_CELL_SIZE,
 	MLXSW_RES_ID_MAX_HEADROOM_SIZE,
 	MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS,
@@ -82,7 +82,7 @@ static u16 mlxsw_res_ids[] = {
 	[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
 	[MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610,
 	[MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611,
-	[MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802,	/* Bytes */
+	[MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805,	/* Bytes */
 	[MLXSW_RES_ID_CELL_SIZE] = 0x2803,	/* Bytes */
 	[MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811,	/* Bytes */
 	[MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901,
...
@@ -421,8 +421,8 @@ static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
 		.freeze_size = _freeze_size,	\
 	}
-#define MLXSW_SP1_SB_PR_INGRESS_SIZE	12440000
-#define MLXSW_SP1_SB_PR_EGRESS_SIZE	13232000
+#define MLXSW_SP1_SB_PR_INGRESS_SIZE	13768608
+#define MLXSW_SP1_SB_PR_EGRESS_SIZE	13768608
 #define MLXSW_SP1_SB_PR_CPU_SIZE	(256 * 1000)
 /* Order according to mlxsw_sp1_sb_pool_dess */
@@ -445,8 +445,8 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
 			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
 };
-#define MLXSW_SP2_SB_PR_INGRESS_SIZE	35297568
-#define MLXSW_SP2_SB_PR_EGRESS_SIZE	35297568
+#define MLXSW_SP2_SB_PR_INGRESS_SIZE	34084800
+#define MLXSW_SP2_SB_PR_EGRESS_SIZE	34084800
 #define MLXSW_SP2_SB_PR_CPU_SIZE	(256 * 1000)
 /* Order according to mlxsw_sp2_sb_pool_dess */
@@ -904,7 +904,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
 		return -EIO;
-	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
+	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
 		return -EIO;
 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
@@ -915,7 +915,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
 		return -ENOMEM;
 	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
 	mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
-						   MAX_BUFFER_SIZE);
+						   GUARANTEED_SHARED_BUFFER);
 	max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
 					       MAX_HEADROOM_SIZE);
 	/* Round down, because this limit must not be overstepped. */
@@ -1013,7 +1013,8 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
 	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
 	pr = &mlxsw_sp->sb_vals->prs[pool_index];
-	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
+	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
+				      GUARANTEED_SHARED_BUFFER)) {
 		NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
 		return -EINVAL;
 	}
...
@@ -305,7 +305,8 @@ mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
 			p->max);
 		return -EINVAL;
 	}
-	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
+	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
+					GUARANTEED_SHARED_BUFFER)) {
 		dev_err(mlxsw_sp->bus_info->dev,
 			"spectrum: RED: max value %u is too big\n", p->max);
 		return -EINVAL;
...