Commit 2919ee2d authored by David S. Miller

Merge branch 'mlxsw-cleanups'

Ido Schimmel says:

====================
mlxsw: Various cleanups

The first nine patches from Jiri perform small and unrelated cleanups. The
largest is the conversion of the KVD linear partitions from a list to an
array, which simplifies the code; a short sketch of the new layout follows
the commit metadata below.

The last patch from Petr fixes a bug, introduced by a recent net-next commit,
that prevented the "kvd" resource from being marked as the parent of its
various child resources (e.g., "/kvd/linear"); see the second sketch below.

v2: Dropped devlink patch following David's comment. Will be sent
separately.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 06b19fe9 59441fef
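
For readers skimming the diff below, here is a minimal, self-contained sketch
of the list-to-array shape change for the KVD linear partitions (illustrative
names and index ranges, not the driver code itself): a fixed part-ID enum
indexes a static info table, so the list walks and per-part kmemdup() of the
old code become direct array indexing.

#include <stdio.h>

enum kvdl_part_id {
	KVDL_PART_ID_SINGLE,
	KVDL_PART_ID_CHUNKS,
	KVDL_PART_ID_LARGE_CHUNKS,
	KVDL_PART_COUNT,
};

struct kvdl_part_info {
	unsigned int start_index;	/* first KVD linear index of the part */
	unsigned int end_index;		/* last KVD linear index of the part */
	unsigned int alloc_size;	/* entries handed out per allocation */
};

/* Compile-time table replacing the runtime-built list; the index ranges here
 * are made up for illustration, only the alloc sizes mirror the real driver. */
static const struct kvdl_part_info parts_info[KVDL_PART_COUNT] = {
	[KVDL_PART_ID_SINGLE]	    = { .start_index = 0,    .end_index = 1023, .alloc_size = 1 },
	[KVDL_PART_ID_CHUNKS]	    = { .start_index = 1024, .end_index = 2047, .alloc_size = 32 },
	[KVDL_PART_ID_LARGE_CHUNKS] = { .start_index = 2048, .end_index = 4095, .alloc_size = 512 },
};

int main(void)
{
	/* What used to be list_for_each_entry() is now a plain indexed loop. */
	for (int i = 0; i < KVDL_PART_COUNT; i++)
		printf("part %d: indexes %u..%u, alloc size %u\n", i,
		       parts_info[i].start_index, parts_info[i].end_index,
		       parts_info[i].alloc_size);
	return 0;
}

Because the set of partitions is known at compile time, the array form also
removes the per-part info duplication and list bookkeeping the old code needed.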
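The parenting fix corresponds to the "MLXSW_SP_RESOURCE_KVD = 1" change visible
in spectrum.h below: devlink treats a parent resource ID of 0 as its
"top of hierarchy" sentinel, so a resource whose own ID is 0 can never be
referenced as a parent. A tiny standalone sketch of the collision (hypothetical
constants, not the devlink API itself):

#include <stdio.h>
#include <stdbool.h>

#define RESOURCE_ID_PARENT_TOP 0	/* sentinel: "register at the top level" */

enum sp_resource_id {
	SP_RESOURCE_KVD = 1,		/* starting at 1 keeps kvd distinct from the sentinel */
	SP_RESOURCE_KVD_LINEAR,
};

/* A registration whose parent ID equals the sentinel is treated as top level. */
static bool parent_is_top(unsigned int parent_id)
{
	return parent_id == RESOURCE_ID_PARENT_TOP;
}

int main(void)
{
	/* Registering kvd/linear with its parent set to the kvd resource ID. */
	unsigned int parent_id = SP_RESOURCE_KVD;

	printf("kvd/linear nests under kvd: %s\n",
	       parent_is_top(parent_id) ?
	       "no (ID 0 would be mistaken for the top-level sentinel)" : "yes");
	return 0;
}
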
@@ -1008,6 +1008,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
struct mlxsw_res *res;
size_t alloc_size;
int err;
@@ -1032,8 +1033,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
mlxsw_core->bus_priv = bus_priv;
mlxsw_core->bus_info = mlxsw_bus_info;
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
&mlxsw_core->res);
res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
if (err)
goto err_bus_init;
......
@@ -235,8 +235,7 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
used_kvd_split_data:1; /* indicate for the kvd's values */
used_kvd_sizes:1;
u8 max_vepa_channels;
u16 max_mid;
u16 max_pgt;
@@ -256,10 +255,8 @@ struct mlxsw_config_profile {
u16 adaptive_routing_group_cap;
u8 arn;
u32 kvd_linear_size;
u16 kvd_hash_granularity;
u8 kvd_hash_single_parts;
u8 kvd_hash_double_parts;
u8 resource_query_enable;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -316,6 +313,7 @@ struct mlxsw_driver {
u64 *p_linear_size);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
bool res_query_enabled;
};
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -326,14 +324,14 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
#define MLXSW_CORE_RES_VALID(res, short_res_id) \
mlxsw_core_res_valid(res, MLXSW_RES_ID_##short_res_id)
#define MLXSW_CORE_RES_VALID(mlxsw_core, short_res_id) \
mlxsw_core_res_valid(mlxsw_core, MLXSW_RES_ID_##short_res_id)
u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
#define MLXSW_CORE_RES_GET(res, short_res_id) \
mlxsw_core_res_get(res, MLXSW_RES_ID_##short_res_id)
#define MLXSW_CORE_RES_GET(mlxsw_core, short_res_id) \
mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_##short_res_id)
#define MLXSW_BUS_F_TXRX BIT(0)
......
@@ -1015,16 +1015,14 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
}
static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_res *res,
u8 query_enabled)
struct mlxsw_res *res)
{
int index, i;
u64 data;
u16 id;
int err;
/* Not all the versions support resources query */
if (!query_enabled)
if (!res)
return 0;
mlxsw_cmd_mbox_zero(mbox);
@@ -1164,7 +1162,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
if (MLXSW_RES_VALID(res, KVD_SIZE)) {
if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
return err;
@@ -1376,8 +1374,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_boardinfo;
err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res,
profile->resource_query_enable);
err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res);
if (err)
goto err_query_resources;
......
@@ -3793,8 +3793,7 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
.used_kvd_split_data = 1,
.kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
.used_kvd_sizes = 1,
.kvd_hash_single_parts = 59,
.kvd_hash_double_parts = 41,
.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
@@ -3804,7 +3803,6 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.type = MLXSW_PORT_SWID_TYPE_ETH,
}
},
.resource_query_enable = 1,
};
static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
@@ -3815,7 +3813,7 @@ static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
}
static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
static const struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
.occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
};
@@ -3894,7 +3892,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
return err;
err = mlxsw_sp_kvdl_resources_register(devlink);
err = mlxsw_sp_kvdl_resources_register(mlxsw_core);
if (err)
return err;
@@ -3902,7 +3900,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
double_size *= profile->kvd_hash_double_parts;
double_size /= profile->kvd_hash_double_parts +
profile->kvd_hash_single_parts;
double_size = rounddown(double_size, profile->kvd_hash_granularity);
double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
double_size,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
@@ -3935,8 +3933,7 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
!profile->used_kvd_split_data)
!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
return -EIO;
/* The hash part is what left of the kvd without the
@@ -3962,7 +3959,7 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
double_size /= profile->kvd_hash_double_parts +
profile->kvd_hash_single_parts;
*p_double_size = rounddown(double_size,
profile->kvd_hash_granularity);
MLXSW_SP_KVD_GRANULARITY);
}
err = devlink_resource_size_get(devlink,
@@ -4004,6 +4001,7 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp_config_profile,
.res_query_enabled = true,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
......
@@ -75,7 +75,7 @@
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS "large_chunks"
enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD,
MLXSW_SP_RESOURCE_KVD = 1,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
@@ -443,7 +443,7 @@ int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_kvdl_resources_register(struct devlink *devlink);
int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
......
@@ -33,8 +33,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
#ifndef _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
#define _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
#include "spectrum.h"
......
@@ -55,24 +55,47 @@
#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \
(MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1)
#define MLXSW_SP_CHUNK_MAX 32
#define MLXSW_SP_LARGE_CHUNK_MAX 512
#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1
#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32
#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
struct mlxsw_sp_kvdl_part_info {
unsigned int part_index;
unsigned int start_index;
unsigned int end_index;
unsigned int alloc_size;
enum mlxsw_sp_resource_id resource_id;
};
enum mlxsw_sp_kvdl_part_id {
MLXSW_SP_KVDL_PART_ID_SINGLE,
MLXSW_SP_KVDL_PART_ID_CHUNKS,
MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS,
};
#define MLXSW_SP_KVDL_PART_INFO(id) \
[MLXSW_SP_KVDL_PART_ID_##id] = { \
.start_index = MLXSW_SP_KVDL_##id##_BASE, \
.end_index = MLXSW_SP_KVDL_##id##_END, \
.alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE, \
.resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
}
static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = {
MLXSW_SP_KVDL_PART_INFO(SINGLE),
MLXSW_SP_KVDL_PART_INFO(CHUNKS),
MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS),
};
#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info)
struct mlxsw_sp_kvdl_part {
struct list_head list;
struct mlxsw_sp_kvdl_part_info *info;
struct mlxsw_sp_kvdl_part_info info;
unsigned long usage[0]; /* Entries */
};
struct mlxsw_sp_kvdl {
struct list_head parts_list;
struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN];
};
static struct mlxsw_sp_kvdl_part *
@@ -80,11 +103,13 @@ mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl,
unsigned int alloc_size)
{
struct mlxsw_sp_kvdl_part *part, *min_part = NULL;
int i;
list_for_each_entry(part, &kvdl->parts_list, list) {
if (alloc_size <= part->info->alloc_size &&
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
part = kvdl->parts[i];
if (alloc_size <= part->info.alloc_size &&
(!min_part ||
part->info->alloc_size <= min_part->info->alloc_size))
part->info.alloc_size <= min_part->info.alloc_size))
min_part = part;
}
@@ -95,10 +120,12 @@ static struct mlxsw_sp_kvdl_part *
mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index)
{
struct mlxsw_sp_kvdl_part *part;
int i;
list_for_each_entry(part, &kvdl->parts_list, list) {
if (kvdl_index >= part->info->start_index &&
kvdl_index <= part->info->end_index)
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
part = kvdl->parts[i];
if (kvdl_index >= part->info.start_index &&
kvdl_index <= part->info.end_index)
return part;
}
@@ -122,7 +149,7 @@ mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info,
static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
u32 *p_kvdl_index)
{
const struct mlxsw_sp_kvdl_part_info *info = part->info;
const struct mlxsw_sp_kvdl_part_info *info = &part->info;
unsigned int entry_index, nr_entries;
nr_entries = (info->end_index - info->start_index + 1) /
@@ -132,8 +159,7 @@ static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
return -ENOBUFS;
__set_bit(entry_index, part->usage);
*p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(part->info,
entry_index);
*p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index);
return 0;
}
@@ -141,10 +167,10 @@ static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part,
u32 kvdl_index)
{
const struct mlxsw_sp_kvdl_part_info *info = &part->info;
unsigned int entry_index;
entry_index = mlxsw_sp_kvdl_index_entry_index(part->info,
kvdl_index);
entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index);
__clear_bit(entry_index, part->usage);
}
@@ -183,74 +209,30 @@ int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(part))
return PTR_ERR(part);
*p_alloc_size = part->info->alloc_size;
*p_alloc_size = part->info.alloc_size;
return 0;
}
enum mlxsw_sp_kvdl_part_id {
MLXSW_SP_KVDL_PART_SINGLE,
MLXSW_SP_KVDL_PART_CHUNKS,
MLXSW_SP_KVDL_PART_LARGE_CHUNKS,
};
static const struct mlxsw_sp_kvdl_part_info kvdl_parts_info[] = {
{
.part_index = MLXSW_SP_KVDL_PART_SINGLE,
.start_index = MLXSW_SP_KVDL_SINGLE_BASE,
.end_index = MLXSW_SP_KVDL_SINGLE_END,
.alloc_size = 1,
},
{
.part_index = MLXSW_SP_KVDL_PART_CHUNKS,
.start_index = MLXSW_SP_KVDL_CHUNKS_BASE,
.end_index = MLXSW_SP_KVDL_CHUNKS_END,
.alloc_size = MLXSW_SP_CHUNK_MAX,
},
{
.part_index = MLXSW_SP_KVDL_PART_LARGE_CHUNKS,
.start_index = MLXSW_SP_KVDL_LARGE_CHUNKS_BASE,
.end_index = MLXSW_SP_KVDL_LARGE_CHUNKS_END,
.alloc_size = MLXSW_SP_LARGE_CHUNK_MAX,
},
};
static struct mlxsw_sp_kvdl_part *
mlxsw_sp_kvdl_part_find(struct mlxsw_sp *mlxsw_sp, unsigned int part_index)
{
struct mlxsw_sp_kvdl_part *part;
list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list) {
if (part->info->part_index == part_index)
return part;
}
return NULL;
}
static void
mlxsw_sp_kvdl_part_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_kvdl_part *part, unsigned int size)
static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part,
struct mlxsw_sp_kvdl_part *part_prev,
unsigned int size)
{
struct mlxsw_sp_kvdl_part_info *info = part->info;
if (list_is_last(&part->list, &mlxsw_sp->kvdl->parts_list)) {
info->end_index = size - 1;
} else {
struct mlxsw_sp_kvdl_part *last_part;
last_part = list_next_entry(part, list);
info->start_index = last_part->info->end_index + 1;
info->end_index = info->start_index + size - 1;
if (!part_prev) {
part->info.end_index = size - 1;
} else {
part->info.start_index = part_prev->info.end_index + 1;
part->info.end_index = part->info.start_index + size - 1;
}
}
static int mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
unsigned int part_index)
static struct mlxsw_sp_kvdl_part *
mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_kvdl_part_info *info,
struct mlxsw_sp_kvdl_part *part_prev)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
const struct mlxsw_sp_kvdl_part_info *info;
enum mlxsw_sp_resource_id resource_id;
struct mlxsw_sp_kvdl_part *part;
bool need_update = true;
unsigned int nr_entries;
@@ -258,23 +240,8 @@ static int mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
u64 resource_size;
int err;
info = &kvdl_parts_info[part_index];
switch (part_index) {
case MLXSW_SP_KVDL_PART_SINGLE:
resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE;
break;
case MLXSW_SP_KVDL_PART_CHUNKS:
resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS;
break;
case MLXSW_SP_KVDL_PART_LARGE_CHUNKS:
resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS;
break;
default:
return -EINVAL;
}
err = devlink_resource_size_get(devlink, resource_id, &resource_size);
err = devlink_resource_size_get(devlink, info->resource_id,
&resource_size);
if (err) {
need_update = false;
resource_size = info->end_index - info->start_index + 1;
@@ -284,86 +251,77 @@ static int mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
if (!part)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
part->info = kmemdup(info, sizeof(*part->info), GFP_KERNEL);
if (!part->info)
goto err_part_info_alloc;
memcpy(&part->info, info, sizeof(part->info));
list_add(&part->list, &mlxsw_sp->kvdl->parts_list);
if (need_update)
mlxsw_sp_kvdl_part_update(mlxsw_sp, part, resource_size);
return 0;
err_part_info_alloc:
kfree(part);
return -ENOMEM;
mlxsw_sp_kvdl_part_update(part, part_prev, resource_size);
return part;
}
static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp *mlxsw_sp,
unsigned int part_index)
static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part)
{
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp_kvdl_part_find(mlxsw_sp, part_index);
if (!part)
return;
list_del(&part->list);
kfree(part->info);
kfree(part);
}
static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
const struct mlxsw_sp_kvdl_part_info *info;
struct mlxsw_sp_kvdl_part *part_prev = NULL;
int err, i;
INIT_LIST_HEAD(&mlxsw_sp->kvdl->parts_list);
for (i = 0; i < ARRAY_SIZE(kvdl_parts_info); i++) {
err = mlxsw_sp_kvdl_part_init(mlxsw_sp, i);
if (err)
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
info = &mlxsw_sp_kvdl_parts_info[i];
kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info,
part_prev);
if (IS_ERR(kvdl->parts[i])) {
err = PTR_ERR(kvdl->parts[i]);
goto err_kvdl_part_init;
}
part_prev = kvdl->parts[i];
}
return 0;
err_kvdl_part_init:
for (i--; i >= 0; i--)
mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
return err;
}
static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
int i;
for (i = ARRAY_SIZE(kvdl_parts_info) - 1; i >= 0; i--)
mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
}
static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
{
const struct mlxsw_sp_kvdl_part_info *info = &part->info;
unsigned int nr_entries;
int bit = -1;
u64 occ = 0;
nr_entries = (part->info->end_index -
part->info->start_index + 1) /
part->info->alloc_size;
nr_entries = (info->end_index -
info->start_index + 1) /
info->alloc_size;
while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
< nr_entries)
occ += part->info->alloc_size;
occ += info->alloc_size;
return occ;
}
u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl_part *part;
u64 occ = 0;
int i;
list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list)
occ += mlxsw_sp_kvdl_part_occ(part);
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]);
return occ;
}
@@ -374,10 +332,7 @@ static u64 mlxsw_sp_kvdl_single_occ_get(struct devlink *devlink)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp_kvdl_part_find(mlxsw_sp, MLXSW_SP_KVDL_PART_SINGLE);
if (!part)
return -EINVAL;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE];
return mlxsw_sp_kvdl_part_occ(part);
}
@@ -387,10 +342,7 @@ static u64 mlxsw_sp_kvdl_chunks_occ_get(struct devlink *devlink)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp_kvdl_part_find(mlxsw_sp, MLXSW_SP_KVDL_PART_CHUNKS);
if (!part)
return -EINVAL;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS];
return mlxsw_sp_kvdl_part_occ(part);
}
@@ -400,87 +352,65 @@ static u64 mlxsw_sp_kvdl_large_chunks_occ_get(struct devlink *devlink)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp_kvdl_part_find(mlxsw_sp,
MLXSW_SP_KVDL_PART_LARGE_CHUNKS);
if (!part)
return -EINVAL;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS];
return mlxsw_sp_kvdl_part_occ(part);
}
static struct devlink_resource_ops mlxsw_sp_kvdl_single_ops = {
static const struct devlink_resource_ops mlxsw_sp_kvdl_single_ops = {
.occ_get = mlxsw_sp_kvdl_single_occ_get,
};
static struct devlink_resource_ops mlxsw_sp_kvdl_chunks_ops = {
static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_ops = {
.occ_get = mlxsw_sp_kvdl_chunks_occ_get,
};
static struct devlink_resource_ops mlxsw_sp_kvdl_chunks_large_ops = {
static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_large_ops = {
.occ_get = mlxsw_sp_kvdl_large_chunks_occ_get,
};
static struct devlink_resource_size_params mlxsw_sp_kvdl_single_size_params = {
.size_min = 0,
.size_granularity = 1,
.unit = DEVLINK_RESOURCE_UNIT_ENTRY,
};
static struct devlink_resource_size_params mlxsw_sp_kvdl_chunks_size_params = {
.size_min = 0,
.size_granularity = MLXSW_SP_CHUNK_MAX,
.unit = DEVLINK_RESOURCE_UNIT_ENTRY,
};
static struct devlink_resource_size_params mlxsw_sp_kvdl_large_chunks_size_params = {
.size_min = 0,
.size_granularity = MLXSW_SP_LARGE_CHUNK_MAX,
.unit = DEVLINK_RESOURCE_UNIT_ENTRY,
};
static void
mlxsw_sp_kvdl_resource_size_params_prepare(struct devlink *devlink)
int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct devlink *devlink = priv_to_devlink(mlxsw_core);
static struct devlink_resource_size_params size_params;
u32 kvdl_max_size;
int err;
kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
mlxsw_sp_kvdl_single_size_params.size_max = kvdl_max_size;
mlxsw_sp_kvdl_chunks_size_params.size_max = kvdl_max_size;
mlxsw_sp_kvdl_large_chunks_size_params.size_max = kvdl_max_size;
}
int mlxsw_sp_kvdl_resources_register(struct devlink *devlink)
{
int err;
mlxsw_sp_kvdl_resource_size_params_prepare(devlink);
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
MLXSW_SP_KVDL_SINGLE_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
MLXSW_SP_RESOURCE_KVD_LINEAR,
&mlxsw_sp_kvdl_single_size_params,
&size_params,
&mlxsw_sp_kvdl_single_ops);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
MLXSW_SP_KVDL_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
&mlxsw_sp_kvdl_chunks_size_params,
&size_params,
&mlxsw_sp_kvdl_chunks_ops);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
&mlxsw_sp_kvdl_large_chunks_size_params,
&size_params,
&mlxsw_sp_kvdl_chunks_large_ops);
return err;
}
......
@@ -510,7 +510,6 @@ static const struct mlxsw_config_profile mlxsw_sib_config_profile = {
.type = MLXSW_PORT_SWID_TYPE_IB,
}
},
.resource_query_enable = 0,
};
static struct mlxsw_driver mlxsw_sib_driver = {
......
@@ -1706,7 +1706,6 @@ static const struct mlxsw_config_profile mlxsw_sx_config_profile = {
.type = MLXSW_PORT_SWID_TYPE_IB,
}
},
.resource_query_enable = 0,
};
static struct mlxsw_driver mlxsw_sx_driver = {
......