Commit 3d759e9e authored by Jakub Kicinski

Merge branch 'devlink-code-split-and-structured-instance-walk'

Jakub Kicinski says:

====================
devlink: code split and structured instance walk

Split devlink.c into a handful of files, trying to keep the "core"
code away from all the command-specific implementations.
The core code has been quite scattered until now. Going forward we can
consider using a source file per sub-object; I think that it's quite
beneficial to newcomers (based on the relative ease with which folks
contribute to ethtool vs devlink). But this series doesn't split
everything out yet - partly due to backporting concerns,
but mostly due to lack of time. The bulk of the netlink command
handling is left in a leftover.c file.

Introduce a context structure for dumps, and use it to store
the ID of the last dumped devlink instance.
This means we don't have to restart the walk from 0 each time.

Finally, introduce a "structured walk": a centralized dump handler
in devlink/netlink.c which walks the devlink instances and deals with
refcounting/locking, simplifying the per-object implementations quite
a bit. Inspired by the ethtool code.

v1: https://lore.kernel.org/all/20230104041636.226398-1-kuba@kernel.org/
RFC: https://lore.kernel.org/all/20221215020155.1619839-1-kuba@kernel.org/
====================

Link: https://lore.kernel.org/r/20230105040531.353563-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 0da68553 5ce76d78
@@ -6099,7 +6099,7 @@ S: Supported
F: Documentation/networking/devlink
F: include/net/devlink.h
F: include/uapi/linux/devlink.h
-F: net/core/devlink.c
+F: net/devlink/
DH ELECTRONICS IMX6 DHCOM/DHCOR BOARD SUPPORT
M: Christoph Niedermaier <cniedermaier@dh-electronics.com>
......
@@ -263,6 +263,10 @@ struct netlink_callback {
};
};
+#define NL_ASSERT_DUMP_CTX_FITS(type_name) \
+	BUILD_BUG_ON(sizeof(type_name) > \
+		     sizeof_field(struct netlink_callback, ctx))
struct netlink_notify {
struct net *net;
u32 portid;
......
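The new assert is meant to be invoked by any subsystem that overlays its own dump context on netlink_callback::ctx, so a size overflow is caught at build time. A minimal hypothetical sketch of the intended usage (struct foo_dump_ctx and foo_dump_ctx() are illustrative names, not part of this series; devlink's devlink_dump_state() and the ctnetlink hunk at the end of this diff are the real conversions):

/* Hypothetical subsystem-private dump state overlaid on cb->ctx. */
struct foo_dump_ctx {
	unsigned long last_index;
	int sub_idx;
};

static inline struct foo_dump_ctx *foo_dump_ctx(struct netlink_callback *cb)
{
	/* Fails the build if the context ever outgrows cb->ctx. */
	NL_ASSERT_DUMP_CTX_FITS(struct foo_dump_ctx);
	return (struct foo_dump_ctx *)cb->ctx;
}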
@@ -23,6 +23,7 @@ obj-$(CONFIG_BPFILTER) += bpfilter/
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
obj-$(CONFIG_BRIDGE) += bridge/
+obj-$(CONFIG_NET_DEVLINK) += devlink/
obj-$(CONFIG_NET_DSA) += dsa/
obj-$(CONFIG_ATALK) += appletalk/
obj-$(CONFIG_X25) += x25/
......
@@ -33,7 +33,6 @@ obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
obj-$(CONFIG_LWTUNNEL_BPF) += lwt_bpf.o
obj-$(CONFIG_DST_CACHE) += dst_cache.o
obj-$(CONFIG_HWBM) += hwbm.o
-obj-$(CONFIG_NET_DEVLINK) += devlink.o
obj-$(CONFIG_GRO_CELLS) += gro_cells.o
obj-$(CONFIG_FAILOVER) += failover.o
obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
......
# SPDX-License-Identifier: GPL-2.0
obj-y := leftover.o core.o netlink.o
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2016 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
*/
#include <net/genetlink.h>
#include "devl_internal.h"
DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
void *devlink_priv(struct devlink *devlink)
{
return &devlink->priv;
}
EXPORT_SYMBOL_GPL(devlink_priv);
struct devlink *priv_to_devlink(void *priv)
{
return container_of(priv, struct devlink, priv);
}
EXPORT_SYMBOL_GPL(priv_to_devlink);
struct device *devlink_to_dev(const struct devlink *devlink)
{
return devlink->dev;
}
EXPORT_SYMBOL_GPL(devlink_to_dev);
struct net *devlink_net(const struct devlink *devlink)
{
return read_pnet(&devlink->_net);
}
EXPORT_SYMBOL_GPL(devlink_net);
void devl_assert_locked(struct devlink *devlink)
{
lockdep_assert_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_assert_locked);
#ifdef CONFIG_LOCKDEP
/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
bool devl_lock_is_held(struct devlink *devlink)
{
return lockdep_is_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock_is_held);
#endif
void devl_lock(struct devlink *devlink)
{
mutex_lock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock);
int devl_trylock(struct devlink *devlink)
{
return mutex_trylock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_trylock);
void devl_unlock(struct devlink *devlink)
{
mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_unlock);
struct devlink *__must_check devlink_try_get(struct devlink *devlink)
{
if (refcount_inc_not_zero(&devlink->refcount))
return devlink;
return NULL;
}
static void __devlink_put_rcu(struct rcu_head *head)
{
struct devlink *devlink = container_of(head, struct devlink, rcu);
complete(&devlink->comp);
}
void devlink_put(struct devlink *devlink)
{
if (refcount_dec_and_test(&devlink->refcount))
/* Make sure the unregister operation that may be awaiting the
* completion is unblocked only after all users are gone and the
* RCU grace period has ended.
*/
call_rcu(&devlink->rcu, __devlink_put_rcu);
}
struct devlink *
devlinks_xa_find_get(struct net *net, unsigned long *indexp,
void * (*xa_find_fn)(struct xarray *, unsigned long *,
unsigned long, xa_mark_t))
{
struct devlink *devlink;
rcu_read_lock();
retry:
devlink = xa_find_fn(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED);
if (!devlink)
goto unlock;
/* In case devlink_unregister() was already called and the "unregistering"
* mark was set, do not take a devlink reference here.
* This prevents a live-lock where devlink_unregister() waits for completion.
*/
if (xa_get_mark(&devlinks, *indexp, DEVLINK_UNREGISTERING))
goto retry;
/* For a possible retry, xa_find_after() should always be used */
xa_find_fn = xa_find_after;
if (!devlink_try_get(devlink))
goto retry;
if (!net_eq(devlink_net(devlink), net)) {
devlink_put(devlink);
goto retry;
}
unlock:
rcu_read_unlock();
return devlink;
}
struct devlink *
devlinks_xa_find_get_first(struct net *net, unsigned long *indexp)
{
return devlinks_xa_find_get(net, indexp, xa_find);
}
struct devlink *
devlinks_xa_find_get_next(struct net *net, unsigned long *indexp)
{
return devlinks_xa_find_get(net, indexp, xa_find_after);
}
/**
* devlink_set_features - Set devlink supported features
*
* @devlink: devlink
* @features: devlink support features
*
* This interface allows us to set reload ops separately from
* the devlink_alloc() call.
*/
void devlink_set_features(struct devlink *devlink, u64 features)
{
ASSERT_DEVLINK_NOT_REGISTERED(devlink);
WARN_ON(features & DEVLINK_F_RELOAD &&
!devlink_reload_supported(devlink->ops));
devlink->features = features;
}
EXPORT_SYMBOL_GPL(devlink_set_features);
/**
* devlink_register - Register devlink instance
*
* @devlink: devlink
*/
void devlink_register(struct devlink *devlink)
{
ASSERT_DEVLINK_NOT_REGISTERED(devlink);
/* Make sure that we are in .probe() routine */
xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
devlink_notify_register(devlink);
}
EXPORT_SYMBOL_GPL(devlink_register);
/**
* devlink_unregister - Unregister devlink instance
*
* @devlink: devlink
*/
void devlink_unregister(struct devlink *devlink)
{
ASSERT_DEVLINK_REGISTERED(devlink);
/* Make sure that we are in .remove() routine */
xa_set_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
devlink_put(devlink);
wait_for_completion(&devlink->comp);
devlink_notify_unregister(devlink);
xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
xa_clear_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
}
EXPORT_SYMBOL_GPL(devlink_unregister);
/**
* devlink_alloc_ns - Allocate new devlink instance resources
* in a specific namespace
*
* @ops: ops
* @priv_size: size of user private data
* @net: net namespace
* @dev: parent device
*
* Allocate new devlink instance resources, including devlink index
* and name.
*/
struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
size_t priv_size, struct net *net,
struct device *dev)
{
struct devlink *devlink;
static u32 last_id;
int ret;
WARN_ON(!ops || !dev);
if (!devlink_reload_actions_valid(ops))
return NULL;
devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
if (!devlink)
return NULL;
ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
&last_id, GFP_KERNEL);
if (ret < 0)
goto err_xa_alloc;
devlink->netdevice_nb.notifier_call = devlink_port_netdevice_event;
ret = register_netdevice_notifier_net(net, &devlink->netdevice_nb);
if (ret)
goto err_register_netdevice_notifier;
devlink->dev = dev;
devlink->ops = ops;
xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
write_pnet(&devlink->_net, net);
INIT_LIST_HEAD(&devlink->rate_list);
INIT_LIST_HEAD(&devlink->linecard_list);
INIT_LIST_HEAD(&devlink->sb_list);
INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
INIT_LIST_HEAD(&devlink->resource_list);
INIT_LIST_HEAD(&devlink->param_list);
INIT_LIST_HEAD(&devlink->region_list);
INIT_LIST_HEAD(&devlink->reporter_list);
INIT_LIST_HEAD(&devlink->trap_list);
INIT_LIST_HEAD(&devlink->trap_group_list);
INIT_LIST_HEAD(&devlink->trap_policer_list);
lockdep_register_key(&devlink->lock_key);
mutex_init(&devlink->lock);
lockdep_set_class(&devlink->lock, &devlink->lock_key);
mutex_init(&devlink->reporters_lock);
mutex_init(&devlink->linecards_lock);
refcount_set(&devlink->refcount, 1);
init_completion(&devlink->comp);
return devlink;
err_register_netdevice_notifier:
xa_erase(&devlinks, devlink->index);
err_xa_alloc:
kfree(devlink);
return NULL;
}
EXPORT_SYMBOL_GPL(devlink_alloc_ns);
/**
* devlink_free - Free devlink instance resources
*
* @devlink: devlink
*/
void devlink_free(struct devlink *devlink)
{
ASSERT_DEVLINK_NOT_REGISTERED(devlink);
mutex_destroy(&devlink->linecards_lock);
mutex_destroy(&devlink->reporters_lock);
mutex_destroy(&devlink->lock);
lockdep_unregister_key(&devlink->lock_key);
WARN_ON(!list_empty(&devlink->trap_policer_list));
WARN_ON(!list_empty(&devlink->trap_group_list));
WARN_ON(!list_empty(&devlink->trap_list));
WARN_ON(!list_empty(&devlink->reporter_list));
WARN_ON(!list_empty(&devlink->region_list));
WARN_ON(!list_empty(&devlink->param_list));
WARN_ON(!list_empty(&devlink->resource_list));
WARN_ON(!list_empty(&devlink->dpipe_table_list));
WARN_ON(!list_empty(&devlink->sb_list));
WARN_ON(!list_empty(&devlink->rate_list));
WARN_ON(!list_empty(&devlink->linecard_list));
WARN_ON(!xa_empty(&devlink->ports));
xa_destroy(&devlink->snapshot_ids);
xa_destroy(&devlink->ports);
WARN_ON_ONCE(unregister_netdevice_notifier_net(devlink_net(devlink),
&devlink->netdevice_nb));
xa_erase(&devlinks, devlink->index);
kfree(devlink);
}
EXPORT_SYMBOL_GPL(devlink_free);
static void __net_exit devlink_pernet_pre_exit(struct net *net)
{
struct devlink *devlink;
u32 actions_performed;
unsigned long index;
int err;
/* In case network namespace is getting destroyed, reload
* all devlink instances from this namespace into init_net.
*/
devlinks_xa_for_each_registered_get(net, index, devlink) {
WARN_ON(!(devlink->features & DEVLINK_F_RELOAD));
mutex_lock(&devlink->lock);
err = devlink_reload(devlink, &init_net,
DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
DEVLINK_RELOAD_LIMIT_UNSPEC,
&actions_performed, NULL);
mutex_unlock(&devlink->lock);
if (err && err != -EOPNOTSUPP)
pr_warn("Failed to reload devlink instance into init_net\n");
devlink_put(devlink);
}
}
static struct pernet_operations devlink_pernet_ops __net_initdata = {
.pre_exit = devlink_pernet_pre_exit,
};
static int __init devlink_init(void)
{
int err;
err = genl_register_family(&devlink_nl_family);
if (err)
goto out;
err = register_pernet_subsys(&devlink_pernet_ops);
out:
WARN_ON(err);
return err;
}
subsys_initcall(devlink_init);
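For orientation, the exported entry points above pair up across a driver's probe/remove path; the registration call is the point at which user space can start talking to the instance (see the locking comment in devl_internal.h below). A rough, hypothetical driver skeleton under that assumption (foo_probe(), foo_remove() and struct foo_priv are illustrative only):

#include <net/devlink.h>

struct foo_priv {
	struct device *dev;	/* illustrative private state */
};

static const struct devlink_ops foo_devlink_ops = {
	/* .reload_down/.reload_up would be required before setting
	 * DEVLINK_F_RELOAD via devlink_set_features().
	 */
};

static int foo_probe(struct device *dev)
{
	struct devlink *devlink;
	struct foo_priv *priv;

	devlink = devlink_alloc_ns(&foo_devlink_ops, sizeof(*priv),
				   &init_net, dev);
	if (!devlink)
		return -ENOMEM;
	priv = devlink_priv(devlink);
	priv->dev = dev;

	/* Sub-objects are set up here, while the instance is still
	 * invisible to user space (ASSERT_DEVLINK_NOT_REGISTERED).
	 */
	devlink_register(devlink);
	return 0;
}

static void foo_remove(struct devlink *devlink)
{
	devlink_unregister(devlink);	/* waits for outstanding references */
	devlink_free(devlink);
}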
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Copyright (c) 2016 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
*/
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <net/net_namespace.h>
#define DEVLINK_REGISTERED XA_MARK_1
#define DEVLINK_UNREGISTERING XA_MARK_2
#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
(__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)
struct devlink_dev_stats {
u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
};
struct devlink {
u32 index;
struct xarray ports;
struct list_head rate_list;
struct list_head sb_list;
struct list_head dpipe_table_list;
struct list_head resource_list;
struct list_head param_list;
struct list_head region_list;
struct list_head reporter_list;
struct mutex reporters_lock; /* protects reporter_list */
struct devlink_dpipe_headers *dpipe_headers;
struct list_head trap_list;
struct list_head trap_group_list;
struct list_head trap_policer_list;
struct list_head linecard_list;
struct mutex linecards_lock; /* protects linecard_list */
const struct devlink_ops *ops;
u64 features;
struct xarray snapshot_ids;
struct devlink_dev_stats stats;
struct device *dev;
possible_net_t _net;
/* Serializes access to devlink instance specific objects such as
* port, sb, dpipe, resource, params, region, traps and more.
*/
struct mutex lock;
struct lock_class_key lock_key;
u8 reload_failed:1;
refcount_t refcount;
struct completion comp;
struct rcu_head rcu;
struct notifier_block netdevice_nb;
char priv[] __aligned(NETDEV_ALIGN);
};
extern struct xarray devlinks;
extern struct genl_family devlink_nl_family;
/* devlink instances are open to access from user space after the
* devlink_register() call. Such a logical barrier allows us to have certain
* expectations related to locking.
*
* Before *_register() - we are in the initialization stage and no parallel
* access to the devlink instance is possible. All drivers perform that phase
* while implicitly holding the device_lock.
*
* After *_register() - users and the driver can access the devlink instance
* at the same time.
*/
#define ASSERT_DEVLINK_REGISTERED(d) \
WARN_ON_ONCE(!xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
#define ASSERT_DEVLINK_NOT_REGISTERED(d) \
WARN_ON_ONCE(xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
/* Iterate over devlink pointers to which it was possible to get a reference.
* devlink_put() needs to be called for each iterated devlink pointer
* in the loop body in order to release the reference.
*/
#define devlinks_xa_for_each_registered_get(net, index, devlink) \
for (index = 0, \
devlink = devlinks_xa_find_get_first(net, &index); \
devlink; devlink = devlinks_xa_find_get_next(net, &index))
struct devlink *
devlinks_xa_find_get(struct net *net, unsigned long *indexp,
void * (*xa_find_fn)(struct xarray *, unsigned long *,
unsigned long, xa_mark_t));
struct devlink *
devlinks_xa_find_get_first(struct net *net, unsigned long *indexp);
struct devlink *
devlinks_xa_find_get_next(struct net *net, unsigned long *indexp);
/* Netlink */
#define DEVLINK_NL_FLAG_NEED_PORT BIT(0)
#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(1)
#define DEVLINK_NL_FLAG_NEED_RATE BIT(2)
#define DEVLINK_NL_FLAG_NEED_RATE_NODE BIT(3)
#define DEVLINK_NL_FLAG_NEED_LINECARD BIT(4)
enum devlink_multicast_groups {
DEVLINK_MCGRP_CONFIG,
};
/* state held across netlink dumps */
struct devlink_nl_dump_state {
unsigned long instance;
int idx;
union {
/* DEVLINK_CMD_REGION_READ */
struct {
u64 start_offset;
};
/* DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET */
struct {
u64 dump_ts;
};
};
};
struct devlink_gen_cmd {
int (*dump_one)(struct sk_buff *msg, struct devlink *devlink,
struct netlink_callback *cb);
};
/* Iterate over registered devlink instances for a devlink dump.
* devlink_put() needs to be called for each iterated devlink pointer
* in the loop body in order to release the reference.
* Note: this is NOT a generic iterator; it makes assumptions about the use
* of @state and can only be used once per dumpit implementation.
*/
#define devlink_dump_for_each_instance_get(msg, state, devlink) \
for (; (devlink = devlinks_xa_find_get(sock_net(msg->sk), \
&state->instance, xa_find)); \
state->instance++, state->idx = 0)
extern const struct genl_small_ops devlink_nl_ops[56];
struct devlink *devlink_get_from_attrs(struct net *net, struct nlattr **attrs);
void devlink_notify_unregister(struct devlink *devlink);
void devlink_notify_register(struct devlink *devlink);
int devlink_nl_instance_iter_dump(struct sk_buff *msg,
struct netlink_callback *cb);
static inline struct devlink_nl_dump_state *
devlink_dump_state(struct netlink_callback *cb)
{
NL_ASSERT_DUMP_CTX_FITS(struct devlink_nl_dump_state);
return (struct devlink_nl_dump_state *)cb->ctx;
}
/* gen cmds */
extern const struct devlink_gen_cmd devl_gen_inst;
extern const struct devlink_gen_cmd devl_gen_port;
extern const struct devlink_gen_cmd devl_gen_sb;
extern const struct devlink_gen_cmd devl_gen_sb_pool;
extern const struct devlink_gen_cmd devl_gen_sb_port_pool;
extern const struct devlink_gen_cmd devl_gen_sb_tc_pool_bind;
extern const struct devlink_gen_cmd devl_gen_selftests;
extern const struct devlink_gen_cmd devl_gen_param;
extern const struct devlink_gen_cmd devl_gen_region;
extern const struct devlink_gen_cmd devl_gen_info;
extern const struct devlink_gen_cmd devl_gen_trap;
extern const struct devlink_gen_cmd devl_gen_trap_group;
extern const struct devlink_gen_cmd devl_gen_trap_policer;
/* Ports */
int devlink_port_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr);
struct devlink_port *
devlink_port_get_from_info(struct devlink *devlink, struct genl_info *info);
/* Reload */
bool devlink_reload_actions_valid(const struct devlink_ops *ops);
int devlink_reload(struct devlink *devlink, struct net *dest_net,
enum devlink_reload_action action,
enum devlink_reload_limit limit,
u32 *actions_performed, struct netlink_ext_ack *extack);
static inline bool devlink_reload_supported(const struct devlink_ops *ops)
{
return ops->reload_down && ops->reload_up;
}
/* Line cards */
struct devlink_linecard;
struct devlink_linecard *
devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info);
void devlink_linecard_put(struct devlink_linecard *linecard);
/* Rates */
extern const struct devlink_gen_cmd devl_gen_rate_get;
struct devlink_rate *
devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info);
struct devlink_rate *
devlink_rate_node_get_from_info(struct devlink *devlink,
struct genl_info *info);
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2016 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
*/
#include <net/genetlink.h>
#include <net/sock.h>
#include "devl_internal.h"
static const struct genl_multicast_group devlink_nl_mcgrps[] = {
[DEVLINK_MCGRP_CONFIG] = { .name = DEVLINK_GENL_MCGRP_CONFIG_NAME },
};
static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_UNSPEC] = { .strict_start_type =
DEVLINK_ATTR_TRAP_POLICER_ID },
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
[DEVLINK_ATTR_PORT_TYPE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_PORT_TYPE_AUTO,
DEVLINK_PORT_TYPE_IB),
[DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
[DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 },
[DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
[DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
[DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_ESWITCH_MODE_LEGACY,
DEVLINK_ESWITCH_MODE_SWITCHDEV),
[DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
[DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
[DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64},
[DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64},
[DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8 },
[DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
[DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
[DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 },
[DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 },
[DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK] =
NLA_POLICY_BITFIELD32(DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS),
[DEVLINK_ATTR_TRAP_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_TRAP_ACTION] = { .type = NLA_U8 },
[DEVLINK_ATTR_TRAP_GROUP_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_NETNS_PID] = { .type = NLA_U32 },
[DEVLINK_ATTR_NETNS_FD] = { .type = NLA_U32 },
[DEVLINK_ATTR_NETNS_ID] = { .type = NLA_U32 },
[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP] = { .type = NLA_U8 },
[DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32 },
[DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64 },
[DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64 },
[DEVLINK_ATTR_PORT_FUNCTION] = { .type = NLA_NESTED },
[DEVLINK_ATTR_RELOAD_ACTION] = NLA_POLICY_RANGE(NLA_U8, DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
DEVLINK_RELOAD_ACTION_MAX),
[DEVLINK_ATTR_RELOAD_LIMITS] = NLA_POLICY_BITFIELD32(DEVLINK_RELOAD_LIMITS_VALID_MASK),
[DEVLINK_ATTR_PORT_FLAVOUR] = { .type = NLA_U16 },
[DEVLINK_ATTR_PORT_PCI_PF_NUMBER] = { .type = NLA_U16 },
[DEVLINK_ATTR_PORT_PCI_SF_NUMBER] = { .type = NLA_U32 },
[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER] = { .type = NLA_U32 },
[DEVLINK_ATTR_RATE_TYPE] = { .type = NLA_U16 },
[DEVLINK_ATTR_RATE_TX_SHARE] = { .type = NLA_U64 },
[DEVLINK_ATTR_RATE_TX_MAX] = { .type = NLA_U64 },
[DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32 },
[DEVLINK_ATTR_LINECARD_TYPE] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_SELFTESTS] = { .type = NLA_NESTED },
[DEVLINK_ATTR_RATE_TX_PRIORITY] = { .type = NLA_U32 },
[DEVLINK_ATTR_RATE_TX_WEIGHT] = { .type = NLA_U32 },
[DEVLINK_ATTR_REGION_DIRECT] = { .type = NLA_FLAG },
};
struct devlink *devlink_get_from_attrs(struct net *net, struct nlattr **attrs)
{
struct devlink *devlink;
unsigned long index;
char *busname;
char *devname;
if (!attrs[DEVLINK_ATTR_BUS_NAME] || !attrs[DEVLINK_ATTR_DEV_NAME])
return ERR_PTR(-EINVAL);
busname = nla_data(attrs[DEVLINK_ATTR_BUS_NAME]);
devname = nla_data(attrs[DEVLINK_ATTR_DEV_NAME]);
devlinks_xa_for_each_registered_get(net, index, devlink) {
if (strcmp(devlink->dev->bus->name, busname) == 0 &&
strcmp(dev_name(devlink->dev), devname) == 0)
return devlink;
devlink_put(devlink);
}
return ERR_PTR(-ENODEV);
}
static int devlink_nl_pre_doit(const struct genl_split_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
struct devlink_linecard *linecard;
struct devlink_port *devlink_port;
struct devlink *devlink;
int err;
devlink = devlink_get_from_attrs(genl_info_net(info), info->attrs);
if (IS_ERR(devlink))
return PTR_ERR(devlink);
devl_lock(devlink);
info->user_ptr[0] = devlink;
if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
devlink_port = devlink_port_get_from_info(devlink, info);
if (IS_ERR(devlink_port)) {
err = PTR_ERR(devlink_port);
goto unlock;
}
info->user_ptr[1] = devlink_port;
} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT) {
devlink_port = devlink_port_get_from_info(devlink, info);
if (!IS_ERR(devlink_port))
info->user_ptr[1] = devlink_port;
} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE) {
struct devlink_rate *devlink_rate;
devlink_rate = devlink_rate_get_from_info(devlink, info);
if (IS_ERR(devlink_rate)) {
err = PTR_ERR(devlink_rate);
goto unlock;
}
info->user_ptr[1] = devlink_rate;
} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE_NODE) {
struct devlink_rate *rate_node;
rate_node = devlink_rate_node_get_from_info(devlink, info);
if (IS_ERR(rate_node)) {
err = PTR_ERR(rate_node);
goto unlock;
}
info->user_ptr[1] = rate_node;
} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
linecard = devlink_linecard_get_from_info(devlink, info);
if (IS_ERR(linecard)) {
err = PTR_ERR(linecard);
goto unlock;
}
info->user_ptr[1] = linecard;
}
return 0;
unlock:
devl_unlock(devlink);
devlink_put(devlink);
return err;
}
static void devlink_nl_post_doit(const struct genl_split_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
struct devlink_linecard *linecard;
struct devlink *devlink;
devlink = info->user_ptr[0];
if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
linecard = info->user_ptr[1];
devlink_linecard_put(linecard);
}
devl_unlock(devlink);
devlink_put(devlink);
}
static const struct devlink_gen_cmd *devl_gen_cmds[] = {
[DEVLINK_CMD_GET] = &devl_gen_inst,
[DEVLINK_CMD_PORT_GET] = &devl_gen_port,
[DEVLINK_CMD_SB_GET] = &devl_gen_sb,
[DEVLINK_CMD_SB_POOL_GET] = &devl_gen_sb_pool,
[DEVLINK_CMD_SB_PORT_POOL_GET] = &devl_gen_sb_port_pool,
[DEVLINK_CMD_SB_TC_POOL_BIND_GET] = &devl_gen_sb_tc_pool_bind,
[DEVLINK_CMD_PARAM_GET] = &devl_gen_param,
[DEVLINK_CMD_REGION_GET] = &devl_gen_region,
[DEVLINK_CMD_INFO_GET] = &devl_gen_info,
[DEVLINK_CMD_RATE_GET] = &devl_gen_rate_get,
[DEVLINK_CMD_TRAP_GET] = &devl_gen_trap,
[DEVLINK_CMD_TRAP_GROUP_GET] = &devl_gen_trap_group,
[DEVLINK_CMD_TRAP_POLICER_GET] = &devl_gen_trap_policer,
[DEVLINK_CMD_SELFTESTS_GET] = &devl_gen_selftests,
};
int devlink_nl_instance_iter_dump(struct sk_buff *msg,
struct netlink_callback *cb)
{
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
const struct devlink_gen_cmd *cmd;
struct devlink *devlink;
int err = 0;
cmd = devl_gen_cmds[info->op.cmd];
devlink_dump_for_each_instance_get(msg, state, devlink) {
devl_lock(devlink);
err = cmd->dump_one(msg, devlink, cb);
devl_unlock(devlink);
devlink_put(devlink);
if (err)
break;
/* restart sub-object walk for the next instance */
state->idx = 0;
}
if (err != -EMSGSIZE)
return err;
return msg->len;
}
struct genl_family devlink_nl_family __ro_after_init = {
.name = DEVLINK_GENL_NAME,
.version = DEVLINK_GENL_VERSION,
.maxattr = DEVLINK_ATTR_MAX,
.policy = devlink_nl_policy,
.netnsok = true,
.parallel_ops = true,
.pre_doit = devlink_nl_pre_doit,
.post_doit = devlink_nl_post_doit,
.module = THIS_MODULE,
.small_ops = devlink_nl_ops,
.n_small_ops = ARRAY_SIZE(devlink_nl_ops),
.resv_start_op = DEVLINK_CMD_SELFTESTS_RUN + 1,
.mcgrps = devlink_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(devlink_nl_mcgrps),
};
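The per-object implementations that plug into this walk still live in leftover.c and are not shown here, but under the new scheme a dump_one() callback takes roughly the shape of the following hypothetical sketch. Everything named "foo" (the list, the fill helper, the generated command) is illustrative; the point is that devlink_nl_instance_iter_dump() above already takes the instance lock, holds the reference and resets state->idx between instances, so the callback only walks its own sub-objects and reports -EMSGSIZE when the message fills up:

/* Hypothetical per-object dump callback; called with devlink->lock held
 * and a reference taken. state->idx records how far the previous
 * message got within this instance.
 */
static int devlink_nl_foo_get_dump_one(struct sk_buff *msg,
				       struct devlink *devlink,
				       struct netlink_callback *cb)
{
	struct devlink_nl_dump_state *state = devlink_dump_state(cb);
	struct devlink_foo *foo;	/* illustrative sub-object type */
	int idx = 0;

	list_for_each_entry(foo, &devlink->foo_list, list) {
		if (idx < state->idx) {
			idx++;
			continue;
		}
		if (devlink_nl_foo_fill(msg, devlink, foo,
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI)) {
			/* Remember where to resume in the next message. */
			state->idx = idx;
			return -EMSGSIZE;
		}
		idx++;
	}
	return 0;
}

const struct devlink_gen_cmd devl_gen_foo = {
	.dump_one = devlink_nl_foo_get_dump_one,
};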
@@ -3866,7 +3866,7 @@ static int __init ctnetlink_init(void)
{
int ret;
-BUILD_BUG_ON(sizeof(struct ctnetlink_list_dump_ctx) > sizeof_field(struct netlink_callback, ctx));
+NL_ASSERT_DUMP_CTX_FITS(struct ctnetlink_list_dump_ctx);
ret = nfnetlink_subsys_register(&ctnl_subsys);
if (ret < 0) {
......