Commit b1351527 authored by Jakub Kicinski

Merge branch 'devlink-expose-instance-locking-and-simplify-port-splitting'

Jakub Kicinski says:

====================
devlink: expose instance locking and simplify port splitting

This series puts the devlink ports fully under the devlink instance
lock's protection. As discussed in the past it implements my preferred
solution of exposing the instance lock to the drivers. This way drivers
which want to support port splitting can lock the devlink instance
themselves on the probe path, and we can take that lock in the core
on the split/unsplit paths.

nfp and mlxsw are converted, with slightly deeper changes done in
nfp since I'm more familiar with that driver.

Now that the devlink port is protected, we can pass a pointer to
the drivers instead of passing a port index and forcing the drivers
to do their own lookups. Both nfp and mlxsw can container_of() to
their own structures.
====================

Link: https://lore.kernel.org/r/20220315060009.1028519-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 49045b9c 706217c1
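
To illustrate the change described in the cover letter above -- the split/unsplit ops now receive a struct devlink_port pointer and drivers recover their own structure with container_of() -- here is a minimal sketch. "struct my_port", my_devlink_port_split() and the hw_id field are hypothetical stand-ins, not code from this series.

/* Hypothetical driver code sketching the new ->port_split() signature. */
#include <net/devlink.h>

struct my_port {
	struct devlink_port dl_port;	/* registered with devl_port_register() */
	unsigned int hw_id;		/* driver's own hardware port id */
};

static int my_devlink_port_split(struct devlink *devlink,
				 struct devlink_port *port,
				 unsigned int count,
				 struct netlink_ext_ack *extack)
{
	/* no index-to-port lookup needed any more */
	struct my_port *p = container_of(port, struct my_port, dl_port);

	/* defensive check; core already validated that the port is splittable */
	if (count < 2) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid split count");
		return -EINVAL;
	}

	/* devlink core holds the instance lock here; program the hardware
	 * for p->hw_id accordingly.
	 */
	pr_debug("splitting port %u into %u\n", p->hw_id, count);
	return 0;
}
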
......@@ -4,6 +4,22 @@ Linux Devlink Documentation
devlink is an API to expose device information and resources not directly
related to any device class, such as chip-wide/switch-ASIC-wide configuration.
Locking
-------
Driver-facing APIs are currently transitioning to allow more explicit
locking. Drivers can use the existing ``devlink_*`` set of APIs, or
new APIs prefixed by ``devl_*``. The older APIs handle all the locking
in devlink core, but don't allow registration of most sub-objects once
the main devlink object is itself registered. The newer ``devl_*`` APIs assume
the devlink instance lock is already held. Drivers can take the instance
lock by calling ``devl_lock()``. It is also held in most of the callbacks.
Eventually all callbacks will be invoked under the devlink instance lock;
refer to the use of the ``DEVLINK_NL_FLAG_NO_LOCK`` flag in devlink core
to find out which callbacks have not been converted yet.
Drivers are encouraged to use the devlink instance lock for their own needs.
Interface documentation
-----------------------
......
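
A minimal sketch of the probe-path pattern described in the documentation above, assuming a hypothetical driver structure (my_pf, my_register_ports(); not part of the patch): the driver takes the instance lock itself and then uses the devl_* calls, which expect the lock to be held.

#include <net/devlink.h>

struct my_pf {
	struct devlink_port dl_port;
};

static int my_register_ports(struct devlink *devlink, struct my_pf *pf)
{
	int err;

	/* devlink_port_attrs_set() etc. omitted for brevity */
	devl_lock(devlink);			/* driver takes the instance lock itself */
	err = devl_port_register(devlink, &pf->dl_port, 0);
	devl_unlock(devlink);

	return err;
}

static void my_unregister_ports(struct devlink *devlink, struct my_pf *pf)
{
	devl_lock(devlink);
	devl_port_unregister(&pf->dl_port);	/* devl_* calls assume the lock is held */
	devl_unlock(devlink);
}
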
......@@ -1217,36 +1217,37 @@ static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
ARRAY_SIZE(mlxsw_core_fw_devlink_params));
}
static void *__dl_port(struct devlink_port *devlink_port)
{
return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
static int mlxsw_devlink_port_split(struct devlink *devlink,
unsigned int port_index,
struct devlink_port *port,
unsigned int count,
struct netlink_ext_ack *extack)
{
struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
if (port_index >= mlxsw_core->max_ports) {
NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
return -EINVAL;
}
if (!mlxsw_core->driver->port_split)
return -EOPNOTSUPP;
return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
extack);
return mlxsw_core->driver->port_split(mlxsw_core,
mlxsw_core_port->local_port,
count, extack);
}
static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
unsigned int port_index,
struct devlink_port *port,
struct netlink_ext_ack *extack)
{
struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
if (port_index >= mlxsw_core->max_ports) {
NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
return -EINVAL;
}
if (!mlxsw_core->driver->port_unsplit)
return -EOPNOTSUPP;
return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
return mlxsw_core->driver->port_unsplit(mlxsw_core,
mlxsw_core_port->local_port,
extack);
}
......@@ -1280,11 +1281,6 @@ mlxsw_devlink_sb_pool_set(struct devlink *devlink,
extack);
}
static void *__dl_port(struct devlink_port *devlink_port)
{
return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
enum devlink_port_type port_type)
{
......@@ -2983,7 +2979,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
attrs.switch_id.id_len = switch_id_len;
mlxsw_core_port->local_port = local_port;
devlink_port_attrs_set(devlink_port, &attrs);
err = devlink_port_register(devlink, devlink_port, local_port);
err = devl_port_register(devlink, devlink_port, local_port);
if (err)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
return err;
......@@ -2995,7 +2991,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port
&mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
devlink_port_unregister(devlink_port);
devl_port_unregister(devlink_port);
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
......
......@@ -422,6 +422,7 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
struct netlink_ext_ack *extack)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
struct devlink *devlink = priv_to_devlink(mlxsw_core);
int err;
mlxsw_m->core = mlxsw_core;
......@@ -437,7 +438,9 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
return err;
}
devl_lock(devlink);
err = mlxsw_m_ports_create(mlxsw_m);
devl_unlock(devlink);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
return err;
......@@ -449,8 +452,11 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
struct devlink *devlink = priv_to_devlink(mlxsw_core);
devl_lock(devlink);
mlxsw_m_ports_remove(mlxsw_m);
devl_unlock(devlink);
}
static const struct mlxsw_config_profile mlxsw_m_config_profile;
......
......@@ -2818,6 +2818,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct devlink *devlink = priv_to_devlink(mlxsw_core);
int err;
mlxsw_sp->core = mlxsw_core;
......@@ -2978,7 +2979,9 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_sample_trigger_init;
}
devl_lock(devlink);
err = mlxsw_sp_ports_create(mlxsw_sp);
devl_unlock(devlink);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
goto err_ports_create;
......@@ -3159,8 +3162,12 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct devlink *devlink = priv_to_devlink(mlxsw_core);
devl_lock(devlink);
mlxsw_sp_ports_remove(mlxsw_sp);
devl_unlock(devlink);
rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
mlxsw_sp_port_module_info_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
......
......@@ -266,7 +266,7 @@ nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
int i, err, count = 0;
reprs = rcu_dereference_protected(app->reprs[type],
lockdep_is_held(&app->pf->lock));
nfp_app_is_locked(app));
if (!reprs)
return 0;
......@@ -295,7 +295,7 @@ nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
if (!tot_repl)
return 0;
lockdep_assert_held(&app->pf->lock);
assert_nfp_app_locked(app);
if (!wait_event_timeout(priv->reify_wait_queue,
atomic_read(replies) >= tot_repl,
NFP_FL_REPLY_TIMEOUT)) {
......
......@@ -121,7 +121,7 @@ struct nfp_reprs *
nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type)
{
return rcu_dereference_protected(app->reprs[type],
lockdep_is_held(&app->pf->lock));
nfp_app_is_locked(app));
}
struct nfp_reprs *
......
......@@ -75,7 +75,7 @@ extern const struct nfp_app_type app_abm;
* @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program
* @eswitch_mode_get: get SR-IOV eswitch mode
* @eswitch_mode_set: set SR-IOV eswitch mode (under pf->lock)
* @eswitch_mode_set: set SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
* @dev_get: get representor or internal port representing netdev
......@@ -174,6 +174,16 @@ struct nfp_app {
void *priv;
};
static inline void assert_nfp_app_locked(struct nfp_app *app)
{
devl_assert_locked(priv_to_devlink(app->pf));
}
static inline bool nfp_app_is_locked(struct nfp_app *app)
{
return devl_lock_is_held(priv_to_devlink(app->pf));
}
void nfp_check_rhashtable_empty(void *ptr, void *arg);
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
......
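
The two helpers above replace the pf->lock based checks; a sketch of how such a helper serves as the lockdep condition for rcu_dereference_protected(), assuming a hypothetical per-app RCU pointer (my_state_ptr and struct my_state are not existing nfp symbols):

#include "nfp_app.h"	/* assumed include for the helpers above */

struct my_state {
	u32 generation;
};

static struct my_state __rcu *my_state_ptr;

static struct my_state *my_state_get_locked(struct nfp_app *app)
{
	assert_nfp_app_locked(app);	/* lockdep splat if the instance lock is not held */

	/* the condition is only evaluated under CONFIG_PROVE_RCU, which is
	 * why devl_lock_is_held() can live behind CONFIG_LOCKDEP
	 */
	return rcu_dereference_protected(my_state_ptr, nfp_app_is_locked(app));
}
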
......@@ -26,12 +26,11 @@ nfp_devlink_fill_eth_port(struct nfp_port *port,
}
static int
nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf, unsigned int port_index,
nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf,
struct devlink_port *dl_port,
struct nfp_eth_table_port *copy)
{
struct nfp_port *port;
port = nfp_port_from_id(pf, NFP_PORT_PHYS_PORT, port_index);
struct nfp_port *port = container_of(dl_port, struct nfp_port, dl_port);
return nfp_devlink_fill_eth_port(port, copy);
}
......@@ -62,7 +61,7 @@ nfp_devlink_set_lanes(struct nfp_pf *pf, unsigned int idx, unsigned int lanes)
}
static int
nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
nfp_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
unsigned int count, struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
......@@ -70,33 +69,25 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
unsigned int lanes;
int ret;
mutex_lock(&pf->lock);
rtnl_lock();
ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
ret = nfp_devlink_fill_eth_port_from_id(pf, port, &eth_port);
rtnl_unlock();
if (ret)
goto out;
return ret;
if (eth_port.port_lanes % count) {
ret = -EINVAL;
goto out;
}
if (eth_port.port_lanes % count)
return -EINVAL;
/* Special case the 100G CXP -> 2x40G split */
lanes = eth_port.port_lanes / count;
if (eth_port.lanes == 10 && count == 2)
lanes = 8 / count;
ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
out:
mutex_unlock(&pf->lock);
return ret;
return nfp_devlink_set_lanes(pf, eth_port.index, lanes);
}
static int
nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
nfp_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
......@@ -104,29 +95,21 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
unsigned int lanes;
int ret;
mutex_lock(&pf->lock);
rtnl_lock();
ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
ret = nfp_devlink_fill_eth_port_from_id(pf, port, &eth_port);
rtnl_unlock();
if (ret)
goto out;
return ret;
if (!eth_port.is_split) {
ret = -EINVAL;
goto out;
}
if (!eth_port.is_split)
return -EINVAL;
/* Special case the 100G CXP -> 2x40G unsplit */
lanes = eth_port.port_lanes;
if (eth_port.port_lanes == 8)
lanes = 10;
ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
out:
mutex_unlock(&pf->lock);
return ret;
return nfp_devlink_set_lanes(pf, eth_port.index, lanes);
}
static int
......@@ -163,9 +146,9 @@ static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct nfp_pf *pf = devlink_priv(devlink);
int ret;
mutex_lock(&pf->lock);
devl_lock(devlink);
ret = nfp_app_eswitch_mode_set(pf->app, mode);
mutex_unlock(&pf->lock);
devl_unlock(devlink);
return ret;
}
......@@ -375,12 +358,12 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
devlink = priv_to_devlink(app->pf);
return devlink_port_register(devlink, &port->dl_port, port->eth_id);
return devl_port_register(devlink, &port->dl_port, port->eth_id);
}
void nfp_devlink_port_unregister(struct nfp_port *port)
{
devlink_port_unregister(&port->dl_port);
devl_port_unregister(&port->dl_port);
}
void nfp_devlink_port_type_eth_set(struct nfp_port *port)
......
......@@ -227,6 +227,7 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
struct nfp_pf *pf = pci_get_drvdata(pdev);
struct devlink *devlink;
int err;
if (num_vfs > pf->limit_vfs) {
......@@ -241,7 +242,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
return err;
}
mutex_lock(&pf->lock);
devlink = priv_to_devlink(pf);
devl_lock(devlink);
err = nfp_app_sriov_enable(pf->app, num_vfs);
if (err) {
......@@ -255,11 +257,11 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);
mutex_unlock(&pf->lock);
devl_unlock(devlink);
return num_vfs;
err_sriov_disable:
mutex_unlock(&pf->lock);
devl_unlock(devlink);
pci_disable_sriov(pdev);
return err;
#endif
......@@ -270,8 +272,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
{
#ifdef CONFIG_PCI_IOV
struct nfp_pf *pf = pci_get_drvdata(pdev);
struct devlink *devlink;
mutex_lock(&pf->lock);
devlink = priv_to_devlink(pf);
devl_lock(devlink);
/* If the VFs are assigned we cannot shut down SR-IOV without
* causing issues, so just leave the hardware available but
......@@ -279,7 +283,7 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
*/
if (pci_vfs_assigned(pdev)) {
dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n");
mutex_unlock(&pf->lock);
devl_unlock(devlink);
return -EPERM;
}
......@@ -287,7 +291,7 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
pf->num_vfs = 0;
mutex_unlock(&pf->lock);
devl_unlock(devlink);
pci_disable_sriov(pdev);
dev_dbg(&pdev->dev, "Removed VFs.\n");
......@@ -707,7 +711,6 @@ static int nfp_pci_probe(struct pci_dev *pdev,
pf = devlink_priv(devlink);
INIT_LIST_HEAD(&pf->vnics);
INIT_LIST_HEAD(&pf->ports);
mutex_init(&pf->lock);
pci_set_drvdata(pdev, pf);
pf->pdev = pdev;
pf->dev_info = dev_info;
......@@ -798,7 +801,6 @@ static int nfp_pci_probe(struct pci_dev *pdev,
destroy_workqueue(pf->wq);
err_pci_priv_unset:
pci_set_drvdata(pdev, NULL);
mutex_destroy(&pf->lock);
devlink_free(devlink);
err_rel_regions:
pci_release_regions(pdev);
......@@ -835,7 +837,6 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)
kfree(pf->eth_tbl);
kfree(pf->nspi);
mutex_destroy(&pf->lock);
devlink_free(priv_to_devlink(pf));
pci_release_regions(pdev);
pci_disable_device(pdev);
......
......@@ -13,7 +13,6 @@
#include <linux/list.h>
#include <linux/types.h>
#include <linux/msi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <net/devlink.h>
......@@ -85,7 +84,8 @@ struct nfp_dumpspec {
* @port_refresh_work: Work entry for taking netdevs out
* @shared_bufs: Array of shared buffer structures if FW has any SBs
* @num_shared_bufs: Number of elements in @shared_bufs
* @lock: Protects all fields which may change after probe
*
* Fields which may change after probe are protected by the devlink instance lock.
*/
struct nfp_pf {
struct pci_dev *pdev;
......@@ -141,8 +141,6 @@ struct nfp_pf {
struct nfp_shared_buf *shared_bufs;
unsigned int num_shared_bufs;
struct mutex lock;
};
extern struct pci_driver nfp_netvf_pci_driver;
......
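
With @lock gone, helpers that touch nfp_pf state can assert the devlink instance lock instead of calling lockdep_assert_held(&pf->lock); a hypothetical sketch (my_count_phys_ports() is not a real nfp function):

#include "nfp_main.h"
#include "nfp_port.h"

static unsigned int my_count_phys_ports(struct nfp_pf *pf)
{
	struct nfp_port *port;
	unsigned int n = 0;

	/* callers must hold the instance lock; this replaces
	 * lockdep_assert_held(&pf->lock)
	 */
	devl_assert_locked(priv_to_devlink(pf));

	list_for_each_entry(port, &pf->ports, port_list)
		n++;

	return n;
}
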
......@@ -308,6 +308,7 @@ static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
struct devlink *devlink = priv_to_devlink(pf);
u8 __iomem *ctrl_bar;
int err;
......@@ -315,9 +316,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
if (IS_ERR(pf->app))
return PTR_ERR(pf->app);
mutex_lock(&pf->lock);
devl_lock(devlink);
err = nfp_app_init(pf->app);
mutex_unlock(&pf->lock);
devl_unlock(devlink);
if (err)
goto err_free;
......@@ -344,9 +345,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
err_unmap:
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
mutex_lock(&pf->lock);
devl_lock(devlink);
nfp_app_clean(pf->app);
mutex_unlock(&pf->lock);
devl_unlock(devlink);
err_free:
nfp_app_free(pf->app);
pf->app = NULL;
......@@ -355,14 +356,16 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
if (pf->ctrl_vnic) {
nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
}
mutex_lock(&pf->lock);
devl_lock(devlink);
nfp_app_clean(pf->app);
mutex_unlock(&pf->lock);
devl_unlock(devlink);
nfp_app_free(pf->app);
pf->app = NULL;
......@@ -548,12 +551,13 @@ nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
struct nfp_eth_table *eth_table;
struct nfp_net *nn, *next;
struct nfp_port *port;
int err;
lockdep_assert_held(&pf->lock);
devl_assert_locked(devlink);
/* Check for nfp_net_pci_remove() racing against us */
if (list_empty(&pf->vnics))
......@@ -602,10 +606,11 @@ static void nfp_net_refresh_vnics(struct work_struct *work)
{
struct nfp_pf *pf = container_of(work, struct nfp_pf,
port_refresh_work);
struct devlink *devlink = priv_to_devlink(pf);
mutex_lock(&pf->lock);
devl_lock(devlink);
nfp_net_refresh_port_table_sync(pf);
mutex_unlock(&pf->lock);
devl_unlock(devlink);
}
void nfp_net_refresh_port_table(struct nfp_port *port)
......@@ -711,7 +716,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_shared_buf_unreg;
mutex_lock(&pf->lock);
devl_lock(devlink);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
/* Allocate the vnics and do basic init */
......@@ -731,7 +736,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_stop_app;
mutex_unlock(&pf->lock);
devl_unlock(devlink);
devlink_register(devlink);
return 0;
......@@ -744,7 +749,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
mutex_unlock(&pf->lock);
devl_unlock(devlink);
nfp_devlink_params_unregister(pf);
err_shared_buf_unreg:
nfp_shared_buf_unregister(pf);
......@@ -758,10 +763,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
void nfp_net_pci_remove(struct nfp_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
struct nfp_net *nn, *next;
devlink_unregister(priv_to_devlink(pf));
mutex_lock(&pf->lock);
devl_lock(devlink);
list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
if (!nfp_net_is_data_vnic(nn))
continue;
......@@ -773,7 +779,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
mutex_unlock(&pf->lock);
devl_unlock(devlink);
nfp_devlink_params_unregister(pf);
nfp_shared_buf_unregister(pf);
......
......@@ -20,7 +20,7 @@ struct net_device *
nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
{
return rcu_dereference_protected(set->reprs[id],
lockdep_is_held(&app->pf->lock));
nfp_app_is_locked(app));
}
static void
......@@ -476,7 +476,7 @@ nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
int i;
reprs = rcu_dereference_protected(app->reprs[type],
lockdep_is_held(&app->pf->lock));
nfp_app_is_locked(app));
if (!reprs)
return;
......
......@@ -75,23 +75,6 @@ int nfp_port_set_features(struct net_device *netdev, netdev_features_t features)
return 0;
}
struct nfp_port *
nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id)
{
struct nfp_port *port;
lockdep_assert_held(&pf->lock);
if (type != NFP_PORT_PHYS_PORT)
return NULL;
list_for_each_entry(port, &pf->ports, port_list)
if (port->eth_id == id)
return port;
return NULL;
}
struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port)
{
if (!port)
......
......@@ -106,8 +106,6 @@ nfp_port_set_features(struct net_device *netdev, netdev_features_t features);
struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
int nfp_port_get_port_parent_id(struct net_device *netdev,
struct netdev_phys_item_id *ppid);
struct nfp_port *
nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port);
struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port);
......
......@@ -1197,9 +1197,9 @@ struct devlink_ops {
struct netlink_ext_ack *extack);
int (*port_type_set)(struct devlink_port *devlink_port,
enum devlink_port_type port_type);
int (*port_split)(struct devlink *devlink, unsigned int port_index,
int (*port_split)(struct devlink *devlink, struct devlink_port *port,
unsigned int count, struct netlink_ext_ack *extack);
int (*port_unsplit)(struct devlink *devlink, unsigned int port_index,
int (*port_unsplit)(struct devlink *devlink, struct devlink_port *port,
struct netlink_ext_ack *extack);
int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index,
u16 pool_index,
......@@ -1479,6 +1479,17 @@ void *devlink_priv(struct devlink *devlink);
struct devlink *priv_to_devlink(void *priv);
struct device *devlink_to_dev(const struct devlink *devlink);
/* Devlink instance explicit locking */
void devl_lock(struct devlink *devlink);
void devl_unlock(struct devlink *devlink);
void devl_assert_locked(struct devlink *devlink);
bool devl_lock_is_held(struct devlink *devlink);
int devl_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
unsigned int port_index);
void devl_port_unregister(struct devlink_port *devlink_port);
struct ib_device;
struct net *devlink_net(const struct devlink *devlink);
......
......@@ -225,6 +225,33 @@ struct devlink *__must_check devlink_try_get(struct devlink *devlink)
return NULL;
}
void devl_assert_locked(struct devlink *devlink)
{
lockdep_assert_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_assert_locked);
#ifdef CONFIG_LOCKDEP
/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
bool devl_lock_is_held(struct devlink *devlink)
{
return lockdep_is_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock_is_held);
#endif
void devl_lock(struct devlink *devlink)
{
mutex_lock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock);
void devl_unlock(struct devlink *devlink)
{
mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_unlock);
static struct devlink *devlink_get_from_attrs(struct net *net,
struct nlattr **attrs)
{
......@@ -1541,35 +1568,20 @@ static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb,
return 0;
}
static int devlink_port_split(struct devlink *devlink, u32 port_index,
u32 count, struct netlink_ext_ack *extack)
{
if (devlink->ops->port_split)
return devlink->ops->port_split(devlink, port_index, count,
extack);
return -EOPNOTSUPP;
}
static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[1];
struct devlink *devlink = info->user_ptr[0];
struct devlink_port *devlink_port;
u32 port_index;
u32 count;
if (!info->attrs[DEVLINK_ATTR_PORT_INDEX] ||
!info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT])
if (!info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT])
return -EINVAL;
if (!devlink->ops->port_split)
return -EOPNOTSUPP;
devlink_port = devlink_port_get_from_info(devlink, info);
port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
count = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]);
if (IS_ERR(devlink_port))
return -EINVAL;
if (!devlink_port->attrs.splittable) {
/* Split ports cannot be split. */
if (devlink_port->attrs.split)
......@@ -1584,29 +1596,19 @@ static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb,
return -EINVAL;
}
return devlink_port_split(devlink, port_index, count, info->extack);
}
static int devlink_port_unsplit(struct devlink *devlink, u32 port_index,
struct netlink_ext_ack *extack)
{
if (devlink->ops->port_unsplit)
return devlink->ops->port_unsplit(devlink, port_index, extack);
return -EOPNOTSUPP;
return devlink->ops->port_split(devlink, devlink_port, count,
info->extack);
}
static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[1];
struct devlink *devlink = info->user_ptr[0];
u32 port_index;
if (!info->attrs[DEVLINK_ATTR_PORT_INDEX])
return -EINVAL;
port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
return devlink_port_unsplit(devlink, port_index, info->extack);
if (!devlink->ops->port_unsplit)
return -EOPNOTSUPP;
return devlink->ops->port_unsplit(devlink, devlink_port, info->extack);
}
static int devlink_port_new_notifiy(struct devlink *devlink,
......@@ -8645,14 +8647,14 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_port_split_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
},
{
.cmd = DEVLINK_CMD_PORT_UNSPLIT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_port_unsplit_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
},
{
.cmd = DEVLINK_CMD_PORT_NEW,
......@@ -9249,6 +9251,32 @@ static void devlink_port_type_warn_cancel(struct devlink_port *devlink_port)
cancel_delayed_work_sync(&devlink_port->type_warn_dw);
}
int devl_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
unsigned int port_index)
{
lockdep_assert_held(&devlink->lock);
if (devlink_port_index_exists(devlink, port_index))
return -EEXIST;
WARN_ON(devlink_port->devlink);
devlink_port->devlink = devlink;
devlink_port->index = port_index;
spin_lock_init(&devlink_port->type_lock);
INIT_LIST_HEAD(&devlink_port->reporter_list);
mutex_init(&devlink_port->reporters_lock);
list_add_tail(&devlink_port->list, &devlink->port_list);
INIT_LIST_HEAD(&devlink_port->param_list);
INIT_LIST_HEAD(&devlink_port->region_list);
INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
devlink_port_type_warn_schedule(devlink_port);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
return 0;
}
EXPORT_SYMBOL_GPL(devl_port_register);
/**
* devlink_port_register - Register devlink port
*
......@@ -9266,29 +9294,28 @@ int devlink_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
unsigned int port_index)
{
mutex_lock(&devlink->lock);
if (devlink_port_index_exists(devlink, port_index)) {
mutex_unlock(&devlink->lock);
return -EEXIST;
}
int err;
WARN_ON(devlink_port->devlink);
devlink_port->devlink = devlink;
devlink_port->index = port_index;
spin_lock_init(&devlink_port->type_lock);
INIT_LIST_HEAD(&devlink_port->reporter_list);
mutex_init(&devlink_port->reporters_lock);
list_add_tail(&devlink_port->list, &devlink->port_list);
INIT_LIST_HEAD(&devlink_port->param_list);
INIT_LIST_HEAD(&devlink_port->region_list);
mutex_lock(&devlink->lock);
err = devl_port_register(devlink, devlink_port, port_index);
mutex_unlock(&devlink->lock);
INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
devlink_port_type_warn_schedule(devlink_port);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
return 0;
return err;
}
EXPORT_SYMBOL_GPL(devlink_port_register);
void devl_port_unregister(struct devlink_port *devlink_port)
{
lockdep_assert_held(&devlink_port->devlink->lock);
devlink_port_type_warn_cancel(devlink_port);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
list_del(&devlink_port->list);
WARN_ON(!list_empty(&devlink_port->reporter_list));
WARN_ON(!list_empty(&devlink_port->region_list));
mutex_destroy(&devlink_port->reporters_lock);
}
EXPORT_SYMBOL_GPL(devl_port_unregister);
/**
* devlink_port_unregister - Unregister devlink port
*
......@@ -9298,14 +9325,9 @@ void devlink_port_unregister(struct devlink_port *devlink_port)
{
struct devlink *devlink = devlink_port->devlink;
devlink_port_type_warn_cancel(devlink_port);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
mutex_lock(&devlink->lock);
list_del(&devlink_port->list);
devl_port_unregister(devlink_port);
mutex_unlock(&devlink->lock);
WARN_ON(!list_empty(&devlink_port->reporter_list));
WARN_ON(!list_empty(&devlink_port->region_list));
mutex_destroy(&devlink_port->reporters_lock);
}
EXPORT_SYMBOL_GPL(devlink_port_unregister);
......