Commit 602144c2 authored by Jakub Kicinski, committed by Daniel Borkmann

bpf: offload: keep the offload state per-ASIC

Create a higher-level entity to represent a device/ASIC to allow
programs and maps to be shared between device ports.  The extra
work is required to make sure we don't destroy BPF objects as
soon as the netdev for which they were loaded gets destroyed,
as other ports may still be using them.  When netdev goes away
all of its BPF objects will be moved to other netdevs of the
device, and only destroyed when last netdev is unregistered.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 9fd7c555
...@@ -406,12 +406,16 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app) ...@@ -406,12 +406,16 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev) static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev)
{ {
return bpf_offload_dev_netdev_register(netdev); struct nfp_app_bpf *bpf = app->priv;
return bpf_offload_dev_netdev_register(bpf->bpf_dev, netdev);
} }
static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev) static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev)
{ {
bpf_offload_dev_netdev_unregister(netdev); struct nfp_app_bpf *bpf = app->priv;
bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev);
} }
static int nfp_bpf_init(struct nfp_app *app) static int nfp_bpf_init(struct nfp_app *app)
...@@ -437,6 +441,11 @@ static int nfp_bpf_init(struct nfp_app *app) ...@@ -437,6 +441,11 @@ static int nfp_bpf_init(struct nfp_app *app)
if (err) if (err)
goto err_free_neutral_maps; goto err_free_neutral_maps;
bpf->bpf_dev = bpf_offload_dev_create();
err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
if (err)
goto err_free_neutral_maps;
return 0; return 0;
err_free_neutral_maps: err_free_neutral_maps:
...@@ -455,6 +464,7 @@ static void nfp_bpf_clean(struct nfp_app *app) ...@@ -455,6 +464,7 @@ static void nfp_bpf_clean(struct nfp_app *app)
{ {
struct nfp_app_bpf *bpf = app->priv; struct nfp_app_bpf *bpf = app->priv;
bpf_offload_dev_destroy(bpf->bpf_dev);
WARN_ON(!skb_queue_empty(&bpf->cmsg_replies)); WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
WARN_ON(!list_empty(&bpf->map_list)); WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use); WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
......
...@@ -110,6 +110,8 @@ enum pkt_vec { ...@@ -110,6 +110,8 @@ enum pkt_vec {
* struct nfp_app_bpf - bpf app priv structure * struct nfp_app_bpf - bpf app priv structure
* @app: backpointer to the app * @app: backpointer to the app
* *
* @bpf_dev: BPF offload device handle
*
* @tag_allocator: bitmap of control message tags in use * @tag_allocator: bitmap of control message tags in use
* @tag_alloc_next: next tag bit to allocate * @tag_alloc_next: next tag bit to allocate
* @tag_alloc_last: next tag bit to be freed * @tag_alloc_last: next tag bit to be freed
...@@ -150,6 +152,8 @@ enum pkt_vec { ...@@ -150,6 +152,8 @@ enum pkt_vec {
struct nfp_app_bpf { struct nfp_app_bpf {
struct nfp_app *app; struct nfp_app *app;
struct bpf_offload_dev *bpf_dev;
DECLARE_BITMAP(tag_allocator, U16_MAX + 1); DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
u16 tag_alloc_next; u16 tag_alloc_next;
u16 tag_alloc_last; u16 tag_alloc_last;
......
...@@ -592,11 +592,16 @@ int nsim_bpf_init(struct netdevsim *ns) ...@@ -592,11 +592,16 @@ int nsim_bpf_init(struct netdevsim *ns)
debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir); debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir);
if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs)) if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
return -ENOMEM; return -ENOMEM;
ns->sdev->bpf_dev = bpf_offload_dev_create();
err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
if (err)
return err;
} }
err = bpf_offload_dev_netdev_register(ns->netdev); err = bpf_offload_dev_netdev_register(ns->sdev->bpf_dev, ns->netdev);
if (err) if (err)
return err; goto err_destroy_bdev;
debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir, debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
&ns->bpf_offloaded_id); &ns->bpf_offloaded_id);
...@@ -624,6 +629,11 @@ int nsim_bpf_init(struct netdevsim *ns) ...@@ -624,6 +629,11 @@ int nsim_bpf_init(struct netdevsim *ns)
&ns->bpf_map_accept); &ns->bpf_map_accept);
return 0; return 0;
err_destroy_bdev:
if (ns->sdev->refcnt == 1)
bpf_offload_dev_destroy(ns->sdev->bpf_dev);
return err;
} }
void nsim_bpf_uninit(struct netdevsim *ns) void nsim_bpf_uninit(struct netdevsim *ns)
...@@ -631,10 +641,11 @@ void nsim_bpf_uninit(struct netdevsim *ns) ...@@ -631,10 +641,11 @@ void nsim_bpf_uninit(struct netdevsim *ns)
WARN_ON(ns->xdp.prog); WARN_ON(ns->xdp.prog);
WARN_ON(ns->xdp_hw.prog); WARN_ON(ns->xdp_hw.prog);
WARN_ON(ns->bpf_offloaded); WARN_ON(ns->bpf_offloaded);
bpf_offload_dev_netdev_unregister(ns->netdev); bpf_offload_dev_netdev_unregister(ns->sdev->bpf_dev, ns->netdev);
if (ns->sdev->refcnt == 1) { if (ns->sdev->refcnt == 1) {
WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs)); WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs));
WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps)); WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps));
bpf_offload_dev_destroy(ns->sdev->bpf_dev);
} }
} }
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#define NSIM_EA(extack, msg) NL_SET_ERR_MSG_MOD((extack), msg) #define NSIM_EA(extack, msg) NL_SET_ERR_MSG_MOD((extack), msg)
struct bpf_prog; struct bpf_prog;
struct bpf_offload_dev;
struct dentry; struct dentry;
struct nsim_vf_config; struct nsim_vf_config;
...@@ -36,6 +37,8 @@ struct netdevsim_shared_dev { ...@@ -36,6 +37,8 @@ struct netdevsim_shared_dev {
struct dentry *ddir; struct dentry *ddir;
struct bpf_offload_dev *bpf_dev;
struct dentry *ddir_bpf_bound_progs; struct dentry *ddir_bpf_bound_progs;
u32 prog_id_gen; u32 prog_id_gen;
......
...@@ -85,6 +85,7 @@ struct bpf_map { ...@@ -85,6 +85,7 @@ struct bpf_map {
char name[BPF_OBJ_NAME_LEN]; char name[BPF_OBJ_NAME_LEN];
}; };
struct bpf_offload_dev;
struct bpf_offloaded_map; struct bpf_offloaded_map;
struct bpf_map_dev_ops { struct bpf_map_dev_ops {
...@@ -650,8 +651,12 @@ int bpf_map_offload_get_next_key(struct bpf_map *map, ...@@ -650,8 +651,12 @@ int bpf_map_offload_get_next_key(struct bpf_map *map,
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
int bpf_offload_dev_netdev_register(struct net_device *netdev); struct bpf_offload_dev *bpf_offload_dev_create(void);
void bpf_offload_dev_netdev_unregister(struct net_device *netdev); void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
struct net_device *netdev);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
......
...@@ -32,11 +32,17 @@ ...@@ -32,11 +32,17 @@
*/ */
static DECLARE_RWSEM(bpf_devs_lock); static DECLARE_RWSEM(bpf_devs_lock);
/* Per-ASIC offload device: groups all netdev ports of one device/ASIC so
 * offloaded programs and maps can be shared between them.
 */
struct bpf_offload_dev {
	/* All struct bpf_offload_netdev registered against this device,
	 * linked via their ->offdev_netdevs member.
	 */
	struct list_head netdevs;
};
struct bpf_offload_netdev { struct bpf_offload_netdev {
struct rhash_head l; struct rhash_head l;
struct net_device *netdev; struct net_device *netdev;
struct bpf_offload_dev *offdev;
struct list_head progs; struct list_head progs;
struct list_head maps; struct list_head maps;
struct list_head offdev_netdevs;
}; };
static const struct rhashtable_params offdevs_params = { static const struct rhashtable_params offdevs_params = {
...@@ -526,25 +532,18 @@ bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map) ...@@ -526,25 +532,18 @@ bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
return ret; return ret;
} }
int bpf_offload_dev_netdev_register(struct net_device *netdev) int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
struct net_device *netdev)
{ {
struct bpf_offload_netdev *ondev; struct bpf_offload_netdev *ondev;
int err; int err;
down_write(&bpf_devs_lock);
if (!offdevs_inited) {
err = rhashtable_init(&offdevs, &offdevs_params);
if (err)
return err;
offdevs_inited = true;
}
up_write(&bpf_devs_lock);
ondev = kzalloc(sizeof(*ondev), GFP_KERNEL); ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
if (!ondev) if (!ondev)
return -ENOMEM; return -ENOMEM;
ondev->netdev = netdev; ondev->netdev = netdev;
ondev->offdev = offdev;
INIT_LIST_HEAD(&ondev->progs); INIT_LIST_HEAD(&ondev->progs);
INIT_LIST_HEAD(&ondev->maps); INIT_LIST_HEAD(&ondev->maps);
...@@ -555,6 +554,7 @@ int bpf_offload_dev_netdev_register(struct net_device *netdev) ...@@ -555,6 +554,7 @@ int bpf_offload_dev_netdev_register(struct net_device *netdev)
goto err_unlock_free; goto err_unlock_free;
} }
list_add(&ondev->offdev_netdevs, &offdev->netdevs);
up_write(&bpf_devs_lock); up_write(&bpf_devs_lock);
return 0; return 0;
...@@ -565,11 +565,12 @@ int bpf_offload_dev_netdev_register(struct net_device *netdev) ...@@ -565,11 +565,12 @@ int bpf_offload_dev_netdev_register(struct net_device *netdev)
} }
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register); EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);
void bpf_offload_dev_netdev_unregister(struct net_device *netdev) void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
struct net_device *netdev)
{ {
struct bpf_offload_netdev *ondev, *altdev;
struct bpf_offloaded_map *offmap, *mtmp; struct bpf_offloaded_map *offmap, *mtmp;
struct bpf_prog_offload *offload, *ptmp; struct bpf_prog_offload *offload, *ptmp;
struct bpf_offload_netdev *ondev;
ASSERT_RTNL(); ASSERT_RTNL();
...@@ -579,11 +580,26 @@ void bpf_offload_dev_netdev_unregister(struct net_device *netdev) ...@@ -579,11 +580,26 @@ void bpf_offload_dev_netdev_unregister(struct net_device *netdev)
goto unlock; goto unlock;
WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params)); WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
list_del(&ondev->offdev_netdevs);
list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
__bpf_prog_offload_destroy(offload->prog); /* Try to move the objects to another netdev of the device */
list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads) altdev = list_first_entry_or_null(&offdev->netdevs,
__bpf_map_offload_destroy(offmap); struct bpf_offload_netdev,
offdev_netdevs);
if (altdev) {
list_for_each_entry(offload, &ondev->progs, offloads)
offload->netdev = altdev->netdev;
list_splice_init(&ondev->progs, &altdev->progs);
list_for_each_entry(offmap, &ondev->maps, offloads)
offmap->netdev = altdev->netdev;
list_splice_init(&ondev->maps, &altdev->maps);
} else {
list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
__bpf_prog_offload_destroy(offload->prog);
list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
__bpf_map_offload_destroy(offmap);
}
WARN_ON(!list_empty(&ondev->progs)); WARN_ON(!list_empty(&ondev->progs));
WARN_ON(!list_empty(&ondev->maps)); WARN_ON(!list_empty(&ondev->maps));
...@@ -592,3 +608,34 @@ void bpf_offload_dev_netdev_unregister(struct net_device *netdev) ...@@ -592,3 +608,34 @@ void bpf_offload_dev_netdev_unregister(struct net_device *netdev)
up_write(&bpf_devs_lock); up_write(&bpf_devs_lock);
} }
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister); EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
/* bpf_offload_dev_create() - allocate a per-ASIC offload device handle.
 *
 * Drivers create one handle per device/ASIC and register each of its
 * netdev ports against it, allowing offloaded BPF objects to be shared
 * between ports.
 *
 * Return: new handle on success, ERR_PTR() on failure.  Caller frees
 * with bpf_offload_dev_destroy().
 */
struct bpf_offload_dev *bpf_offload_dev_create(void)
{
	struct bpf_offload_dev *offdev;
	int err;

	/* Lazily initialize the netdev -> bpf_offload_netdev hashtable on
	 * first use; offdevs_inited is protected by bpf_devs_lock.
	 */
	down_write(&bpf_devs_lock);
	if (!offdevs_inited) {
		err = rhashtable_init(&offdevs, &offdevs_params);
		if (err) {
			/* Must drop the lock before bailing out - the
			 * original early return leaked bpf_devs_lock held.
			 */
			up_write(&bpf_devs_lock);
			return ERR_PTR(err);
		}
		offdevs_inited = true;
	}
	up_write(&bpf_devs_lock);

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);
/* bpf_offload_dev_destroy() - free a handle from bpf_offload_dev_create().
 *
 * All netdevs must already have been unregistered from @offdev via
 * bpf_offload_dev_netdev_unregister(); warn (but still free) otherwise.
 */
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment