Commit 76dc5a84 authored by Yishai Hadas, committed by Jason Gunthorpe

IB/mlx5: Manage device uid for DEVX white list commands

Manage a device uid for DEVX white list commands. The created device uid
will be used on white list commands if the user did not supply a uid of its
own.

This enables the firmware to filter out non-privileged functionality based
on the recognized uid.
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 7f72052c
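Before the diff itself, here is a minimal, illustrative C sketch (not part of this commit) of the uid selection the commit message describes: a white list command prefers the uid owned by the user context and falls back to the device-level uid otherwise. The helper name devx_get_whitelist_uid() and the trimmed-down structs are assumptions for illustration only; only the devx_uid and devx_whitelist_uid fields mirror what the diff below introduces or uses.

/* Illustrative sketch only -- the helper and the trimmed structs are not in
 * this commit; only the devx_uid / devx_whitelist_uid fields mirror the diff. */
typedef unsigned short u16;	/* kernel-style alias, for the sketch only */

struct uctx_sketch {
	u16 devx_uid;		/* uid owned by this user context, 0 if none */
};

struct dev_sketch {
	u16 devx_whitelist_uid;	/* device uid created when the IB device is added */
};

/* Prefer the user's own uid; otherwise use the device uid so the firmware can
 * still recognize the caller and filter out non-privileged functionality. */
static u16 devx_get_whitelist_uid(const struct dev_sketch *dev,
				  const struct uctx_sketch *uctx)
{
	return (uctx && uctx->devx_uid) ? uctx->devx_uid : dev->devx_whitelist_uid;
}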
@@ -45,13 +45,14 @@ static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
 	return to_mucontext(ib_uverbs_get_ucontext(file));
 }
 
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev)
 {
 	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
 	u64 general_obj_types;
 	void *hdr;
 	int err;
+	u16 uid;
 
 	hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);
 
@@ -70,19 +71,18 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *contex
 	if (err)
 		return err;
 
-	context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-	return 0;
+	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+	return uid;
 }
 
-void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
-			  struct mlx5_ib_ucontext *context)
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
 {
 	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
 
 	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
-	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);
+	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, uid);
 
 	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
 }
...
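With this change mlx5_ib_devx_create() no longer writes into the ucontext; it folds the firmware-assigned uid into its return value: a positive uid on success, a negative errno on failure, with 0 treated as "no uid" by the rest of the driver. A short caller sketch of that convention follows; devx_uid_setup_sketch() is a hypothetical wrapper (it assumes the driver's internal headers), not a function added by this commit, and it mirrors what mlx5_ib_alloc_ucontext() does in the next hunk.

/* Sketch of the new calling convention; devx_uid_setup_sketch() is a
 * hypothetical wrapper, not a function added by this commit. */
static int devx_uid_setup_sketch(struct mlx5_ib_dev *dev,
				 struct mlx5_ib_ucontext *context)
{
	int uid = mlx5_ib_devx_create(dev);	/* > 0: uid, < 0: -errno */

	if (uid < 0)
		return uid;			/* propagate the error */

	context->devx_uid = uid;		/* remembered for later DEVX commands */
	return 0;
}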
@@ -1765,9 +1765,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 			goto out_uars;
 		}
 
-		err = mlx5_ib_devx_create(dev, context);
-		if (err)
+		err = mlx5_ib_devx_create(dev);
+		if (err < 0)
 			goto out_uars;
+		context->devx_uid = err;
 	}
 
 	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
@@ -1870,7 +1871,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
 out_devx:
 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
-		mlx5_ib_devx_destroy(dev, context);
+		mlx5_ib_devx_destroy(dev, context->devx_uid);
 
 out_uars:
 	deallocate_uars(dev, context);
@@ -1904,7 +1905,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
 
 	if (context->devx_uid)
-		mlx5_ib_devx_destroy(dev, context);
+		mlx5_ib_devx_destroy(dev, context->devx_uid);
 
 	deallocate_uars(dev, context);
 	kfree(bfregi->sys_pages);
@@ -6189,6 +6190,8 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 			profile->stage[stage].cleanup(dev);
 	}
 
+	if (dev->devx_whitelist_uid)
+		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
 	ib_dealloc_device((struct ib_device *)dev);
 }
 
@@ -6197,6 +6200,7 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
 {
 	int err;
 	int i;
+	int uid;
 
 	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
 		if (profile->stage[i].init) {
@@ -6206,6 +6210,10 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
 		}
 	}
 
+	uid = mlx5_ib_devx_create(dev);
+	if (uid > 0)
+		dev->devx_whitelist_uid = uid;
+
 	dev->profile = profile;
 	dev->ib_active = true;
 
...
@@ -925,6 +925,7 @@ struct mlx5_ib_dev {
 	struct list_head	ib_dev_list;
 	u64			sys_image_guid;
 	struct mlx5_memic	memic;
+	u16			devx_whitelist_uid;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1249,10 +1250,8 @@ void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
 				  u8 port_num);
 
 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
-			struct mlx5_ib_ucontext *context);
-void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
-			  struct mlx5_ib_ucontext *context);
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev);
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
 const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
 struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
 	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
@@ -1263,10 +1262,8 @@ int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
 void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
 #else
 static inline int
-mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
-		    struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; };
-static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
-					struct mlx5_ib_ucontext *context) {}
+mlx5_ib_devx_create(struct mlx5_ib_dev *dev) { return -EOPNOTSUPP; };
+static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
 static inline const struct uverbs_object_tree_def *
 mlx5_ib_get_devx_tree(void) { return NULL; }
 static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
...
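One design note on the stubs above: when CONFIG_INFINIBAND_USER_ACCESS is disabled, mlx5_ib_devx_create() returns -EOPNOTSUPP, so the "uid > 0" check in __mlx5_ib_add() simply leaves devx_whitelist_uid at 0 and __mlx5_ib_remove() skips the destroy. A generic, self-contained sketch of that best-effort stub pattern (all names here are made up, not taken from the mlx5 code) is:

/* Generic best-effort feature pattern; all names here are illustrative. */
#include <errno.h>

#ifdef FEATURE_X_ENABLED
static inline int feature_x_create(void) { return 42; }	/* pretend id 42 */
static inline void feature_x_destroy(int id) { (void)id; }
#else
static inline int feature_x_create(void) { return -EOPNOTSUPP; }
static inline void feature_x_destroy(int id) { (void)id; }
#endif

static void optional_setup(int *saved_id)
{
	int id = feature_x_create();

	if (id > 0)		/* stub or failure leaves *saved_id at 0 */
		*saved_id = id;
	/* never fatal: the feature is best effort, exactly like the device uid */
}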