Commit a124edba authored by Yishai Hadas's avatar Yishai Hadas Committed by Jason Gunthorpe

IB/mlx5: Introduce async DEVX obj query API

Introduce async DEVX obj query API to get the command response back to
user space once it's ready without blocking when calling the firmware.

The event's data includes a header with some meta data then the firmware
output command data.

The header includes:
- The input 'wr_id' to let the application recognize the response.

The input FD attribute identifies the event channel on which the event data
will be made ready. Downstream patches from this series will implement the
file ops so that the application can read it.
Signed-off-by: default avatarYishai Hadas <yishaih@mellanox.com>
Signed-off-by: default avatarLeon Romanovsky <leonro@mellanox.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@mellanox.com>
parent 6bf8f22a
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <rdma/uverbs_types.h> #include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h> #include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h> #include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h> #include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h> #include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
...@@ -17,6 +18,16 @@ ...@@ -17,6 +18,16 @@
#define UVERBS_MODULE_NAME mlx5_ib #define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h> #include <rdma/uverbs_named_ioctl.h>
/*
 * One in-flight (or completed, not-yet-read) async DEVX query.
 * Allocated per MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY call and queued on the
 * event file's event_list by the command-completion callback.
 */
struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	/* entry on devx_async_event_queue.event_list */
	struct list_head list;
	/* uobject of the async cmd FD; its file ref is held until completion */
	struct ib_uobject *fd_uobj;
	/* context handed to mlx5_cmd_exec_cb(); callback recovers us from it */
	struct mlx5_async_work cb_work;
	/* size of hdr.out_data, as requested by the caller */
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};
#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in) #define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj { struct devx_obj {
struct mlx5_core_dev *mdev; struct mlx5_core_dev *mdev;
...@@ -1172,11 +1183,13 @@ struct devx_async_event_queue { ...@@ -1172,11 +1183,13 @@ struct devx_async_event_queue {
spinlock_t lock; spinlock_t lock;
wait_queue_head_t poll_wait; wait_queue_head_t poll_wait;
struct list_head event_list; struct list_head event_list;
atomic_t bytes_in_use;
}; };
struct devx_async_cmd_event_file { struct devx_async_cmd_event_file {
struct ib_uobject uobj; struct ib_uobject uobj;
struct devx_async_event_queue ev_queue; struct devx_async_event_queue ev_queue;
struct mlx5_async_ctx async_ctx;
}; };
static void devx_init_event_queue(struct devx_async_event_queue *ev_queue) static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
...@@ -1184,6 +1197,7 @@ static void devx_init_event_queue(struct devx_async_event_queue *ev_queue) ...@@ -1184,6 +1197,7 @@ static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
spin_lock_init(&ev_queue->lock); spin_lock_init(&ev_queue->lock);
INIT_LIST_HEAD(&ev_queue->event_list); INIT_LIST_HEAD(&ev_queue->event_list);
init_waitqueue_head(&ev_queue->poll_wait); init_waitqueue_head(&ev_queue->poll_wait);
atomic_set(&ev_queue->bytes_in_use, 0);
} }
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)( static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
...@@ -1193,13 +1207,123 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)( ...@@ -1193,13 +1207,123 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
struct ib_uobject *uobj = uverbs_attr_get_uobject( struct ib_uobject *uobj = uverbs_attr_get_uobject(
attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE); attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
ev_file = container_of(uobj, struct devx_async_cmd_event_file, ev_file = container_of(uobj, struct devx_async_cmd_event_file,
uobj); uobj);
devx_init_event_queue(&ev_file->ev_queue); devx_init_event_queue(&ev_file->ev_queue);
mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
return 0; return 0;
} }
/*
 * Firmware command completion callback for an async DEVX query.
 * Queues the completed devx_async_data on the event file's list and wakes
 * any reader polling on it.
 *
 * NOTE(review): runs from the mlx5 command completion path — presumably
 * atomic context, hence the irqsave locking; confirm against
 * mlx5_cmd_exec_cb() semantics.
 */
static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct ib_uobject *fd_uobj = async_data->fd_uobj;
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_event_queue *ev_queue;
	unsigned long flags;

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);
	ev_queue = &ev_file->ev_queue;

	/* Publish the completed entry before waking readers. */
	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	/* Drop the file reference taken by get_file() at submission time. */
	fput(fd_uobj->object);
}
/* Cap on total pending output bytes per async cmd FD, to bound kernel memory
 * a user can pin via unread completions. */
#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */

/*
 * Submit an asynchronous DEVX object query to firmware.
 * Validates the command and object id, accounts the requested output length
 * against the FD's 1MB budget, allocates the completion buffer (header +
 * out_data), and fires the command via mlx5_cmd_exec_cb(); the result is
 * delivered to the event FD by devx_query_callback().
 *
 * Returns 0 on successful submission, -EAGAIN when the FD's byte budget is
 * exhausted, or a negative errno on validation/allocation failure.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Only query-class commands may be issued through this method. */
	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	/* The object id inside cmd_in must match the handle's object. */
	if (!devx_is_valid_obj_id(uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	/* Charge the output length up-front; undo on any failure below. */
	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	/* hdr has a flexible out_data[] tail sized by the caller. */
	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->fd_uobj = fd_uobj;

	/* Keep the event file alive until the completion callback's fput(). */
	get_file(fd_uobj->object);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
		    uverbs_attr_get_len(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
		    async_data->hdr.out_data,
		    async_data->cmd_out_len,
		    devx_query_callback, &async_data->cb_work);

	if (err)
		goto cb_err;

	return 0;

cb_err:
	fput(fd_uobj->object);
free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext, static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
struct uverbs_attr_bundle *attrs, struct uverbs_attr_bundle *attrs,
struct devx_umem *obj) struct devx_umem *obj)
...@@ -1353,6 +1477,17 @@ static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf, ...@@ -1353,6 +1477,17 @@ static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
static int devx_async_cmd_event_close(struct inode *inode, struct file *filp) static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
{ {
struct ib_uobject *uobj = filp->private_data;
struct devx_async_cmd_event_file *comp_ev_file = container_of(
uobj, struct devx_async_cmd_event_file, uobj);
struct devx_async_data *entry, *tmp;
spin_lock_irq(&comp_ev_file->ev_queue.lock);
list_for_each_entry_safe(entry, tmp,
&comp_ev_file->ev_queue.event_list, list)
kvfree(entry);
spin_unlock_irq(&comp_ev_file->ev_queue.lock);
uverbs_close_fd(filp); uverbs_close_fd(filp);
return 0; return 0;
} }
...@@ -1374,6 +1509,11 @@ const struct file_operations devx_async_cmd_event_fops = { ...@@ -1374,6 +1509,11 @@ const struct file_operations devx_async_cmd_event_fops = {
static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj, static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
enum rdma_remove_reason why) enum rdma_remove_reason why)
{ {
struct devx_async_cmd_event_file *comp_ev_file =
container_of(uobj, struct devx_async_cmd_event_file,
uobj);
mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
return 0; return 0;
}; };
...@@ -1487,6 +1627,27 @@ DECLARE_UVERBS_NAMED_METHOD( ...@@ -1487,6 +1627,27 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)), UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
UA_MANDATORY)); UA_MANDATORY));
/*
 * Attribute specification for MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY.
 * Consistency fix: use the MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_* attribute ids
 * that the handler reads, instead of the synchronous-query names. The two
 * namespaces both start at (1U << UVERBS_ID_NS_SHIFT) so the numeric ids are
 * unchanged; this only makes the declaration match the handler and the
 * mlx5_ib_devx_obj_query_async_attrs enum.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
			     u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		       MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		       UVERBS_ACCESS_READ,
		       UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY));
DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX, DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER), &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR), &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
...@@ -1497,7 +1658,8 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ, ...@@ -1497,7 +1658,8 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE), &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY), &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY), &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY)); &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM, DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup), UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
......
...@@ -84,6 +84,14 @@ enum mlx5_ib_devx_obj_query_attrs { ...@@ -84,6 +84,14 @@ enum mlx5_ib_devx_obj_query_attrs {
MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
}; };
/* UAPI attribute ids for MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY. */
enum mlx5_ib_devx_obj_query_async_attrs {
	/* object handle being queried */
	MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
	/* firmware query command input blob */
	MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN,
	/* async cmd event FD on which the response is delivered */
	MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
	/* caller cookie echoed back in the response header */
	MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
	/* requested firmware output length in bytes */
	MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
};
enum mlx5_ib_devx_query_eqn_attrs { enum mlx5_ib_devx_query_eqn_attrs {
MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
...@@ -94,6 +102,7 @@ enum mlx5_ib_devx_obj_methods { ...@@ -94,6 +102,7 @@ enum mlx5_ib_devx_obj_methods {
MLX5_IB_METHOD_DEVX_OBJ_DESTROY, MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
MLX5_IB_METHOD_DEVX_OBJ_MODIFY, MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
MLX5_IB_METHOD_DEVX_OBJ_QUERY, MLX5_IB_METHOD_DEVX_OBJ_QUERY,
MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
}; };
enum mlx5_ib_devx_umem_reg_attrs { enum mlx5_ib_devx_umem_reg_attrs {
......
...@@ -51,5 +51,10 @@ enum mlx5_ib_uapi_flow_action_packet_reformat_type { ...@@ -51,5 +51,10 @@ enum mlx5_ib_uapi_flow_action_packet_reformat_type {
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x3, MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x3,
}; };
/*
 * Header prepended to each async DEVX command response read from the
 * event FD: the caller's wr_id cookie followed by the raw firmware output.
 */
struct mlx5_ib_uapi_devx_async_cmd_hdr {
	__aligned_u64	wr_id;
	__u8		out_data[];
};
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment