Commit 9212da07 authored by Bommu Krishnaiah, committed by Rodrigo Vivi

drm/xe/uapi: add exec_queue_id member to drm_xe_wait_user_fence structure

Remove the num_engines/instances members from the drm_xe_wait_user_fence
structure and add an exec_queue_id member.

Right now the engine list is only checked for sanity and nothing else;
in the end, every operation with this IOCTL is a soft check. So let's
formalize that and use this IOCTL only to wait on the fence.

The exec_queue_id member will help user space get a proper error code
from the kernel when the exec_queue is reset.
Signed-off-by: Bommu Krishnaiah <krishnaiah.bommu@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Francois Dugast <francois.dugast@intel.com>
Acked-by: José Roberto de Souza <jose.souza@intel.com>
Acked-by: Mateusz Naklicki <mateusz.naklicki@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
parent 7a8bc117
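
For context, a minimal user-space sketch of the reworked wait (not part of this patch). It assumes the uapi names from this series (struct drm_xe_wait_user_fence, DRM_IOCTL_XE_WAIT_USER_FENCE, DRM_XE_UFENCE_WAIT_OP_EQ); the xe_wait_ufence() wrapper, its parameters, and the header path are illustrative only.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "xe_drm.h"	/* illustrative: the uapi header from this series */

static int xe_wait_ufence(int fd, uint64_t *fence, uint64_t expected,
			  uint32_t exec_queue_id, int64_t timeout_ns)
{
	struct drm_xe_wait_user_fence wait;

	memset(&wait, 0, sizeof(wait));	/* clears the MBZ fields (pad, pad2, reserved) */
	wait.addr = (uintptr_t)fence;		/* must be 8-byte aligned */
	wait.op = DRM_XE_UFENCE_WAIT_OP_EQ;	/* compare for equality */
	wait.flags = 0;				/* no flags: relative timeout */
	wait.value = expected;
	wait.mask = ~0ull;			/* compare all 64 bits */
	wait.timeout = timeout_ns;
	wait.exec_queue_id = exec_queue_id;	/* id returned by exec queue create */

	return ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
}

With the engine list gone, the only per-queue information the kernel needs is this exec_queue_id, which is what lets it report a reset of that queue back to the caller.
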
@@ -50,37 +50,7 @@ static int do_compare(u64 addr, u64 value, u64 mask, u16 op)
 	return passed ? 0 : 1;
 }
 
-static const enum xe_engine_class user_to_xe_engine_class[] = {
-	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
-	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
-	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
-	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
-	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
-};
-
-static int check_hw_engines(struct xe_device *xe,
-			    struct drm_xe_engine_class_instance *eci,
-			    int num_engines)
-{
-	int i;
-
-	for (i = 0; i < num_engines; ++i) {
-		enum xe_engine_class user_class =
-			user_to_xe_engine_class[eci[i].engine_class];
-
-		if (eci[i].gt_id >= xe->info.tile_count)
-			return -EINVAL;
-
-		if (!xe_gt_hw_engine(xe_device_get_gt(xe, eci[i].gt_id),
-				     user_class, eci[i].engine_instance, true))
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-#define VALID_FLAGS	(DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP | \
-			 DRM_XE_UFENCE_WAIT_FLAG_ABSTIME)
+#define VALID_FLAGS	DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
 #define MAX_OP	DRM_XE_UFENCE_WAIT_OP_LTE
 
 static long to_jiffies_timeout(struct xe_device *xe,
@@ -132,16 +102,13 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
 	struct xe_device *xe = to_xe_device(dev);
 	DEFINE_WAIT_FUNC(w_wait, woken_wake_function);
 	struct drm_xe_wait_user_fence *args = data;
-	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
-	struct drm_xe_engine_class_instance __user *user_eci =
-		u64_to_user_ptr(args->instances);
 	u64 addr = args->addr;
 	int err;
-	bool no_engines = args->flags & DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP;
 	long timeout;
 	ktime_t start;
 
 	if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad) ||
+	    XE_IOCTL_DBG(xe, args->pad2) ||
 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
 		return -EINVAL;
@@ -151,41 +118,13 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
 	if (XE_IOCTL_DBG(xe, args->op > MAX_OP))
 		return -EINVAL;
 
-	if (XE_IOCTL_DBG(xe, no_engines &&
-			 (args->num_engines || args->instances)))
-		return -EINVAL;
-
-	if (XE_IOCTL_DBG(xe, !no_engines && !args->num_engines))
-		return -EINVAL;
-
 	if (XE_IOCTL_DBG(xe, addr & 0x7))
 		return -EINVAL;
 
-	if (XE_IOCTL_DBG(xe, args->num_engines > XE_HW_ENGINE_MAX_INSTANCE))
-		return -EINVAL;
-
-	if (!no_engines) {
-		err = copy_from_user(eci, user_eci,
-				     sizeof(struct drm_xe_engine_class_instance) *
-				     args->num_engines);
-		if (XE_IOCTL_DBG(xe, err))
-			return -EFAULT;
-
-		if (XE_IOCTL_DBG(xe, check_hw_engines(xe, eci,
-						      args->num_engines)))
-			return -EINVAL;
-	}
-
 	timeout = to_jiffies_timeout(xe, args);
 
 	start = ktime_get();
 
-	/*
-	 * FIXME: Very simple implementation at the moment, single wait queue
-	 * for everything. Could be optimized to have a wait queue for every
-	 * hardware engine. Open coding as 'do_compare' can sleep which doesn't
-	 * work with the wait_event_* macros.
-	 */
 	add_wait_queue(&xe->ufence_wq, &w_wait);
 	for (;;) {
 		err = do_compare(addr, args->value, args->mask, args->op);
@@ -1031,8 +1031,7 @@ struct drm_xe_wait_user_fence {
 	/** @op: wait operation (type of comparison) */
 	__u16 op;
 
-#define DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP	(1 << 0)	/* e.g. Wait on VM bind */
-#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME	(1 << 1)
+#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME	(1 << 0)
 	/** @flags: wait flags */
 	__u16 flags;
 
@@ -1065,17 +1064,11 @@ struct drm_xe_wait_user_fence {
 	 */
 	__s64 timeout;
 
-	/**
-	 * @num_engines: number of engine instances to wait on, must be zero
-	 * when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
-	 */
-	__u64 num_engines;
+	/** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */
+	__u32 exec_queue_id;
 
-	/**
-	 * @instances: user pointer to array of drm_xe_engine_class_instance to
-	 * wait on, must be NULL when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
-	 */
-	__u64 instances;
+	/** @pad2: MBZ */
+	__u32 pad2;
 
 	/** @reserved: Reserved */
 	__u64 reserved[2];
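
A second hedged sketch, building on the xe_wait_ufence() example above: with DRM_XE_UFENCE_WAIT_FLAG_ABSTIME moved to bit 0, the timeout is interpreted as an absolute deadline in nanoseconds. The CLOCK_MONOTONIC time base is an assumption here, made to match the driver's use of ktime_get(); the helper name and budget_ns parameter are illustrative.

#include <time.h>

static int xe_wait_ufence_deadline(int fd, uint64_t *fence, uint64_t expected,
				   uint32_t exec_queue_id, int64_t budget_ns)
{
	struct timespec now;
	struct drm_xe_wait_user_fence wait;

	/* Assumed time base: CLOCK_MONOTONIC (matches ktime_get() in the driver). */
	clock_gettime(CLOCK_MONOTONIC, &now);

	memset(&wait, 0, sizeof(wait));
	wait.addr = (uintptr_t)fence;
	wait.op = DRM_XE_UFENCE_WAIT_OP_EQ;
	wait.flags = DRM_XE_UFENCE_WAIT_FLAG_ABSTIME;	/* absolute deadline */
	wait.value = expected;
	wait.mask = ~0ull;
	wait.timeout = (int64_t)now.tv_sec * 1000000000ll + now.tv_nsec + budget_ns;
	wait.exec_queue_id = exec_queue_id;

	return ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
}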