Commit b21ae51d authored by Matthew Brost's avatar Matthew Brost Committed by Rodrigo Vivi

drm/xe/uapi: Kill DRM_XE_UFENCE_WAIT_VM_ERROR

This is not used, nor does it align with the VM async document; kill it.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 7224788f
...@@ -1621,9 +1621,6 @@ void xe_vm_close_and_put(struct xe_vm *vm) ...@@ -1621,9 +1621,6 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vma_destroy_unlocked(vma); xe_vma_destroy_unlocked(vma);
} }
if (vm->async_ops.error_capture.addr)
wake_up_all(&vm->async_ops.error_capture.wq);
xe_assert(xe, list_empty(&vm->extobj.list)); xe_assert(xe, list_empty(&vm->extobj.list));
up_write(&vm->lock); up_write(&vm->lock);
......
...@@ -215,17 +215,6 @@ struct xe_vm { ...@@ -215,17 +215,6 @@ struct xe_vm {
struct work_struct work; struct work_struct work;
/** @lock: protects list of pending async VM ops and fences */ /** @lock: protects list of pending async VM ops and fences */
spinlock_t lock; spinlock_t lock;
/** @error_capture: error capture state */
struct {
/** @mm: user MM */
struct mm_struct *mm;
/**
* @addr: user pointer to copy error capture state too
*/
u64 addr;
/** @wq: user fence wait queue for VM errors */
wait_queue_head_t wq;
} error_capture;
/** @fence: fence state */ /** @fence: fence state */
struct { struct {
/** @context: context of async fence */ /** @context: context of async fence */
......
...@@ -13,7 +13,6 @@ ...@@ -13,7 +13,6 @@
#include "xe_device.h" #include "xe_device.h"
#include "xe_gt.h" #include "xe_gt.h"
#include "xe_macros.h" #include "xe_macros.h"
#include "xe_vm.h"
static int do_compare(u64 addr, u64 value, u64 mask, u16 op) static int do_compare(u64 addr, u64 value, u64 mask, u16 op)
{ {
...@@ -81,8 +80,7 @@ static int check_hw_engines(struct xe_device *xe, ...@@ -81,8 +80,7 @@ static int check_hw_engines(struct xe_device *xe,
} }
#define VALID_FLAGS (DRM_XE_UFENCE_WAIT_SOFT_OP | \ #define VALID_FLAGS (DRM_XE_UFENCE_WAIT_SOFT_OP | \
DRM_XE_UFENCE_WAIT_ABSTIME | \ DRM_XE_UFENCE_WAIT_ABSTIME)
DRM_XE_UFENCE_WAIT_VM_ERROR)
#define MAX_OP DRM_XE_UFENCE_WAIT_LTE #define MAX_OP DRM_XE_UFENCE_WAIT_LTE
static long to_jiffies_timeout(struct xe_device *xe, static long to_jiffies_timeout(struct xe_device *xe,
...@@ -137,11 +135,9 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data, ...@@ -137,11 +135,9 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE]; struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
struct drm_xe_engine_class_instance __user *user_eci = struct drm_xe_engine_class_instance __user *user_eci =
u64_to_user_ptr(args->instances); u64_to_user_ptr(args->instances);
struct xe_vm *vm = NULL;
u64 addr = args->addr; u64 addr = args->addr;
int err; int err;
bool no_engines = args->flags & DRM_XE_UFENCE_WAIT_SOFT_OP || bool no_engines = args->flags & DRM_XE_UFENCE_WAIT_SOFT_OP;
args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR;
long timeout; long timeout;
ktime_t start; ktime_t start;
...@@ -162,8 +158,7 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data, ...@@ -162,8 +158,7 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !no_engines && !args->num_engines)) if (XE_IOCTL_DBG(xe, !no_engines && !args->num_engines))
return -EINVAL; return -EINVAL;
if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR) && if (XE_IOCTL_DBG(xe, addr & 0x7))
addr & 0x7))
return -EINVAL; return -EINVAL;
if (XE_IOCTL_DBG(xe, args->num_engines > XE_HW_ENGINE_MAX_INSTANCE)) if (XE_IOCTL_DBG(xe, args->num_engines > XE_HW_ENGINE_MAX_INSTANCE))
...@@ -181,22 +176,6 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data, ...@@ -181,22 +176,6 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
if (args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR) {
if (XE_IOCTL_DBG(xe, args->vm_id >> 32))
return -EINVAL;
vm = xe_vm_lookup(to_xe_file(file), args->vm_id);
if (XE_IOCTL_DBG(xe, !vm))
return -ENOENT;
if (XE_IOCTL_DBG(xe, !vm->async_ops.error_capture.addr)) {
xe_vm_put(vm);
return -EOPNOTSUPP;
}
addr = vm->async_ops.error_capture.addr;
}
timeout = to_jiffies_timeout(xe, args); timeout = to_jiffies_timeout(xe, args);
start = ktime_get(); start = ktime_get();
...@@ -207,15 +186,8 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data, ...@@ -207,15 +186,8 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
* hardware engine. Open coding as 'do_compare' can sleep which doesn't * hardware engine. Open coding as 'do_compare' can sleep which doesn't
* work with the wait_event_* macros. * work with the wait_event_* macros.
*/ */
if (vm) add_wait_queue(&xe->ufence_wq, &w_wait);
add_wait_queue(&vm->async_ops.error_capture.wq, &w_wait);
else
add_wait_queue(&xe->ufence_wq, &w_wait);
for (;;) { for (;;) {
if (vm && xe_vm_is_closed(vm)) {
err = -ENODEV;
break;
}
err = do_compare(addr, args->value, args->mask, args->op); err = do_compare(addr, args->value, args->mask, args->op);
if (err <= 0) if (err <= 0)
break; break;
...@@ -232,12 +204,7 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data, ...@@ -232,12 +204,7 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
timeout = wait_woken(&w_wait, TASK_INTERRUPTIBLE, timeout); timeout = wait_woken(&w_wait, TASK_INTERRUPTIBLE, timeout);
} }
if (vm) { remove_wait_queue(&xe->ufence_wq, &w_wait);
remove_wait_queue(&vm->async_ops.error_capture.wq, &w_wait);
xe_vm_put(vm);
} else {
remove_wait_queue(&xe->ufence_wq, &w_wait);
}
if (!(args->flags & DRM_XE_UFENCE_WAIT_ABSTIME)) { if (!(args->flags & DRM_XE_UFENCE_WAIT_ABSTIME)) {
args->timeout -= ktime_to_ns(ktime_sub(ktime_get(), start)); args->timeout -= ktime_to_ns(ktime_sub(ktime_get(), start));
......
...@@ -905,18 +905,10 @@ struct drm_xe_wait_user_fence { ...@@ -905,18 +905,10 @@ struct drm_xe_wait_user_fence {
/** @extensions: Pointer to the first extension struct, if any */ /** @extensions: Pointer to the first extension struct, if any */
__u64 extensions; __u64 extensions;
union { /**
/** * @addr: user pointer address to wait on, must qword aligned
* @addr: user pointer address to wait on, must qword aligned */
*/ __u64 addr;
__u64 addr;
/**
* @vm_id: The ID of the VM which encounter an error used with
* DRM_XE_UFENCE_WAIT_VM_ERROR. Upper 32 bits must be clear.
*/
__u64 vm_id;
};
#define DRM_XE_UFENCE_WAIT_EQ 0 #define DRM_XE_UFENCE_WAIT_EQ 0
#define DRM_XE_UFENCE_WAIT_NEQ 1 #define DRM_XE_UFENCE_WAIT_NEQ 1
...@@ -929,7 +921,6 @@ struct drm_xe_wait_user_fence { ...@@ -929,7 +921,6 @@ struct drm_xe_wait_user_fence {
#define DRM_XE_UFENCE_WAIT_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */ #define DRM_XE_UFENCE_WAIT_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */
#define DRM_XE_UFENCE_WAIT_ABSTIME (1 << 1) #define DRM_XE_UFENCE_WAIT_ABSTIME (1 << 1)
#define DRM_XE_UFENCE_WAIT_VM_ERROR (1 << 2)
/** @flags: wait flags */ /** @flags: wait flags */
__u16 flags; __u16 flags;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment