Commit fdb6a053 authored by Thomas Hellström, committed by Rodrigo Vivi

drm/xe: Internally change the compute_mode and no_dma_fence mode naming

The name "compute_mode" can be confusing since compute uses either this
mode or fault_mode to achieve the long-running semantics, and compute_mode
can, moving forward, enable fault_mode under the hood to work around
hardware limitations.

Also, the name no_dma_fence_mode really refers to what we elsewhere call
long-running mode, and, contrary to what its name suggests, that mode does
allow dma-fences as in-fences.

So in an attempt to be more consistent, rename
no_dma_fence_mode -> lr_mode
compute_mode      -> preempt_fence_mode

And adjust flags so that

preempt_fence_mode sets XE_VM_FLAG_LR_MODE
fault_mode sets XE_VM_FLAG_LR_MODE | XE_VM_FLAG_FAULT_MODE
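
Concretely, after this change the mode predicates in xe_vm.h (shown in
full in the diff below) reduce to:

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	/* LR mode that is not fault mode relies on preempt fences. */
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}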

v2:
- Fix a typo in the commit message (Oak Zeng)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231127123349.23698-1-thomas.hellstrom@linux.intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent d2f51c50
@@ -100,7 +100,7 @@ static int xe_exec_begin(struct drm_exec *exec, struct xe_vm *vm)
 	LIST_HEAD(dups);
 	int err = 0;
-	if (xe_vm_no_dma_fences(vm))
+	if (xe_vm_in_lr_mode(vm))
 		return 0;
 	/*
@@ -182,7 +182,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	for (i = 0; i < args->num_syncs; i++) {
 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
					  &syncs_user[i], true,
-					  xe_vm_no_dma_fences(vm));
+					  xe_vm_in_lr_mode(vm));
 		if (err)
 			goto err_syncs;
 	}
@@ -197,7 +197,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	}
 retry:
-	if (!xe_vm_no_dma_fences(vm) && xe_vm_userptr_check_repin(vm)) {
+	if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
 		err = down_write_killable(&vm->lock);
 		write_locked = true;
 	} else {
@@ -279,7 +279,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	}
 	/* Wait behind munmap style rebinds */
-	if (!xe_vm_no_dma_fences(vm)) {
+	if (!xe_vm_in_lr_mode(vm)) {
 		err = drm_sched_job_add_resv_dependencies(&job->drm,
							  xe_vm_resv(vm),
							  DMA_RESV_USAGE_KERNEL);
@@ -292,7 +292,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	if (err)
 		goto err_put_job;
-	if (!xe_vm_no_dma_fences(vm)) {
+	if (!xe_vm_in_lr_mode(vm)) {
 		err = down_read_interruptible(&vm->userptr.notifier_lock);
 		if (err)
 			goto err_put_job;
@@ -307,7 +307,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	 * the job and let the DRM scheduler / backend clean up the job.
 	 */
 	xe_sched_job_arm(job);
-	if (!xe_vm_no_dma_fences(vm)) {
+	if (!xe_vm_in_lr_mode(vm)) {
 		/* Block userptr invalidations / BO eviction */
 		dma_resv_add_fence(xe_vm_resv(vm),
				   &job->drm.s_fence->finished,
@@ -330,14 +330,14 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	xe_sched_job_push(job);
 	xe_vm_reactivate_rebind(vm);
-	if (!err && !xe_vm_no_dma_fences(vm)) {
+	if (!err && !xe_vm_in_lr_mode(vm)) {
 		spin_lock(&xe->ttm.lru_lock);
 		ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 		spin_unlock(&xe->ttm.lru_lock);
 	}
 err_repin:
-	if (!xe_vm_no_dma_fences(vm))
+	if (!xe_vm_in_lr_mode(vm))
 		up_read(&vm->userptr.notifier_lock);
 err_put_job:
 	if (err)
...
@@ -327,7 +327,7 @@ static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue
 	if (XE_IOCTL_DBG(xe, !create))
 		return -EINVAL;
-	if (XE_IOCTL_DBG(xe, xe_vm_in_compute_mode(q->vm)))
+	if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm)))
 		return -EINVAL;
 	if (value)
@@ -705,14 +705,14 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 	q = xe_exec_queue_create(xe, vm, logical_mask,
				 args->width, hwe,
-				 xe_vm_no_dma_fences(vm) ? 0 :
+				 xe_vm_in_lr_mode(vm) ? 0 :
				 EXEC_QUEUE_FLAG_PERSISTENT);
 	up_read(&vm->lock);
 	xe_vm_put(vm);
 	if (IS_ERR(q))
 		return PTR_ERR(q);
-	if (xe_vm_in_compute_mode(vm)) {
+	if (xe_vm_in_preempt_fence_mode(vm)) {
 		q->compute.context = dma_fence_context_alloc(1);
 		spin_lock_init(&q->compute.lock);
@@ -785,7 +785,7 @@ int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
  */
 bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
 {
-	return q->vm && xe_vm_no_dma_fences(q->vm) &&
+	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
 }
...
@@ -1211,7 +1211,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
 	ge->q = q;
 	init_waitqueue_head(&ge->suspend_wait);
-	timeout = (q->vm && xe_vm_no_dma_fences(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
+	timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
		  q->hwe->eclass->sched_props.job_timeout_ms;
 	err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
			    get_submit_wq(guc),
...
@@ -1292,8 +1292,8 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
 	 * non-faulting LR, in particular on user-space batch buffer chaining,
 	 * it needs to be done here.
 	 */
-	if ((rebind && !xe_vm_no_dma_fences(vm) && !vm->batch_invalidate_tlb) ||
-	    (!rebind && vm->scratch_bo[tile->id] && xe_vm_in_compute_mode(vm))) {
+	if ((rebind && !xe_vm_in_lr_mode(vm) && !vm->batch_invalidate_tlb) ||
+	    (!rebind && vm->scratch_bo[tile->id] && xe_vm_in_preempt_fence_mode(vm))) {
 		ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
 		if (!ifence)
 			return ERR_PTR(-ENOMEM);
@@ -1355,7 +1355,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
 			xe_bo_put_commit(&deferred);
 		}
 		if (!rebind && last_munmap_rebind &&
-		    xe_vm_in_compute_mode(vm))
+		    xe_vm_in_preempt_fence_mode(vm))
 			xe_vm_queue_rebind_worker(vm);
 	} else {
 		kfree(rfence);
...
@@ -92,7 +92,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
 	/* Migration and kernel engines have their own locking */
 	if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
 		lockdep_assert_held(&q->vm->lock);
-		if (!xe_vm_no_dma_fences(q->vm))
+		if (!xe_vm_in_lr_mode(q->vm))
			xe_vm_assert_held(q->vm);
 	}
...
@@ -100,7 +100,7 @@ static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
			struct xe_sync_entry *sync,
			struct drm_xe_sync __user *sync_user,
-			bool exec, bool no_dma_fences)
+			bool exec, bool in_lr_mode)
 {
 	struct drm_xe_sync sync_in;
 	int err;
@@ -118,7 +118,7 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
 	signal = sync_in.flags & DRM_XE_SYNC_FLAG_SIGNAL;
 	switch (sync_in.flags & SYNC_FLAGS_TYPE_MASK) {
 	case DRM_XE_SYNC_FLAG_SYNCOBJ:
-		if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
+		if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
			return -EOPNOTSUPP;
 		if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
@@ -136,7 +136,7 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
 		break;
 	case DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ:
-		if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
+		if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
			return -EOPNOTSUPP;
 		if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
...
@@ -340,7 +340,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
 	int err;
 	bool wait;
-	xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
+	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
 	down_write(&vm->lock);
 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
@@ -394,7 +394,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
  */
 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
 {
-	if (!xe_vm_in_compute_mode(vm))
+	if (!xe_vm_in_preempt_fence_mode(vm))
 		return;
 	down_write(&vm->lock);
@@ -596,7 +596,7 @@ static void preempt_rebind_work_func(struct work_struct *w)
 	long wait;
 	int __maybe_unused tries = 0;
-	xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
+	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
 	trace_xe_vm_rebind_worker_enter(vm);
 	down_write(&vm->lock);
@@ -840,7 +840,7 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
 	struct xe_vma *vma, *next;
 	lockdep_assert_held(&vm->lock);
-	if (xe_vm_no_dma_fences(vm) && !rebind_worker)
+	if (xe_vm_in_lr_mode(vm) && !rebind_worker)
 		return NULL;
 	xe_vm_assert_held(vm);
@@ -1436,9 +1436,9 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 		vm->batch_invalidate_tlb = true;
 	}
-	if (flags & XE_VM_FLAG_COMPUTE_MODE) {
+	if (flags & XE_VM_FLAG_LR_MODE) {
 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
-		vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
+		vm->flags |= XE_VM_FLAG_LR_MODE;
 		vm->batch_invalidate_tlb = false;
 	}
@@ -1526,7 +1526,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
 	xe_assert(xe, !vm->preempt.num_exec_queues);
 	xe_vm_close(vm);
-	if (xe_vm_in_compute_mode(vm))
+	if (xe_vm_in_preempt_fence_mode(vm))
 		flush_work(&vm->preempt.rebind_work);
 	down_write(&vm->lock);
@@ -1975,11 +1975,11 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
 	if (args->flags & DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE)
-		flags |= XE_VM_FLAG_COMPUTE_MODE;
+		flags |= XE_VM_FLAG_LR_MODE;
 	if (args->flags & DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT)
 		flags |= XE_VM_FLAG_ASYNC_DEFAULT;
 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
-		flags |= XE_VM_FLAG_FAULT_MODE;
+		flags |= XE_VM_FLAG_LR_MODE | XE_VM_FLAG_FAULT_MODE;
 	vm = xe_vm_create(xe, flags);
 	if (IS_ERR(vm))
@@ -3066,7 +3066,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
					  &syncs_user[num_syncs], false,
-					  xe_vm_no_dma_fences(vm));
+					  xe_vm_in_lr_mode(vm));
 		if (err)
 			goto free_syncs;
 	}
...
@@ -149,19 +149,19 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
 void xe_vm_close_and_put(struct xe_vm *vm);
-static inline bool xe_vm_in_compute_mode(struct xe_vm *vm)
+static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
 {
-	return vm->flags & XE_VM_FLAG_COMPUTE_MODE;
+	return vm->flags & XE_VM_FLAG_FAULT_MODE;
 }
-static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
+static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
 {
-	return vm->flags & XE_VM_FLAG_FAULT_MODE;
+	return vm->flags & XE_VM_FLAG_LR_MODE;
 }
-static inline bool xe_vm_no_dma_fences(struct xe_vm *vm)
+static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
 {
-	return xe_vm_in_compute_mode(vm) || xe_vm_in_fault_mode(vm);
+	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
 }
 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
@@ -181,7 +181,7 @@ extern struct ttm_device_funcs xe_ttm_funcs;
 static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
 {
-	xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
+	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
 }
@@ -196,7 +196,7 @@ static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
  */
 static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
 {
-	if (xe_vm_in_compute_mode(vm) && vm->preempt.rebind_deactivated) {
+	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
 	}
...
@@ -154,7 +154,7 @@ struct xe_vm {
	 * from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely
	 */
 #define XE_VM_FLAG_64K			BIT(0)
-#define XE_VM_FLAG_COMPUTE_MODE		BIT(1)
+#define XE_VM_FLAG_LR_MODE		BIT(1)
 #define XE_VM_FLAG_ASYNC_DEFAULT	BIT(2)
 #define XE_VM_FLAG_MIGRATION		BIT(3)
 #define XE_VM_FLAG_SCRATCH_PAGE		BIT(4)
...