Commit 75192758 authored by Matthew Brost

drm/xe: Add ops_execute function which returns a fence

Add an ops_execute function which returns a fence. This will be helpful
for initiating all binds (VM bind IOCTL, rebinds in the exec IOCTL, rebinds
in the preempt rebind worker, and rebinds in pagefaults) via a gpuva ops
list. Returning a fence is needed in various paths.

v2:
 - Rebase

Cc: Oak Zeng <oak.zeng@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-3-matthew.brost@intel.com
parent 77f2ef3f
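For context, a minimal sketch (not part of the patch) of the calling pattern this enables: a caller now gets the bind fence back instead of an int, so it can wait on it or hand it elsewhere. The helper name example_wait_for_binds() is hypothetical; error pointers still map back to -errno as before.

/*
 * Illustrative sketch only: consume the fence now returned by ops_execute()
 * instead of an int return code. example_wait_for_binds() is hypothetical.
 */
static int example_wait_for_binds(struct xe_vm *vm, struct list_head *ops_list)
{
	struct dma_fence *fence;
	int err;

	fence = ops_execute(vm, ops_list, true);
	if (IS_ERR(fence))
		return PTR_ERR(fence);	/* error pointers still map to -errno */

	/* The caller owns a fence reference: wait for the binds, then drop it. */
	err = dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return err;
}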
@@ -1732,16 +1732,17 @@ find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
 	return NULL;
 }
 
-static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
-			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
-			u32 num_syncs, bool immediate, bool first_op,
-			bool last_op)
+static struct dma_fence *
+xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
+	   struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs,
+	   bool immediate, bool first_op, bool last_op)
 {
 	struct dma_fence *fence;
 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
 	struct xe_user_fence *ufence;
 
 	xe_vm_assert_held(vm);
+	xe_bo_assert_held(bo);
 
 	ufence = find_ufence_get(syncs, num_syncs);
 	if (vma->ufence && ufence)
@@ -1753,7 +1754,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
 		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
 				       last_op);
 		if (IS_ERR(fence))
-			return PTR_ERR(fence);
+			return fence;
 	} else {
 		int i;
 
@@ -1768,24 +1769,12 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
 
 	if (last_op)
 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
-	dma_fence_put(fence);
 
-	return 0;
-}
-
-static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
-		      struct xe_bo *bo, struct xe_sync_entry *syncs,
-		      u32 num_syncs, bool immediate, bool first_op,
-		      bool last_op)
-{
-	xe_vm_assert_held(vm);
-	xe_bo_assert_held(bo);
-
-	return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
-			    last_op);
+	return fence;
 }
 
-static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
+static struct dma_fence *
+xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
 			u32 num_syncs, bool first_op, bool last_op)
 {
@@ -1797,14 +1786,13 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
 
 	fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
 	if (IS_ERR(fence))
-		return PTR_ERR(fence);
+		return fence;
 
 	xe_vma_destroy(vma, fence);
 	if (last_op)
 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
-	dma_fence_put(fence);
 
-	return 0;
+	return fence;
 }
 
 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
@@ -1947,7 +1935,8 @@ static const u32 region_to_mem_type[] = {
 	XE_PL_VRAM1,
 };
 
-static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
+static struct dma_fence *
+xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
 			  struct xe_exec_queue *q, u32 region,
 			  struct xe_sync_entry *syncs, u32 num_syncs,
 			  bool first_op, bool last_op)
@@ -1960,27 +1949,24 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
 
 	if (!xe_vma_has_no_bo(vma)) {
 		err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
 		if (err)
-			return err;
+			return ERR_PTR(err);
 	}
 
 	if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
 				  true, first_op, last_op);
 	} else {
+		struct dma_fence *fence =
+			xe_exec_queue_last_fence_get(wait_exec_queue, vm);
 		int i;
 
 		/* Nothing to do, signal fences now */
 		if (last_op) {
-			for (i = 0; i < num_syncs; i++) {
-				struct dma_fence *fence =
-					xe_exec_queue_last_fence_get(wait_exec_queue, vm);
-
+			for (i = 0; i < num_syncs; i++)
 				xe_sync_entry_signal(&syncs[i], fence);
-				dma_fence_put(fence);
-			}
 		}
 
-		return 0;
+		return fence;
 	}
 }
@@ -2433,10 +2419,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
 	return 0;
 }
 
-static int op_execute(struct xe_vm *vm, struct xe_vma *vma,
+static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
 		      struct xe_vma_op *op)
 {
-	int err;
+	struct dma_fence *fence = NULL;
 
 	lockdep_assert_held_write(&vm->lock);
 
@@ -2445,7 +2431,7 @@ static int op_execute(struct xe_vm *vm, struct xe_vma *vma,
 
 	switch (op->base.op) {
 	case DRM_GPUVA_OP_MAP:
-		err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
+		fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
 				 op->syncs, op->num_syncs,
 				 op->map.immediate || !xe_vm_in_fault_mode(vm),
 				 op->flags & XE_VMA_OP_FIRST,
@@ -2459,37 +2445,39 @@ static int op_execute(struct xe_vm *vm, struct xe_vma *vma,
 		if (!op->remap.unmap_done) {
 			if (prev || next)
 				vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
-			err = xe_vm_unbind(vm, vma, op->q, op->syncs,
+			fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
 					   op->num_syncs,
 					   op->flags & XE_VMA_OP_FIRST,
 					   op->flags & XE_VMA_OP_LAST &&
 					   !prev && !next);
-			if (err)
+			if (IS_ERR(fence))
 				break;
 			op->remap.unmap_done = true;
 		}
 
 		if (prev) {
 			op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
-			err = xe_vm_bind(vm, op->remap.prev, op->q,
+			dma_fence_put(fence);
+			fence = xe_vm_bind(vm, op->remap.prev, op->q,
 					 xe_vma_bo(op->remap.prev), op->syncs,
 					 op->num_syncs, true, false,
 					 op->flags & XE_VMA_OP_LAST && !next);
 			op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
-			if (err)
+			if (IS_ERR(fence))
 				break;
 			op->remap.prev = NULL;
 		}
 
 		if (next) {
 			op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
-			err = xe_vm_bind(vm, op->remap.next, op->q,
+			dma_fence_put(fence);
+			fence = xe_vm_bind(vm, op->remap.next, op->q,
 					 xe_vma_bo(op->remap.next),
 					 op->syncs, op->num_syncs,
 					 true, false,
 					 op->flags & XE_VMA_OP_LAST);
 			op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
-			if (err)
+			if (IS_ERR(fence))
 				break;
 			op->remap.next = NULL;
 		}
@@ -2497,12 +2485,12 @@ static int op_execute(struct xe_vm *vm, struct xe_vma *vma,
 		break;
 	}
 	case DRM_GPUVA_OP_UNMAP:
-		err = xe_vm_unbind(vm, vma, op->q, op->syncs,
+		fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
 				   op->num_syncs, op->flags & XE_VMA_OP_FIRST,
 				   op->flags & XE_VMA_OP_LAST);
 		break;
 	case DRM_GPUVA_OP_PREFETCH:
-		err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
+		fence = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
 				     op->syncs, op->num_syncs,
 				     op->flags & XE_VMA_OP_FIRST,
 				     op->flags & XE_VMA_OP_LAST);
@@ -2511,20 +2499,22 @@ static int op_execute(struct xe_vm *vm, struct xe_vma *vma,
 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
 	}
 
-	if (err)
+	if (IS_ERR(fence))
 		trace_xe_vma_fail(vma);
 
-	return err;
+	return fence;
 }
 
-static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
+static struct dma_fence *
+__xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
 			       struct xe_vma_op *op)
 {
+	struct dma_fence *fence;
 	int err;
 
 retry_userptr:
-	err = op_execute(vm, vma, op);
-	if (err == -EAGAIN) {
+	fence = op_execute(vm, vma, op);
+	if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) {
 		lockdep_assert_held_write(&vm->lock);
 
 		if (op->base.op == DRM_GPUVA_OP_REMAP) {
@@ -2541,22 +2531,24 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
 			if (!err)
 				goto retry_userptr;
 
+			fence = ERR_PTR(err);
 			trace_xe_vma_fail(vma);
 		}
 	}
 
-	return err;
+	return fence;
 }
 
-static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
+static struct dma_fence *
+xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
 {
-	int ret = 0;
+	struct dma_fence *fence = ERR_PTR(-ENOMEM);
 
 	lockdep_assert_held_write(&vm->lock);
 
 	switch (op->base.op) {
 	case DRM_GPUVA_OP_MAP:
-		ret = __xe_vma_op_execute(vm, op->map.vma, op);
+		fence = __xe_vma_op_execute(vm, op->map.vma, op);
 		break;
 	case DRM_GPUVA_OP_REMAP:
 	{
@@ -2569,15 +2561,15 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
 		else
 			vma = op->remap.next;
 
-		ret = __xe_vma_op_execute(vm, vma, op);
+		fence = __xe_vma_op_execute(vm, vma, op);
 		break;
 	}
 	case DRM_GPUVA_OP_UNMAP:
-		ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
+		fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
 					  op);
 		break;
 	case DRM_GPUVA_OP_PREFETCH:
-		ret = __xe_vma_op_execute(vm,
+		fence = __xe_vma_op_execute(vm,
 					  gpuva_to_vma(op->base.prefetch.va),
 					  op);
 		break;
@@ -2585,7 +2577,7 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
 	}
 
-	return ret;
+	return fence;
 }
 
 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
@@ -2760,11 +2752,35 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
 	return 0;
 }
 
+static struct dma_fence *ops_execute(struct xe_vm *vm,
+				     struct list_head *ops_list,
+				     bool cleanup)
+{
+	struct xe_vma_op *op, *next;
+	struct dma_fence *fence = NULL;
+
+	list_for_each_entry_safe(op, next, ops_list, link) {
+		if (!IS_ERR(fence)) {
+			dma_fence_put(fence);
+			fence = xe_vma_op_execute(vm, op);
+		}
+		if (IS_ERR(fence)) {
+			drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
+				 op->base.op, PTR_ERR(fence));
+			fence = ERR_PTR(-ENOSPC);
+		}
+		if (cleanup)
+			xe_vma_op_cleanup(vm, op);
+	}
+
+	return fence;
+}
+
 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
 				     struct list_head *ops_list)
 {
 	struct drm_exec exec;
-	struct xe_vma_op *op, *next;
+	struct dma_fence *fence;
 	int err;
 
 	lockdep_assert_held_write(&vm->lock);
@@ -2777,19 +2793,14 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
 		if (err)
 			goto unlock;
 
-		list_for_each_entry_safe(op, next, ops_list, link) {
-			err = xe_vma_op_execute(vm, op);
-			if (err) {
-				drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
-					 op->base.op, err);
-				/*
-				 * FIXME: Killing VM rather than proper error handling
-				 */
-				xe_vm_kill(vm, false);
-				err = -ENOSPC;
-				goto unlock;
-			}
-			xe_vma_op_cleanup(vm, op);
+		fence = ops_execute(vm, ops_list, true);
+		if (IS_ERR(fence)) {
+			err = PTR_ERR(fence);
+			/* FIXME: Killing VM rather than proper error handling */
+			xe_vm_kill(vm, false);
+			goto unlock;
+		} else {
+			dma_fence_put(fence);
 		}
 	}