Commit 5aa5eea0 authored by Matthew Brost

drm/xe: Move ufence add to vm_bind_ioctl_ops_fini

Rather than adding a ufence to a VMA in the bind function, add the
ufence to all VMAs in the IOCTL that require binds in
vm_bind_ioctl_ops_fini. This helps with the transition to 1 job per VM
bind IOCTL.

v2:
 - Rebase
v3:
 - Fix typo in commit (Oak)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-12-matthew.brost@intel.com
parent fda75ef8
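
For context, the patch relies on a simple get/put reference-counting contract for user fences. Below is a minimal sketch of that contract, using a hypothetical demo_ufence type in place of struct xe_user_fence, whose internals are not part of this diff:

#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical stand-in for struct xe_user_fence; the real layout is
 * not shown in this diff. */
struct demo_ufence {
	struct kref refcount;
};

/* Mirrors __xe_sync_ufence_get(): take a reference, return the pointer. */
static struct demo_ufence *demo_ufence_get(struct demo_ufence *ufence)
{
	kref_get(&ufence->refcount);
	return ufence;
}

static void demo_ufence_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_ufence, refcount));
}

/* Mirrors xe_sync_ufence_put(): drop a reference, freeing on the last put. */
static void demo_ufence_put(struct demo_ufence *ufence)
{
	kref_put(&ufence->refcount, demo_ufence_release);
}
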
drivers/gpu/drm/xe/xe_sync.c
@@ -338,6 +338,21 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
	return ERR_PTR(-ENOMEM);
}

/**
 * __xe_sync_ufence_get() - Get user fence from user fence
 * @ufence: input user fence
 *
 * Get a user fence reference from user fence
 *
 * Return: xe_user_fence pointer with reference
 */
struct xe_user_fence *__xe_sync_ufence_get(struct xe_user_fence *ufence)
{
	user_fence_get(ufence);
	return ufence;
}

/**
 * xe_sync_ufence_get() - Get user fence from sync
 * @sync: input sync
drivers/gpu/drm/xe/xe_sync.h
@@ -37,6 +37,7 @@ static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
	return !!sync->ufence;
}

struct xe_user_fence *__xe_sync_ufence_get(struct xe_user_fence *ufence);
struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
void xe_sync_ufence_put(struct xe_user_fence *ufence);
int xe_sync_ufence_get_status(struct xe_user_fence *ufence);
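
Both xe_vm.c hunks below call find_ufence_get(), which sits outside the visible diff context. A hedged sketch of what such a helper plausibly does with the API above: scan the IOCTL's sync array for the first entry carrying a user fence and return it with a reference held, or NULL if none is present:

static struct xe_user_fence *find_ufence_get(struct xe_sync_entry *syncs,
					     u32 num_syncs)
{
	u32 i;

	for (i = 0; i < num_syncs; i++) {
		struct xe_sync_entry *e = &syncs[i];

		/* Return the first user fence found, reference held. */
		if (xe_sync_is_ufence(e))
			return xe_sync_ufence_get(e);
	}

	return NULL;
}
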
drivers/gpu/drm/xe/xe_vm.c
@@ -1798,17 +1798,10 @@ xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
{
	struct dma_fence *fence;
	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
	struct xe_user_fence *ufence;

	xe_vm_assert_held(vm);
	xe_bo_assert_held(bo);

	ufence = find_ufence_get(syncs, num_syncs);
	if (vma->ufence && ufence)
		xe_sync_ufence_put(vma->ufence);

	vma->ufence = ufence ?: vma->ufence;

	if (immediate) {
		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
				       first_op, last_op);
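
In the removed xe_vm_bind() logic above, each bind consumed the reference returned by find_ufence_get() directly, putting the VMA's previous ufence only when a new one was found. The hunk below replaces that per-bind handover with a single attach point at the end of the IOCTL.
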
@@ -2817,20 +2810,57 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
	return fence;
}

static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
{
	if (vma->ufence)
		xe_sync_ufence_put(vma->ufence);

	vma->ufence = __xe_sync_ufence_get(ufence);
}

static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
			  struct xe_user_fence *ufence)
{
	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		vma_add_ufence(op->map.vma, ufence);
		break;
	case DRM_GPUVA_OP_REMAP:
		if (op->remap.prev)
			vma_add_ufence(op->remap.prev, ufence);
		if (op->remap.next)
			vma_add_ufence(op->remap.next, ufence);
		break;
	case DRM_GPUVA_OP_UNMAP:
		break;
	case DRM_GPUVA_OP_PREFETCH:
		vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}
}

static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
				   struct dma_fence *fence)
{
	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
	struct xe_user_fence *ufence;
	struct xe_vma_op *op;
	int i;

	ufence = find_ufence_get(vops->syncs, vops->num_syncs);
	list_for_each_entry(op, &vops->list, link) {
		if (ufence)
			op_add_ufence(vm, op, ufence);

		if (op->base.op == DRM_GPUVA_OP_UNMAP)
			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
		else if (op->base.op == DRM_GPUVA_OP_REMAP)
			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
				       fence);
	}
	if (ufence)
		xe_sync_ufence_put(ufence);
	for (i = 0; i < vops->num_syncs; i++)
		xe_sync_entry_signal(vops->syncs + i, fence);
	xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
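
Taken together, the reference flow in vm_bind_ioctl_ops_fini() is: find_ufence_get() returns the ufence with one lookup reference; each vma_add_ufence() call takes its own reference via __xe_sync_ufence_get() while dropping the VMA's previous one; and the trailing xe_sync_ufence_put() releases only the lookup reference, leaving exactly one reference held per touched VMA. A sketch of that net effect using the hypothetical demo_* helpers from above:

/* Hypothetical VMA holding at most one user fence reference. */
struct demo_vma {
	struct demo_ufence *ufence;
};

/* Mirrors vma_add_ufence(): swap in a new reference, dropping the old. */
static void demo_vma_add_ufence(struct demo_vma *vma,
				struct demo_ufence *ufence)
{
	if (vma->ufence)
		demo_ufence_put(vma->ufence);
	vma->ufence = demo_ufence_get(ufence);
}

/* Net effect of the fini path: one reference per VMA, then the lookup
 * reference from find_ufence_get() is dropped. */
static void demo_fini_flow(struct demo_ufence *lookup,
			   struct demo_vma *vmas, int nr_vmas)
{
	int i;

	for (i = 0; i < nr_vmas; i++)
		demo_vma_add_ufence(&vmas[i], lookup);

	demo_ufence_put(lookup);
}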