Commit 0a34c124 authored by Matthew Brost

drm/xe: Move migrate to prefetch to op_lock_and_prep function

All non-binding operations in a VM bind IOCTL should be in the lock and
prepare step rather than the execution step. Move prefetch to conform to
this pattern.

v2:
 - Rebase
 - New function names (Oak)
 - Update stale comment (Oak)

Cc: Oak Zeng <oak.zeng@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-4-matthew.brost@intel.com
parent 75192758
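
The pattern the commit message refers to splits each VM bind operation into a
fallible lock-and-prepare phase and an execute phase that only issues work.
Below is a minimal, self-contained sketch of that split, not xe driver code:
struct sketch_op, op_lock_and_prep(), op_execute() and the printf placeholders
are hypothetical stand-ins for the driver's real structures and helpers
(locking, xe_bo_migrate(), fence creation).

/* Hedged sketch only: models "all fallible work in prep, none in execute". */
#include <stdbool.h>
#include <stdio.h>

struct sketch_op {
        bool needs_migrate;     /* e.g. a prefetch targeting another memory region */
        bool prepared;          /* set once the prepare phase fully succeeded */
};

/* Fallible phase: take locks, validate backing store, migrate if needed. */
static int op_lock_and_prep(struct sketch_op *op)
{
        if (op->needs_migrate)
                printf("prep: migrate backing store (xe_bo_migrate() stand-in)\n");
        op->prepared = true;
        return 0;       /* any locking/validation/migration error is reported here */
}

/* Execute phase: nothing left that can fail; just issue work and return. */
static void op_execute(const struct sketch_op *op)
{
        if (op->prepared)
                printf("execute: issue bind/prefetch and return a fence\n");
}

int main(void)
{
        struct sketch_op op = { .needs_migrate = true };

        if (!op_lock_and_prep(&op))
                op_execute(&op);
        return 0;
}

Because migration now happens in the prepare phase, the execute step can no
longer fail on memory placement, which is why the diff below drops the
xe_bo_migrate() call from xe_vm_prefetch() and adds it to the
DRM_GPUVA_OP_PREFETCH case of op_lock_and_prep().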
@@ -1937,20 +1937,10 @@ static const u32 region_to_mem_type[] = {
 
 static struct dma_fence *
 xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
-               struct xe_exec_queue *q, u32 region,
-               struct xe_sync_entry *syncs, u32 num_syncs,
-               bool first_op, bool last_op)
+               struct xe_exec_queue *q, struct xe_sync_entry *syncs,
+               u32 num_syncs, bool first_op, bool last_op)
 {
         struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-        int err;
-
-        xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
-
-        if (!xe_vma_has_no_bo(vma)) {
-                err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
-                if (err)
-                        return ERR_PTR(err);
-        }
 
         if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
                 return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
@@ -2490,8 +2480,7 @@ static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
                                    op->flags & XE_VMA_OP_LAST);
                 break;
         case DRM_GPUVA_OP_PREFETCH:
-                fence = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
-                                       op->syncs, op->num_syncs,
+                fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs,
                                        op->flags & XE_VMA_OP_FIRST,
                                        op->flags & XE_VMA_OP_LAST);
                 break;
@@ -2722,9 +2711,20 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
                                             false);
                 break;
         case DRM_GPUVA_OP_PREFETCH:
+        {
+                struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+                u32 region = op->prefetch.region;
+
+                xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
+
                 err = vma_lock_and_validate(exec,
-                                            gpuva_to_vma(op->base.prefetch.va), true);
+                                            gpuva_to_vma(op->base.prefetch.va),
+                                            false);
+                if (!err && !xe_vma_has_no_bo(vma))
+                        err = xe_bo_migrate(xe_vma_bo(vma),
+                                            region_to_mem_type[region]);
                 break;
+        }
         default:
                 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
         }