Commit 33991ae8 authored by Matthew Brost

drm/xe: Simplify locking in new_vma

Rather than acquiring and dropping the VM / BO dma-resv around
xe_vma_create, and doing the same again when adding preempt fences or
handling an error, hold these locks across the entire new_vma()
function.

v2:
 - Rebase (CI)

Cc: Fei Yang <fei.yang@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Jagmeet Randhawa <jagmeet.randhawa@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240618003859.3239239-1-matthew.brost@intel.com
parent 0d39640a
@@ -180,16 +180,14 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
 	struct xe_exec_queue *q;
 	int err;
 
+	xe_bo_assert_held(bo);
+
 	if (!vm->preempt.num_exec_queues)
 		return 0;
 
-	err = xe_bo_lock(bo, true);
-	if (err)
-		return err;
-
 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
 	if (err)
-		goto out_unlock;
+		return err;
 
 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
 		if (q->lr.pfence) {
@@ -198,9 +196,7 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
 			DMA_RESV_USAGE_BOOKKEEP);
 		}
 
-out_unlock:
-	xe_bo_unlock(bo);
-	return err;
+	return 0;
 }
 
 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
@@ -2140,7 +2136,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
 	struct drm_exec exec;
 	struct xe_vma *vma;
-	int err;
+	int err = 0;
 
 	lockdep_assert_held_write(&vm->lock);
 
@@ -2165,23 +2161,22 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
 		vma = xe_vma_create(vm, bo, op->gem.offset,
 				    op->va.addr, op->va.addr +
 				    op->va.range - 1, pat_index, flags);
-	if (bo)
-		drm_exec_fini(&exec);
+	if (IS_ERR(vma))
+		goto err_unlock;
 
-	if (xe_vma_is_userptr(vma)) {
+	if (xe_vma_is_userptr(vma))
 		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
-		if (err) {
-			prep_vma_destroy(vm, vma, false);
-			xe_vma_destroy_unlocked(vma);
-			return ERR_PTR(err);
-		}
-	} else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
+	else if (!xe_vma_has_no_bo(vma) && !bo->vm)
 		err = add_preempt_fences(vm, bo);
-		if (err) {
-			prep_vma_destroy(vm, vma, false);
-			xe_vma_destroy_unlocked(vma);
-			return ERR_PTR(err);
-		}
+
+err_unlock:
+	if (bo)
+		drm_exec_fini(&exec);
+
+	if (err) {
+		prep_vma_destroy(vm, vma, false);
+		xe_vma_destroy_unlocked(vma);
+		vma = ERR_PTR(err);
 	}
 
 	return vma;