Commit d00e9cc2 authored by Thomas Hellström, committed by Rodrigo Vivi

drm/xe/vm: Simplify and document xe_vm_lock()

The xe_vm_lock() function was unnecessarily using ttm_eu_reserve_buffers().
Simplify and document the interface.

v4:
- Improve on xe_vm_lock() documentation (Matthew Brost)
v5:
- Rebase conflict.
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230908091716.36984-3-thomas.hellstrom@linux.intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 08a4f00e
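
At the call sites, the change amounts to dropping the ww_acquire_ctx and the fence-slot count and passing only the interruptibility flag. A condensed before/after sketch of a typical caller (mirroring the xe_gem_create_ioctl hunk below):

	/* Before: a ww context and a fence-slot count were required. */
	struct ww_acquire_ctx ww;

	err = xe_vm_lock(vm, &ww, 0, true);	/* interruptible wait */
	if (err)
		return err;
	/* ... operate on the VM under its dma_resv lock ... */
	xe_vm_unlock(vm, &ww);

	/* After: only the VM and the interruptibility flag are passed. */
	err = xe_vm_lock(vm, true);		/* may return -EINTR */
	if (err)
		return err;
	/* ... */
	xe_vm_unlock(vm);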
@@ -180,7 +180,6 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
 	unsigned int bo_flags = XE_BO_CREATE_USER_BIT |
 		XE_BO_CREATE_VRAM_IF_DGFX(tile);
 	struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
-	struct ww_acquire_ctx ww;
 	struct xe_gt *__gt;
 	int err, i, id;
 
@@ -188,10 +187,10 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
 		   dev_name(xe->drm.dev), tile->id);
 
 	for (i = 0; i < 2; ++i) {
-		xe_vm_lock(vm, &ww, 0, false);
+		xe_vm_lock(vm, false);
 		bo = xe_bo_create(xe, NULL, vm, 0x10000, ttm_bo_type_device,
 				  bo_flags);
-		xe_vm_unlock(vm, &ww);
+		xe_vm_unlock(vm);
 		if (IS_ERR(bo)) {
 			KUNIT_FAIL(test, "bo create err=%pe\n", bo);
 			break;
@@ -263,9 +262,9 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
 		if (i) {
 			down_read(&vm->lock);
-			xe_vm_lock(vm, &ww, 0, false);
+			xe_vm_lock(vm, false);
 			err = xe_bo_validate(bo, bo->vm, false);
-			xe_vm_unlock(vm, &ww);
+			xe_vm_unlock(vm);
 			up_read(&vm->lock);
 			if (err) {
 				KUNIT_FAIL(test, "bo valid err=%pe\n",
...
@@ -396,14 +396,13 @@ static int migrate_test_run_device(struct xe_device *xe)
 	for_each_tile(tile, xe, id) {
 		struct xe_migrate *m = tile->migrate;
-		struct ww_acquire_ctx ww;
 
 		kunit_info(test, "Testing tile id %d.\n", id);
-		xe_vm_lock(m->q->vm, &ww, 0, true);
+		xe_vm_lock(m->q->vm, true);
 		xe_device_mem_access_get(xe);
 		xe_migrate_sanity_test(m, test);
 		xe_device_mem_access_put(xe);
-		xe_vm_unlock(m->q->vm, &ww);
+		xe_vm_unlock(m->q->vm);
 	}
 
 	return 0;
...
@@ -1759,7 +1759,6 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 	struct xe_device *xe = to_xe_device(dev);
 	struct xe_file *xef = to_xe_file(file);
 	struct drm_xe_gem_create *args = data;
-	struct ww_acquire_ctx ww;
 	struct xe_vm *vm = NULL;
 	struct xe_bo *bo;
 	unsigned int bo_flags = XE_BO_CREATE_USER_BIT;
@@ -1812,7 +1811,7 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 		vm = xe_vm_lookup(xef, args->vm_id);
 		if (XE_IOCTL_DBG(xe, !vm))
 			return -ENOENT;
-		err = xe_vm_lock(vm, &ww, 0, true);
+		err = xe_vm_lock(vm, true);
 		if (err) {
 			xe_vm_put(vm);
 			return err;
@@ -1840,7 +1839,7 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 	xe_bo_put(bo);
 out_vm:
 	if (vm) {
-		xe_vm_unlock(vm, &ww);
+		xe_vm_unlock(vm);
 		xe_vm_put(vm);
 	}
 	return err;
...
@@ -111,18 +111,17 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
 					   u32 logical_mask, u16 width,
 					   struct xe_hw_engine *hwe, u32 flags)
 {
-	struct ww_acquire_ctx ww;
 	struct xe_exec_queue *q;
 	int err;
 
 	if (vm) {
-		err = xe_vm_lock(vm, &ww, 0, true);
+		err = xe_vm_lock(vm, true);
 		if (err)
 			return ERR_PTR(err);
 	}
 	q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags);
 	if (vm)
-		xe_vm_unlock(vm, &ww);
+		xe_vm_unlock(vm);
 
 	return q;
 }
...
@@ -789,16 +789,14 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
 void xe_lrc_finish(struct xe_lrc *lrc)
 {
-	struct ww_acquire_ctx ww;
-
 	xe_hw_fence_ctx_finish(&lrc->fence_ctx);
 	if (lrc->bo->vm)
-		xe_vm_lock(lrc->bo->vm, &ww, 0, false);
+		xe_vm_lock(lrc->bo->vm, false);
 	else
 		xe_bo_lock_no_vm(lrc->bo, NULL);
 	xe_bo_unpin(lrc->bo);
 	if (lrc->bo->vm)
-		xe_vm_unlock(lrc->bo->vm, &ww);
+		xe_vm_unlock(lrc->bo->vm);
 	else
 		xe_bo_unlock_no_vm(lrc->bo);
 	xe_bo_put(lrc->bo);
...
@@ -88,13 +88,12 @@ struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
 static void xe_migrate_fini(struct drm_device *dev, void *arg)
 {
 	struct xe_migrate *m = arg;
-	struct ww_acquire_ctx ww;
 
-	xe_vm_lock(m->q->vm, &ww, 0, false);
+	xe_vm_lock(m->q->vm, false);
 	xe_bo_unpin(m->pt_bo);
 	if (m->cleared_bo)
 		xe_bo_unpin(m->cleared_bo);
-	xe_vm_unlock(m->q->vm, &ww);
+	xe_vm_unlock(m->q->vm);
 	dma_fence_put(m->fence);
 	if (m->cleared_bo)
@@ -338,7 +337,6 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 	struct xe_gt *primary_gt = tile->primary_gt;
 	struct xe_migrate *m;
 	struct xe_vm *vm;
-	struct ww_acquire_ctx ww;
 	int err;
 
 	m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
@@ -353,9 +351,9 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 	if (IS_ERR(vm))
 		return ERR_CAST(vm);
 
-	xe_vm_lock(vm, &ww, 0, false);
+	xe_vm_lock(vm, false);
 	err = xe_migrate_prepare_vm(tile, m, vm);
-	xe_vm_unlock(vm, &ww);
+	xe_vm_unlock(vm);
 	if (err) {
 		xe_vm_close_and_put(vm);
 		return ERR_PTR(err);
...
@@ -523,18 +523,17 @@ void xe_vm_unlock_dma_resv(struct xe_vm *vm,
 static void xe_vm_kill(struct xe_vm *vm)
 {
-	struct ww_acquire_ctx ww;
 	struct xe_exec_queue *q;
 
 	lockdep_assert_held(&vm->lock);
-	xe_vm_lock(vm, &ww, 0, false);
+	xe_vm_lock(vm, false);
 	vm->flags |= XE_VM_FLAG_BANNED;
 	trace_xe_vm_kill(vm);
 
 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
 		q->ops->kill(q);
-	xe_vm_unlock(vm, &ww);
+	xe_vm_unlock(vm);
 
 	/* TODO: Inform user the VM is banned */
 }
@@ -1420,7 +1419,6 @@ static void xe_vm_close(struct xe_vm *vm)
 void xe_vm_close_and_put(struct xe_vm *vm)
 {
 	LIST_HEAD(contested);
-	struct ww_acquire_ctx ww;
 	struct xe_device *xe = vm->xe;
 	struct xe_tile *tile;
 	struct xe_vma *vma, *next_vma;
@@ -1443,7 +1441,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
 	}
 
 	down_write(&vm->lock);
-	xe_vm_lock(vm, &ww, 0, false);
+	xe_vm_lock(vm, false);
 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
 		vma = gpuva_to_vma(gpuva);
@@ -1488,7 +1486,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
 			vm->pt_root[id] = NULL;
 		}
 	}
-	xe_vm_unlock(vm, &ww);
+	xe_vm_unlock(vm);
 
 	/*
 	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
@@ -3442,30 +3440,32 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	return err == -ENODATA ? 0 : err;
 }
 
-/*
- * XXX: Using the TTM wrappers for now, likely can call into dma-resv code
- * directly to optimize. Also this likely should be an inline function.
- */
-int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
-	       int num_resv, bool intr)
+/**
+ * xe_vm_lock() - Lock the vm's dma_resv object
+ * @vm: The struct xe_vm whose lock is to be locked
+ * @intr: Whether to perform any wait interruptible
+ *
+ * Return: 0 on success, -EINTR if @intr is true and the wait for a
+ * contended lock was interrupted. If @intr is false, the function
+ * always returns 0.
+ */
+int xe_vm_lock(struct xe_vm *vm, bool intr)
 {
-	struct ttm_validate_buffer tv_vm;
-	LIST_HEAD(objs);
-	LIST_HEAD(dups);
-
-	XE_WARN_ON(!ww);
-
-	tv_vm.num_shared = num_resv;
-	tv_vm.bo = xe_vm_ttm_bo(vm);
-	list_add_tail(&tv_vm.head, &objs);
-	return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
+	if (intr)
+		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
+
+	return dma_resv_lock(xe_vm_resv(vm), NULL);
 }
 
-void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
+/**
+ * xe_vm_unlock() - Unlock the vm's dma_resv object
+ * @vm: The struct xe_vm whose lock is to be released.
+ *
+ * Unlock a buffer object lock that was locked by xe_vm_lock().
+ */
+void xe_vm_unlock(struct xe_vm *vm)
 {
 	dma_resv_unlock(xe_vm_resv(vm));
-	ww_acquire_fini(ww);
 }
 
 /**
...
@@ -38,10 +38,9 @@ static inline void xe_vm_put(struct xe_vm *vm)
 	drm_gpuvm_put(&vm->gpuvm);
 }
 
-int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
-	       int num_resv, bool intr);
+int xe_vm_lock(struct xe_vm *vm, bool intr);
 
-void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww);
+void xe_vm_unlock(struct xe_vm *vm);
 
 static inline bool xe_vm_is_closed(struct xe_vm *vm)
 {
...
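
A note on the design choice: the old wrapper's num_resv argument made ttm_eu_reserve_buffers() reserve shared fence slots while taking the lock, whereas the new wrapper only locks. A caller that still needs fence slots would presumably reserve them itself after locking; a minimal sketch (not part of this patch) using the generic dma_resv_reserve_fences() helper, where num_fences is a hypothetical count:

	err = xe_vm_lock(vm, true);
	if (err)
		return err;

	/* Reserve the fence slots that ttm_eu_reserve_buffers() used to
	 * reserve via tv_vm.num_shared; num_fences is hypothetical here.
	 */
	err = dma_resv_reserve_fences(xe_vm_resv(vm), num_fences);
	if (err) {
		xe_vm_unlock(vm);
		return err;
	}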