Commit 08a4f00e authored by Thomas Hellström, committed by Rodrigo Vivi

drm/xe/bo: Simplify xe_bo_lock()

Although it only grabbed a single lock, xe_bo_lock() was unnecessarily
using ttm_eu_reserve_buffers(). Simplify and document the interface.

v2:
- Also update the xe_display subsystem.
v4:
- Reinstate a lost dma_resv_reserve_fences().
- Improve the xe_bo_lock() documentation (Matthew Brost)
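
With ttm_eu_reserve_buffers() gone, callers that previously passed a
num_resv count to xe_bo_lock() to pre-allocate fence slots must now
reserve them explicitly with dma_resv_reserve_fences() after taking the
lock. A minimal sketch of the resulting pattern, mirroring the
add_preempt_fences() hunk below (num_fences and fence are illustrative
placeholders, not identifiers from this patch):

	err = xe_bo_lock(bo, true);
	if (err)
		return err;

	/* Reserve fence slots; previously done via xe_bo_lock()'s num_resv. */
	err = dma_resv_reserve_fences(bo->ttm.base.resv, num_fences);
	if (err)
		goto out_unlock;

	/* The slots reserved above are consumed here. */
	dma_resv_add_fence(bo->ttm.base.resv, fence, DMA_RESV_USAGE_BOOKKEEP);

out_unlock:
	xe_bo_unlock(bo);
	return err;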
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230908091716.36984-2-thomas.hellstrom@linux.intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 9fa81f91
@@ -204,9 +204,9 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
 			goto cleanup_bo;
 		}
-		xe_bo_lock(external, &ww, 0, false);
+		xe_bo_lock(external, false);
 		err = xe_bo_pin_external(external);
-		xe_bo_unlock(external, &ww);
+		xe_bo_unlock(external);
 		if (err) {
 			KUNIT_FAIL(test, "external bo pin err=%pe\n",
 				   ERR_PTR(err));
@@ -272,9 +272,9 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
 				   ERR_PTR(err));
 			goto cleanup_all;
 		}
-		xe_bo_lock(external, &ww, 0, false);
+		xe_bo_lock(external, false);
 		err = xe_bo_validate(external, NULL, false);
-		xe_bo_unlock(external, &ww);
+		xe_bo_unlock(external);
 		if (err) {
 			KUNIT_FAIL(test, "external bo valid err=%pe\n",
 				   ERR_PTR(err));
@@ -282,28 +282,28 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
 			}
 		}
-		xe_bo_lock(external, &ww, 0, false);
+		xe_bo_lock(external, false);
 		xe_bo_unpin_external(external);
-		xe_bo_unlock(external, &ww);
+		xe_bo_unlock(external);
 
 		xe_bo_put(external);
 
-		xe_bo_lock(bo, &ww, 0, false);
+		xe_bo_lock(bo, false);
 		__xe_bo_unset_bulk_move(bo);
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 
 		xe_bo_put(bo);
 		continue;
 
 cleanup_all:
-		xe_bo_lock(external, &ww, 0, false);
+		xe_bo_lock(external, false);
 		xe_bo_unpin_external(external);
-		xe_bo_unlock(external, &ww);
+		xe_bo_unlock(external);
 
 cleanup_external:
 		xe_bo_put(external);
 cleanup_bo:
-		xe_bo_lock(bo, &ww, 0, false);
+		xe_bo_lock(bo, false);
 		__xe_bo_unset_bulk_move(bo);
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 
 		xe_bo_put(bo);
 		break;
 	}
......
@@ -1082,13 +1082,11 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
 	struct xe_bo *bo = gem_to_xe_bo(obj);
 
 	if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) {
-		struct ww_acquire_ctx ww;
-
 		XE_WARN_ON(!xe_bo_is_user(bo));
 
-		xe_bo_lock(bo, &ww, 0, false);
+		xe_bo_lock(bo, false);
 		ttm_bo_set_bulk_move(&bo->ttm, NULL);
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 	}
 }
@@ -1873,26 +1871,37 @@ int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-int xe_bo_lock(struct xe_bo *bo, struct ww_acquire_ctx *ww,
-	       int num_resv, bool intr)
+/**
+ * xe_bo_lock() - Lock the buffer object's dma_resv object
+ * @bo: The struct xe_bo whose lock is to be taken
+ * @intr: Whether to perform any wait interruptible
+ *
+ * Locks the buffer object's dma_resv object. If the buffer object is
+ * pointing to a shared dma_resv object, that shared lock is locked.
+ *
+ * Return: 0 on success, -EINTR if @intr is true and the wait for a
+ * contended lock was interrupted. If @intr is set to false, the
+ * function always returns 0.
+ */
+int xe_bo_lock(struct xe_bo *bo, bool intr)
 {
-	struct ttm_validate_buffer tv_bo;
-	LIST_HEAD(objs);
-	LIST_HEAD(dups);
+	if (intr)
+		return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL);
 
-	XE_WARN_ON(!ww);
+	dma_resv_lock(bo->ttm.base.resv, NULL);
 
-	tv_bo.num_shared = num_resv;
-	tv_bo.bo = &bo->ttm;
-	list_add_tail(&tv_bo.head, &objs);
-
-	return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
+	return 0;
 }
 
-void xe_bo_unlock(struct xe_bo *bo, struct ww_acquire_ctx *ww)
+/**
+ * xe_bo_unlock() - Unlock the buffer object's dma_resv object
+ * @bo: The struct xe_bo whose lock is to be released.
+ *
+ * Unlock a buffer object lock that was locked by xe_bo_lock().
+ */
+void xe_bo_unlock(struct xe_bo *bo)
 {
 	dma_resv_unlock(bo->ttm.base.resv);
-	ww_acquire_fini(ww);
 }
 
 /**
......
@@ -158,10 +158,9 @@ static inline void xe_bo_assert_held(struct xe_bo *bo)
 		dma_resv_assert_held((bo)->ttm.base.resv);
 }
 
-int xe_bo_lock(struct xe_bo *bo, struct ww_acquire_ctx *ww,
-	       int num_resv, bool intr);
+int xe_bo_lock(struct xe_bo *bo, bool intr);
 
-void xe_bo_unlock(struct xe_bo *bo, struct ww_acquire_ctx *ww);
+void xe_bo_unlock(struct xe_bo *bo);
 
 static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
 {
......
@@ -27,7 +27,6 @@
 int xe_bo_evict_all(struct xe_device *xe)
 {
 	struct ttm_device *bdev = &xe->ttm;
-	struct ww_acquire_ctx ww;
 	struct xe_bo *bo;
 	struct xe_tile *tile;
 	struct list_head still_in_list;
@@ -62,9 +61,9 @@ int xe_bo_evict_all(struct xe_device *xe)
 		list_move_tail(&bo->pinned_link, &still_in_list);
 		spin_unlock(&xe->pinned.lock);
 
-		xe_bo_lock(bo, &ww, 0, false);
+		xe_bo_lock(bo, false);
 		ret = xe_bo_evict_pinned(bo);
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 		xe_bo_put(bo);
 		if (ret) {
 			spin_lock(&xe->pinned.lock);
@@ -96,9 +95,9 @@ int xe_bo_evict_all(struct xe_device *xe)
 		list_move_tail(&bo->pinned_link, &xe->pinned.evicted);
 		spin_unlock(&xe->pinned.lock);
 
-		xe_bo_lock(bo, &ww, 0, false);
+		xe_bo_lock(bo, false);
 		ret = xe_bo_evict_pinned(bo);
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 		xe_bo_put(bo);
 		if (ret)
 			return ret;
@@ -123,7 +122,6 @@ int xe_bo_evict_all(struct xe_device *xe)
  */
 int xe_bo_restore_kernel(struct xe_device *xe)
 {
-	struct ww_acquire_ctx ww;
 	struct xe_bo *bo;
 	int ret;
 
@@ -140,9 +138,9 @@ int xe_bo_restore_kernel(struct xe_device *xe)
 		list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
 		spin_unlock(&xe->pinned.lock);
 
-		xe_bo_lock(bo, &ww, 0, false);
+		xe_bo_lock(bo, false);
 		ret = xe_bo_restore_pinned(bo);
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 		if (ret) {
 			xe_bo_put(bo);
 			return ret;
@@ -184,7 +182,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
  */
 int xe_bo_restore_user(struct xe_device *xe)
 {
-	struct ww_acquire_ctx ww;
 	struct xe_bo *bo;
 	struct xe_tile *tile;
 	struct list_head still_in_list;
@@ -206,9 +203,9 @@ int xe_bo_restore_user(struct xe_device *xe)
 		xe_bo_get(bo);
 		spin_unlock(&xe->pinned.lock);
 
-		xe_bo_lock(bo, &ww, 0, false);
+		xe_bo_lock(bo, false);
 		ret = xe_bo_restore_pinned(bo);
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 		xe_bo_put(bo);
 		if (ret) {
 			spin_lock(&xe->pinned.lock);
......
@@ -171,20 +171,18 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 
 	/* Lock VM and BOs dma-resv */
 	bo = xe_vma_bo(vma);
-	if (only_needs_bo_lock(bo)) {
-		/* This path ensures the BO's LRU is updated */
-		ret = xe_bo_lock(bo, &ww, xe->info.tile_count, false);
-	} else {
+	if (!only_needs_bo_lock(bo)) {
 		tv_vm.num_shared = xe->info.tile_count;
 		tv_vm.bo = xe_vm_ttm_bo(vm);
 		list_add(&tv_vm.head, &objs);
-		if (bo) {
-			tv_bo.bo = &bo->ttm;
-			tv_bo.num_shared = xe->info.tile_count;
-			list_add(&tv_bo.head, &objs);
-		}
-		ret = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
 	}
+	if (bo) {
+		tv_bo.bo = &bo->ttm;
+		tv_bo.num_shared = xe->info.tile_count;
+		list_add(&tv_bo.head, &objs);
+	}
+	ret = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
 	if (ret)
 		goto unlock_vm;
@@ -227,10 +225,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 	vma->usm.tile_invalidated &= ~BIT(tile->id);
 
 unlock_dma_resv:
-	if (only_needs_bo_lock(bo))
-		xe_bo_unlock(bo, &ww);
-	else
-		ttm_eu_backoff_reservation(&ww, &objs);
+	ttm_eu_backoff_reservation(&ww, &objs);
 unlock_vm:
 	if (!ret)
 		vm->usm.last_fault_vma = vma;
@@ -534,28 +529,22 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
 
 	/* Lock VM and BOs dma-resv */
 	bo = xe_vma_bo(vma);
-	if (only_needs_bo_lock(bo)) {
-		/* This path ensures the BO's LRU is updated */
-		ret = xe_bo_lock(bo, &ww, xe->info.tile_count, false);
-	} else {
+	if (!only_needs_bo_lock(bo)) {
 		tv_vm.num_shared = xe->info.tile_count;
 		tv_vm.bo = xe_vm_ttm_bo(vm);
 		list_add(&tv_vm.head, &objs);
-		tv_bo.bo = &bo->ttm;
-		tv_bo.num_shared = xe->info.tile_count;
-		list_add(&tv_bo.head, &objs);
-		ret = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
 	}
+	tv_bo.bo = &bo->ttm;
+	tv_bo.num_shared = xe->info.tile_count;
+	list_add(&tv_bo.head, &objs);
+	ret = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
 	if (ret)
 		goto unlock_vm;
 
 	/* Migrate to VRAM, move should invalidate the VMA first */
 	ret = xe_bo_migrate(bo, XE_PL_VRAM0 + tile->id);
-	if (only_needs_bo_lock(bo))
-		xe_bo_unlock(bo, &ww);
-	else
-		ttm_eu_backoff_reservation(&ww, &objs);
+	ttm_eu_backoff_reservation(&ww, &objs);
 unlock_vm:
 	up_read(&vm->lock);
 	xe_vm_put(vm);
......
@@ -267,13 +267,16 @@ static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
 {
 	struct xe_exec_queue *q;
-	struct ww_acquire_ctx ww;
 	int err;
 
-	err = xe_bo_lock(bo, &ww, vm->preempt.num_exec_queues, true);
+	err = xe_bo_lock(bo, true);
 	if (err)
 		return err;
 
+	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
+	if (err)
+		goto out_unlock;
+
 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
 		if (q->compute.pfence) {
 			dma_resv_add_fence(bo->ttm.base.resv,
@@ -281,8 +284,9 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
 					   DMA_RESV_USAGE_BOOKKEEP);
 		}
 
-	xe_bo_unlock(bo, &ww);
-	return 0;
+out_unlock:
+	xe_bo_unlock(bo);
+	return err;
 }
 
 /**
@@ -1033,12 +1037,11 @@ bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
 				 struct xe_vma *ignore)
 {
-	struct ww_acquire_ctx ww;
 	bool ret;
 
-	xe_bo_lock(bo, &ww, 0, false);
+	xe_bo_lock(bo, false);
 	ret = !!bo_has_vm_references_locked(bo, vm, ignore);
-	xe_bo_unlock(bo, &ww);
+	xe_bo_unlock(bo);
 
 	return ret;
 }
@@ -2264,7 +2267,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 			 u32 operation, u8 tile_mask, u32 region)
 {
 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
-	struct ww_acquire_ctx ww;
 	struct drm_gpuva_ops *ops;
 	struct drm_gpuva_op *__op;
 	struct xe_vma_op *op;
@@ -2323,7 +2325,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 	case XE_VM_BIND_OP_UNMAP_ALL:
 		XE_WARN_ON(!bo);
 
-		err = xe_bo_lock(bo, &ww, 0, true);
+		err = xe_bo_lock(bo, true);
 		if (err)
 			return ERR_PTR(err);
@@ -2333,7 +2335,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
 		drm_gpuvm_bo_put(vm_bo);
 
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 		if (IS_ERR(ops))
 			return ops;
@@ -2369,13 +2371,12 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
 {
 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
 	struct xe_vma *vma;
-	struct ww_acquire_ctx ww;
 	int err;
 
 	lockdep_assert_held_write(&vm->lock);
 	if (bo) {
-		err = xe_bo_lock(bo, &ww, 0, true);
+		err = xe_bo_lock(bo, true);
 		if (err)
 			return ERR_PTR(err);
 	}
@@ -2384,7 +2385,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
 			  op->va.range - 1, read_only, is_null,
 			  tile_mask);
 	if (bo)
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 
 	if (xe_vma_is_userptr(vma)) {
 		err = xe_vma_userptr_pin_pages(vma);
......
@@ -28,16 +28,15 @@ static int madvise_preferred_mem_class(struct xe_device *xe, struct xe_vm *vm,
 	for (i = 0; i < num_vmas; ++i) {
 		struct xe_bo *bo;
-		struct ww_acquire_ctx ww;
 
 		bo = xe_vma_bo(vmas[i]);
 
-		err = xe_bo_lock(bo, &ww, 0, true);
+		err = xe_bo_lock(bo, true);
 		if (err)
 			return err;
 		bo->props.preferred_mem_class = value;
 		xe_bo_placement_for_flags(xe, bo, bo->flags);
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 	}
 
 	return 0;
@@ -53,16 +52,15 @@ static int madvise_preferred_gt(struct xe_device *xe, struct xe_vm *vm,
 	for (i = 0; i < num_vmas; ++i) {
 		struct xe_bo *bo;
-		struct ww_acquire_ctx ww;
 
 		bo = xe_vma_bo(vmas[i]);
 
-		err = xe_bo_lock(bo, &ww, 0, true);
+		err = xe_bo_lock(bo, true);
 		if (err)
 			return err;
 		bo->props.preferred_gt = value;
 		xe_bo_placement_for_flags(xe, bo, bo->flags);
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 	}
 
 	return 0;
@@ -89,17 +87,16 @@ static int madvise_preferred_mem_class_gt(struct xe_device *xe,
 	for (i = 0; i < num_vmas; ++i) {
 		struct xe_bo *bo;
-		struct ww_acquire_ctx ww;
 
 		bo = xe_vma_bo(vmas[i]);
 
-		err = xe_bo_lock(bo, &ww, 0, true);
+		err = xe_bo_lock(bo, true);
 		if (err)
 			return err;
 		bo->props.preferred_mem_class = mem_class;
 		bo->props.preferred_gt = gt_id;
 		xe_bo_placement_for_flags(xe, bo, bo->flags);
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 	}
 
 	return 0;
@@ -112,13 +109,12 @@ static int madvise_cpu_atomic(struct xe_device *xe, struct xe_vm *vm,
 	for (i = 0; i < num_vmas; ++i) {
 		struct xe_bo *bo;
-		struct ww_acquire_ctx ww;
 
 		bo = xe_vma_bo(vmas[i]);
 		if (XE_IOCTL_DBG(xe, !(bo->flags & XE_BO_CREATE_SYSTEM_BIT)))
 			return -EINVAL;
 
-		err = xe_bo_lock(bo, &ww, 0, true);
+		err = xe_bo_lock(bo, true);
 		if (err)
 			return err;
 		bo->props.cpu_atomic = !!value;
@@ -130,7 +126,7 @@ static int madvise_cpu_atomic(struct xe_device *xe, struct xe_vm *vm,
 		 */
 		if (bo->props.cpu_atomic)
 			ttm_bo_unmap_virtual(&bo->ttm);
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 	}
 
 	return 0;
@@ -143,18 +139,17 @@ static int madvise_device_atomic(struct xe_device *xe, struct xe_vm *vm,
 	for (i = 0; i < num_vmas; ++i) {
 		struct xe_bo *bo;
-		struct ww_acquire_ctx ww;
 
 		bo = xe_vma_bo(vmas[i]);
 		if (XE_IOCTL_DBG(xe, !(bo->flags & XE_BO_CREATE_VRAM0_BIT) &&
 				 !(bo->flags & XE_BO_CREATE_VRAM1_BIT)))
 			return -EINVAL;
 
-		err = xe_bo_lock(bo, &ww, 0, true);
+		err = xe_bo_lock(bo, true);
 		if (err)
 			return err;
 		bo->props.device_atomic = !!value;
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 	}
 
 	return 0;
@@ -174,16 +169,15 @@ static int madvise_priority(struct xe_device *xe, struct xe_vm *vm,
 	for (i = 0; i < num_vmas; ++i) {
 		struct xe_bo *bo;
-		struct ww_acquire_ctx ww;
 
 		bo = xe_vma_bo(vmas[i]);
 
-		err = xe_bo_lock(bo, &ww, 0, true);
+		err = xe_bo_lock(bo, true);
 		if (err)
 			return err;
 		bo->ttm.priority = value;
 		ttm_bo_move_to_lru_tail(&bo->ttm);
-		xe_bo_unlock(bo, &ww);
+		xe_bo_unlock(bo);
 	}
 
 	return 0;
......