Commit 24f947d5 authored by Thomas Hellström, committed by Rodrigo Vivi

drm/xe: Use DRM GPUVM helpers for external- and evicted objects

Adapt to the DRM_GPUVM helpers, removing a lot of complicated
driver-specific code.

For now this uses fine-grained locking for the evict list and external
object list, which may incur a slight performance penalty in some
situations.

v2:
- Don't lock all bos and validate on LR exec submissions (Matthew Brost)
- Add some kerneldoc
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231212100144.6833-2-thomas.hellstrom@linux.intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 49e134e1
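
For context, the hunks below switch locking and validation over to the DRM GPUVM helper API. The following is a minimal sketch of that pattern, built only from calls that appear in this patch (drm_gpuvm_exec_lock(), drm_gpuvm_validate(), drm_exec_fini(), DRM_EXEC_INTERRUPTIBLE_WAIT); the names my_validate_cb() and my_lock_and_validate() are illustrative and not part of the patch.

#include <drm/drm_exec.h>
#include <drm/drm_gpuvm.h>

/* Driver callback run by drm_gpuvm_exec_lock() once all resvs are held. */
static int my_validate_cb(struct drm_gpuvm_exec *vm_exec)
{
        /* Revalidate every BO currently on the VM's evicted list. */
        return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
}

static int my_lock_and_validate(struct drm_gpuvm *gpuvm, unsigned int num_fences)
{
        struct drm_gpuvm_exec vm_exec = {
                .vm = gpuvm,
                .num_fences = num_fences,
                .flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
                .extra.fn = my_validate_cb,
        };
        int err;

        /* Locks the shared VM resv plus every external object's resv. */
        err = drm_gpuvm_exec_lock(&vm_exec);
        if (err)
                return err;

        /* ... submit work and attach fences here, as xe_exec_ioctl() does ... */

        drm_exec_fini(&vm_exec.exec);
        return 0;
}
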
@@ -468,9 +468,9 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
{
struct dma_resv_iter cursor;
struct dma_fence *fence;
struct drm_gpuva *gpuva;
struct drm_gem_object *obj = &bo->ttm.base;
struct drm_gpuvm_bo *vm_bo;
bool idle = false;
int ret = 0;
dma_resv_assert_held(bo->ttm.base.resv);
@@ -484,14 +484,15 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
}
drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
struct xe_vma *vma = gpuva_to_vma(gpuva);
struct xe_vm *vm = xe_vma_vm(vma);
struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
struct drm_gpuva *gpuva;
trace_xe_vma_evict(vma);
if (!xe_vm_in_fault_mode(vm)) {
drm_gpuvm_bo_evict(vm_bo, true);
continue;
}
if (xe_vm_in_fault_mode(vm)) {
/* Wait for pending binds / unbinds. */
if (!idle) {
long timeout;
if (ctx->no_wait_gpu &&
@@ -503,45 +504,21 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
DMA_RESV_USAGE_BOOKKEEP,
ctx->interruptible,
MAX_SCHEDULE_TIMEOUT);
if (timeout > 0) {
ret = xe_vm_invalidate_vma(vma);
XE_WARN_ON(ret);
} else if (!timeout) {
ret = -ETIME;
} else {
ret = timeout;
}
} else {
bool vm_resv_locked = false;
if (!timeout)
return -ETIME;
if (timeout < 0)
return timeout;
/*
* We need to put the vma on the vm's rebind_list,
* but need the vm resv to do so. If we can't verify
* that we indeed have it locked, put the vma on the
* vm's notifier.rebind_list instead and scoop later.
*/
if (dma_resv_trylock(xe_vm_resv(vm)))
vm_resv_locked = true;
else if (ctx->resv != xe_vm_resv(vm)) {
spin_lock(&vm->notifier.list_lock);
if (!(vma->gpuva.flags & XE_VMA_DESTROYED))
list_move_tail(&vma->notifier.rebind_link,
&vm->notifier.rebind_list);
spin_unlock(&vm->notifier.list_lock);
continue;
}
idle = true;
}
xe_vm_assert_held(vm);
if (vma->tile_present &&
!(vma->gpuva.flags & XE_VMA_DESTROYED) &&
list_empty(&vma->combined_links.rebind))
list_add_tail(&vma->combined_links.rebind,
&vm->rebind_list);
drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
struct xe_vma *vma = gpuva_to_vma(gpuva);
if (vm_resv_locked)
dma_resv_unlock(xe_vm_resv(vm));
}
trace_xe_vma_evict(vma);
ret = xe_vm_invalidate_vma(vma);
if (XE_WARN_ON(ret))
return ret;
}
}
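
The rewritten xe_bo_trigger_rebind() above hands eviction bookkeeping to the GPUVM helpers. A reduced sketch of just that step, assuming the caller holds the BO's reservation as the dma_resv_assert_held() at the top of the function does; my_mark_bo_evicted() is an illustrative name, not a function from the patch, and the fault-mode handling above is omitted.

#include <linux/dma-resv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

static void my_mark_bo_evicted(struct drm_gem_object *obj)
{
        struct drm_gpuvm_bo *vm_bo;

        /* Caller holds the BO's reservation, as xe_bo_trigger_rebind() does. */
        dma_resv_assert_held(obj->resv);

        /* For every VM this BO is mapped in, let GPUVM track it as evicted. */
        drm_gem_for_each_gpuvm_bo(vm_bo, obj)
                drm_gpuvm_bo_evict(vm_bo, true);
}
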
@@ -94,40 +94,9 @@
* Unlock all
*/
static int xe_exec_begin(struct drm_exec *exec, struct xe_vm *vm)
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
struct xe_vma *vma;
LIST_HEAD(dups);
int err = 0;
if (xe_vm_in_lr_mode(vm))
return 0;
/*
* 1 fence for job from exec plus a fence for each tile from a possible
* rebind
*/
err = xe_vm_lock_dma_resv(vm, exec, 1 + vm->xe->info.tile_count, true);
if (err)
return err;
/*
* Validate BOs that have been evicted (i.e. make sure the
* BOs have valid placements possibly moving an evicted BO back
* to a location where the GPU can access it).
*/
list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
xe_assert(vm->xe, !xe_vma_is_null(vma));
if (xe_vma_is_userptr(vma))
continue;
err = xe_bo_validate(xe_vma_bo(vma), vm, false);
if (err)
break;
}
return err;
return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
}
int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -140,7 +109,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct xe_exec_queue *q;
struct xe_sync_entry *syncs = NULL;
u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
struct drm_exec exec;
struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
struct drm_exec *exec = &vm_exec.exec;
u32 i, num_syncs = 0;
struct xe_sched_job *job;
struct dma_fence *rebind_fence;
@@ -216,16 +186,18 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto err_unlock_list;
}
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
drm_exec_until_all_locked(&exec) {
err = xe_exec_begin(&exec, vm);
drm_exec_retry_on_contention(&exec);
if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
err = -EAGAIN;
vm_exec.vm = &vm->gpuvm;
vm_exec.num_fences = 1 + vm->xe->info.tile_count;
vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
if (xe_vm_in_lr_mode(vm)) {
drm_exec_init(exec, vm_exec.flags);
} else {
err = drm_gpuvm_exec_lock(&vm_exec);
if (err) {
if (xe_vm_validate_should_retry(exec, err, &end))
err = -EAGAIN;
goto err_unlock_list;
}
if (err)
goto err_exec;
}
if (xe_vm_is_closed_or_banned(q->vm)) {
@@ -307,19 +279,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
* the job and let the DRM scheduler / backend clean up the job.
*/
xe_sched_job_arm(job);
if (!xe_vm_in_lr_mode(vm)) {
/* Block userptr invalidations / BO eviction */
dma_resv_add_fence(xe_vm_resv(vm),
&job->drm.s_fence->finished,
DMA_RESV_USAGE_BOOKKEEP);
/*
* Make implicit sync work across drivers, assuming all external
* BOs are written as we don't pass in a read / write list.
*/
xe_vm_fence_all_extobjs(vm, &job->drm.s_fence->finished,
DMA_RESV_USAGE_WRITE);
}
if (!xe_vm_in_lr_mode(vm))
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
for (i = 0; i < num_syncs; i++)
xe_sync_entry_signal(&syncs[i], job,
@@ -343,7 +305,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (err)
xe_sched_job_put(job);
err_exec:
drm_exec_fini(&exec);
drm_exec_fini(exec);
err_unlock_list:
if (write_locked)
up_write(&vm->lock);
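
The hunks above also replace the open-coded xe_vm_fence_all_extobjs() loop with a single drm_gpuvm_resv_add_fence() call. A minimal sketch of that call as it is used in this patch; my_attach_job_fence() is an illustrative wrapper, not a function from the patch.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <drm/drm_gpuvm.h>

static void my_attach_job_fence(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
                                struct dma_fence *fence)
{
        /*
         * One call attaches the fence to the shared VM resv with the private
         * usage and to every external BO's resv with the extobj usage.
         */
        drm_gpuvm_resv_add_fence(gpuvm, exec, fence,
                                 DMA_RESV_USAGE_BOOKKEEP, /* private BOs / VM resv */
                                 DMA_RESV_USAGE_WRITE);   /* external BOs */
}
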
This diff is collapsed.
@@ -74,9 +74,20 @@ static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}
/**
* gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
* @gpuvm: The struct drm_gpuvm pointer
*
* Return: Pointer to the embedding struct xe_vm.
*/
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
return container_of(gpuvm, struct xe_vm, gpuvm);
}
static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
return container_of(gpuva->vm, struct xe_vm, gpuvm);
return gpuvm_to_vm(gpuva->vm);
}
static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
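
A brief usage sketch for the two helpers above, assuming it sits alongside them so the xe_vm.h definitions are in scope; my_walk_vm_bo() is illustrative only. Code that receives generic GPUVM objects, such as the xe_bo_trigger_rebind() hunk earlier in this commit, recovers the driver VM this way.

static void my_walk_vm_bo(struct drm_gpuvm_bo *vm_bo)
{
        struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
        struct drm_gpuva *gpuva;

        drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
                /* Every VA attached to a vm_bo belongs to the same VM. */
                XE_WARN_ON(gpuva_to_vm(gpuva) != vm);
        }
}
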
@@ -219,12 +230,6 @@ int xe_vma_userptr_check_repin(struct xe_vma *vma);
bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
unsigned int num_shared, bool lock_vm);
void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
enum dma_resv_usage usage);
int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);
int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
@@ -62,26 +62,17 @@ struct xe_vma {
/** @gpuva: Base GPUVA object */
struct drm_gpuva gpuva;
/** @combined_links: links into lists which are mutually exclusive */
/**
* @combined_links: links into lists which are mutually exclusive.
* Locking: vm lock in write mode OR vm lock in read mode and the vm's
* resv.
*/
union {
/**
* @userptr: link into VM repin list if userptr. Protected by
* vm->lock in write mode.
*/
/** @userptr: link into VM repin list if userptr. */
struct list_head userptr;
/**
* @rebind: link into VM if this VMA needs rebinding, and
* if it's a bo (not userptr) needs validation after a possible
* eviction. Protected by the vm's resv lock and typically
* vm->lock is also held in write mode. The only place where
* vm->lock isn't held is the BO eviction path which has
* mutually exclusive execution with userptr.
*/
/** @rebind: link into VM if this VMA needs rebinding. */
struct list_head rebind;
/**
* @destroy: link to contested list when VM is being closed.
* Protected by vm->lock in write mode and vm's resv lock.
*/
/** @destroy: link to contested list when VM is being closed. */
struct list_head destroy;
} combined_links;
@@ -115,18 +106,6 @@ struct xe_vma {
*/
u16 pat_index;
struct {
struct list_head rebind_link;
} notifier;
struct {
/**
* @extobj.link: Link into vm's external object list.
* protected by the vm lock.
*/
struct list_head link;
} extobj;
/**
* @userptr: user pointer state, only allocated for VMAs that are
* user pointers
@@ -180,9 +159,9 @@ struct xe_vm {
struct rw_semaphore lock;
/**
* @rebind_list: list of VMAs that need rebinding, and if they are
* bos (not userptr), need validation after a possible eviction. The
* list is protected by @resv.
* @rebind_list: list of VMAs that need rebinding. Protected by the
* vm->lock in write mode, OR (the vm->lock in read mode and the
* vm resv).
*/
struct list_head rebind_list;
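
A sketch of the documented locking rule, modelled on the xe_bo_trigger_rebind() hunk at the top of this commit; my_queue_for_rebind() is an illustrative name, and the vm->lock half of the rule above is assumed to be the caller's responsibility.

static void my_queue_for_rebind(struct xe_vm *vm, struct xe_vma *vma)
{
        /* The VM reservation must be held, cf. xe_vm_assert_held() above. */
        xe_vm_assert_held(vm);

        if (!(vma->gpuva.flags & XE_VMA_DESTROYED) &&
            list_empty(&vma->combined_links.rebind))
                list_add_tail(&vma->combined_links.rebind, &vm->rebind_list);
}
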
@@ -202,14 +181,6 @@ struct xe_vm {
*/
struct xe_range_fence_tree rftree[XE_MAX_TILES_PER_DEVICE];
/** @extobj: bookkeeping for external objects. Protected by the vm lock */
struct {
/** @entries: number of external BOs attached to this VM */
u32 entries;
/** @list: list of vmas with external bos attached */
struct list_head list;
} extobj;
/** @async_ops: async VM operations (bind / unbinds) */
struct {
/** @list: list of pending async VM ops */
@@ -299,22 +270,6 @@ struct xe_vm {
struct xe_vma *last_fault_vma;
} usm;
/**
* @notifier: Lists and locks for temporary usage within notifiers where
* we either can't grab the vm lock or the vm resv.
*/
struct {
/** @notifier.list_lock: lock protecting @rebind_list */
spinlock_t list_lock;
/**
* @notifier.rebind_list: list of vmas that we want to put on the
* main @rebind_list. This list is protected for writing by both
* notifier.list_lock, and the resv of the bo the vma points to,
* and for reading by the notifier.list_lock only.
*/
struct list_head rebind_list;
} notifier;
/** @error_capture: allow to track errors */
struct {
/** @capture_once: capture only one error per VM */