Commit de8390b1 authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe/sched_job: Promote xe_sched_job_add_deps()

Move it out of the xe_migrate compilation unit so it can be re-used in
other places.

Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240614094433.775866-1-francois.dugast@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 8c57c4dc
drivers/gpu/drm/xe/xe_exec.c

@@ -259,7 +259,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	/* Wait behind rebinds */
 	if (!xe_vm_in_lr_mode(vm)) {
-		err = drm_sched_job_add_resv_dependencies(&job->drm,
+		err = xe_sched_job_add_deps(job,
 					    xe_vm_resv(vm),
 					    DMA_RESV_USAGE_KERNEL);
 		if (err)
drivers/gpu/drm/xe/xe_migrate.c

@@ -647,12 +647,6 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
 	bb->cs[bb->len++] = upper_32_bits(src_ofs);
 }
 
-static int job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
-			enum dma_resv_usage usage)
-{
-	return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
-}
-
 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
 {
 	return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
@@ -849,10 +843,10 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 		xe_sched_job_add_migrate_flush(job, flush_flags);
 		if (!fence) {
-			err = job_add_deps(job, src_bo->ttm.base.resv,
+			err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
 					   DMA_RESV_USAGE_BOOKKEEP);
 			if (!err && src_bo != dst_bo)
-				err = job_add_deps(job, dst_bo->ttm.base.resv,
+				err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
 						   DMA_RESV_USAGE_BOOKKEEP);
 			if (err)
 				goto err_job;
@@ -1091,7 +1085,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 			 * fences, which are always tracked as
 			 * DMA_RESV_USAGE_KERNEL.
 			 */
-			err = job_add_deps(job, bo->ttm.base.resv,
+			err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
 					   DMA_RESV_USAGE_KERNEL);
 			if (err)
 				goto err_job;
@@ -1417,7 +1411,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 	/* Wait on BO move */
 	if (bo) {
-		err = job_add_deps(job, bo->ttm.base.resv,
+		err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
 				   DMA_RESV_USAGE_KERNEL);
 		if (err)
 			goto err_job;
@@ -1428,7 +1422,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 	 * trigger preempts before moving forward
 	 */
 	if (first_munmap_rebind) {
-		err = job_add_deps(job, xe_vm_resv(vm),
+		err = xe_sched_job_add_deps(job, xe_vm_resv(vm),
 				   DMA_RESV_USAGE_BOOKKEEP);
 		if (err)
 			goto err_job;
drivers/gpu/drm/xe/xe_sched_job.c

@@ -363,3 +363,9 @@ xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot,
 	for (i = 0; i < snapshot->batch_addr_len; i++)
 		drm_printf(p, "batch_addr[%u]: 0x%016llx\n", i, snapshot->batch_addr[i]);
 }
+
+int xe_sched_job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
+			  enum dma_resv_usage usage)
+{
+	return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
+}
drivers/gpu/drm/xe/xe_sched_job.h

@@ -90,4 +90,7 @@ struct xe_sched_job_snapshot *xe_sched_job_snapshot_capture(struct xe_sched_job
 void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot);
 void xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot, struct drm_printer *p);
 
+int xe_sched_job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
+			  enum dma_resv_usage usage);
+
 #endif
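For context, a minimal sketch of how a compilation unit other than xe_migrate.c could now reuse the promoted helper. The wrapper xe_example_wait_behind_rebinds() is purely illustrative and not part of this patch; xe_sched_job_add_deps(), xe_vm_resv() and DMA_RESV_USAGE_KERNEL are taken from the diff above.

/* Illustrative sketch only: mirrors the xe_exec_ioctl() call site above. */
#include <linux/dma-resv.h>

#include "xe_sched_job.h"
#include "xe_vm.h"

static int xe_example_wait_behind_rebinds(struct xe_sched_job *job,
					  struct xe_vm *vm)
{
	/* Make the job wait on all kernel fences attached to the VM's
	 * reservation object, via the shared helper instead of an
	 * open-coded drm_sched_job_add_resv_dependencies() call.
	 */
	return xe_sched_job_add_deps(job, xe_vm_resv(vm),
				     DMA_RESV_USAGE_KERNEL);
}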