Commit 5dffaa1b authored by Nirmoy Das, committed by Lucas De Marchi

drm/xe: Create a helper function to init job's user fence

Refactor xe_sync_entry_signal() so it doesn't have to
modify the xe_sched_job struct; instead, create a new helper
function to set user fence values for a job.

v2: Move the sync type check to xe_sched_job_init_user_fence() (Lucas)

Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240321161142.4954-1-nirmoy.das@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
parent 4b217c7f
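
Before the diff, a minimal sketch of the calling pattern this change introduces (illustrative only, not part of the commit; the wrapper name submit_user_fence_sketch is made up, and the driver headers xe_sched_job.h / xe_sync_types.h are assumed to be included). xe_sync_entry_signal() no longer takes a job pointer, and the new xe_sched_job_init_user_fence() helper copies the user-fence address and timeline value onto the job, returning early for any sync that is not DRM_XE_SYNC_TYPE_USER_FENCE:

	static void submit_user_fence_sketch(struct xe_sched_job *job,
					     struct xe_sync_entry *syncs,
					     u32 num_syncs)
	{
		u32 i;

		for (i = 0; i < num_syncs; i++) {
			/* Signal path: only the fence is needed, no job pointer. */
			xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
			/* New helper: no-op unless the sync is a user fence. */
			xe_sched_job_init_user_fence(job, &syncs[i]);
		}
	}
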
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -249,7 +249,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 			goto err_unlock_list;
 		}
 		for (i = 0; i < num_syncs; i++)
-			xe_sync_entry_signal(&syncs[i], NULL, fence);
+			xe_sync_entry_signal(&syncs[i], fence);
 		xe_exec_queue_last_fence_set(q, vm, fence);
 		dma_fence_put(fence);
 	}
@@ -359,9 +359,10 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
 
-	for (i = 0; i < num_syncs; i++)
-		xe_sync_entry_signal(&syncs[i], job,
-				     &job->drm.s_fence->finished);
+	for (i = 0; i < num_syncs; i++) {
+		xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
+		xe_sched_job_init_user_fence(job, &syncs[i]);
+	}
 
 	if (xe_exec_queue_is_lr(q))
 		q->ring_ops->emit_job(job);
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -5,6 +5,7 @@
 
 #include "xe_sched_job.h"
 
+#include <drm/xe_drm.h>
 #include <linux/dma-fence-array.h>
 #include <linux/slab.h>
 
@@ -15,6 +16,7 @@
 #include "xe_hw_fence.h"
 #include "xe_lrc.h"
 #include "xe_macros.h"
+#include "xe_sync_types.h"
 #include "xe_trace.h"
 #include "xe_vm.h"
 
@@ -278,6 +280,22 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
 	return drm_sched_job_add_dependency(&job->drm, fence);
 }
 
+/**
+ * xe_sched_job_init_user_fence - Initialize user_fence for the job
+ * @job: job whose user_fence needs an init
+ * @sync: sync to be used to init user_fence
+ */
+void xe_sched_job_init_user_fence(struct xe_sched_job *job,
+				  struct xe_sync_entry *sync)
+{
+	if (sync->type != DRM_XE_SYNC_TYPE_USER_FENCE)
+		return;
+
+	job->user_fence.used = true;
+	job->user_fence.addr = sync->addr;
+	job->user_fence.value = sync->timeline_value;
+}
+
 struct xe_sched_job_snapshot *
 xe_sched_job_snapshot_capture(struct xe_sched_job *job)
 {
--- a/drivers/gpu/drm/xe/xe_sched_job.h
+++ b/drivers/gpu/drm/xe/xe_sched_job.h
@@ -10,6 +10,7 @@
 
 struct drm_printer;
 struct xe_vm;
+struct xe_sync_entry;
 
 #define XE_SCHED_HANG_LIMIT 1
 #define XE_SCHED_JOB_TIMEOUT LONG_MAX
@@ -58,6 +59,8 @@ void xe_sched_job_arm(struct xe_sched_job *job);
 void xe_sched_job_push(struct xe_sched_job *job);
 
 int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm);
+void xe_sched_job_init_user_fence(struct xe_sched_job *job,
+				  struct xe_sync_entry *sync);
 
 static inline struct xe_sched_job *
 to_xe_sched_job(struct drm_sched_job *drm)
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -224,8 +224,7 @@ int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
 	return 0;
 }
 
-void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
-			  struct dma_fence *fence)
+void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
 {
 	if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
 		return;
@@ -254,10 +253,6 @@ void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
 			user_fence_put(sync->ufence);
 			dma_fence_put(fence);
 		}
-	} else if (sync->type == DRM_XE_SYNC_TYPE_USER_FENCE) {
-		job->user_fence.used = true;
-		job->user_fence.addr = sync->addr;
-		job->user_fence.value = sync->timeline_value;
 	}
 }
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -26,7 +26,6 @@ int xe_sync_entry_wait(struct xe_sync_entry *sync);
 int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
 			   struct xe_sched_job *job);
 void xe_sync_entry_signal(struct xe_sync_entry *sync,
-			  struct xe_sched_job *job,
 			  struct dma_fence *fence);
 void xe_sync_entry_cleanup(struct xe_sync_entry *sync);
 struct dma_fence *
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1700,7 +1700,7 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
 	if (last_op) {
 		for (i = 0; i < num_syncs; i++)
-			xe_sync_entry_signal(&syncs[i], NULL, fence);
+			xe_sync_entry_signal(&syncs[i], fence);
 	}
 
 	return fence;
@@ -1774,7 +1774,7 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 
 	if (last_op) {
 		for (i = 0; i < num_syncs; i++)
-			xe_sync_entry_signal(&syncs[i], NULL,
+			xe_sync_entry_signal(&syncs[i],
 					     cf ? &cf->base : fence);
 	}
 
@@ -1835,7 +1835,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
 		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
 		if (last_op) {
 			for (i = 0; i < num_syncs; i++)
-				xe_sync_entry_signal(&syncs[i], NULL, fence);
+				xe_sync_entry_signal(&syncs[i], fence);
 		}
 	}
 
@@ -2056,7 +2056,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
 			struct dma_fence *fence =
 				xe_exec_queue_last_fence_get(wait_exec_queue, vm);
 
-			xe_sync_entry_signal(&syncs[i], NULL, fence);
+			xe_sync_entry_signal(&syncs[i], fence);
 			dma_fence_put(fence);
 		}
 	}
@@ -2934,7 +2934,7 @@ static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
 		return PTR_ERR(fence);
 
 	for (i = 0; i < num_syncs; i++)
-		xe_sync_entry_signal(&syncs[i], NULL, fence);
+		xe_sync_entry_signal(&syncs[i], fence);
 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
 				     fence);