Commit fc29b6d5 authored by Matthew Brost, committed by Thomas Hellström

drm/xe: Take a reference in xe_exec_queue_last_fence_get()

Take a reference in xe_exec_queue_last_fence_get(). Also fix a reference
counting underflow bug in VM bind and unbind.

Fixes: dd08ebf6 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240201004849.2219558-2-matthew.brost@intel.com
(cherry picked from commit a856b67a)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
parent ddc7d4c5
...@@ -926,20 +926,24 @@ void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q) ...@@ -926,20 +926,24 @@ void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
* @q: The exec queue * @q: The exec queue
* @vm: The VM the engine does a bind or exec for * @vm: The VM the engine does a bind or exec for
* *
* Get last fence, does not take a ref * Get last fence, takes a ref
* *
* Returns: last fence if not signaled, dma fence stub if signaled * Returns: last fence if not signaled, dma fence stub if signaled
*/ */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q, struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
struct xe_vm *vm) struct xe_vm *vm)
{ {
struct dma_fence *fence;
xe_exec_queue_last_fence_lockdep_assert(q, vm); xe_exec_queue_last_fence_lockdep_assert(q, vm);
if (q->last_fence && if (q->last_fence &&
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
xe_exec_queue_last_fence_put(q, vm); xe_exec_queue_last_fence_put(q, vm);
return q->last_fence ? q->last_fence : dma_fence_get_stub(); fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
dma_fence_get(fence);
return fence;
} }
/** /**
......
...@@ -1204,9 +1204,12 @@ static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q, ...@@ -1204,9 +1204,12 @@ static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
} }
if (q) { if (q) {
fence = xe_exec_queue_last_fence_get(q, vm); fence = xe_exec_queue_last_fence_get(q, vm);
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
dma_fence_put(fence);
return false; return false;
} }
dma_fence_put(fence);
}
return true; return true;
} }
......
...@@ -274,7 +274,6 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm) ...@@ -274,7 +274,6 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
struct dma_fence *fence; struct dma_fence *fence;
fence = xe_exec_queue_last_fence_get(job->q, vm); fence = xe_exec_queue_last_fence_get(job->q, vm);
dma_fence_get(fence);
return drm_sched_job_add_dependency(&job->drm, fence); return drm_sched_job_add_dependency(&job->drm, fence);
} }
...@@ -307,7 +307,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync, ...@@ -307,7 +307,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
/* Easy case... */ /* Easy case... */
if (!num_in_fence) { if (!num_in_fence) {
fence = xe_exec_queue_last_fence_get(q, vm); fence = xe_exec_queue_last_fence_get(q, vm);
dma_fence_get(fence);
return fence; return fence;
} }
...@@ -322,7 +321,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync, ...@@ -322,7 +321,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
} }
} }
fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm); fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
dma_fence_get(fences[current_fence - 1]);
cf = dma_fence_array_create(num_in_fence, fences, cf = dma_fence_array_create(num_in_fence, fences,
vm->composite_fence_ctx, vm->composite_fence_ctx,
vm->composite_fence_seqno++, vm->composite_fence_seqno++,
......
...@@ -1984,6 +1984,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, ...@@ -1984,6 +1984,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
xe_exec_queue_last_fence_get(wait_exec_queue, vm); xe_exec_queue_last_fence_get(wait_exec_queue, vm);
xe_sync_entry_signal(&syncs[i], NULL, fence); xe_sync_entry_signal(&syncs[i], NULL, fence);
dma_fence_put(fence);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment