Commit 940ca22b authored by Christian König

drm/amdgpu: drop amdgpu_sync from amdgpu_vmid_grab v2

Instead return the fence directly. Avoids memory allocation to store the
fence.

v2: cleanup coding style as well
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221014084641.128280-6-christian.koenig@amd.com
parent c5093cdd
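
The essence of the change, before the full diff: rather than feeding the blocking fence into an amdgpu_sync container (whose amdgpu_sync_fence() has to allocate a tracking entry and can therefore fail with -ENOMEM), the grab functions now hand the fence back through a dma_fence out-parameter. A minimal sketch of the two conventions; struct resource and the grab_* helpers are illustrative stand-ins, only amdgpu_sync_fence() and dma_fence_get() are real APIs:

/* Old convention: store the dependency in a sync container.
 * amdgpu_sync_fence() kmallocs an entry per fence, so this
 * path can fail with -ENOMEM. */
static int grab_old(struct resource *res, struct amdgpu_sync *sync)
{
	if (res->busy_fence)
		return amdgpu_sync_fence(sync, res->busy_fence);
	return 0;
}

/* New convention: return the single blocking fence directly.
 * dma_fence_get() only takes a reference, no allocation, so
 * this path cannot fail. */
static int grab_new(struct resource *res, struct dma_fence **fence)
{
	if (res->busy_fence)
		*fence = dma_fence_get(res->busy_fence);
	return 0;
}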
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -170,26 +170,27 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @idle: resulting idle VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to find an idle VMID, if none is idle add a fence to wait to the sync
  * object. Returns -ENOMEM when we are out of memory.
  */
 static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 				 struct amdgpu_ring *ring,
-				 struct amdgpu_sync *sync,
-				 struct amdgpu_vmid **idle)
+				 struct amdgpu_vmid **idle,
+				 struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct dma_fence **fences;
 	unsigned i;
-	int r;
 
-	if (!dma_fence_is_signaled(ring->vmid_wait))
-		return amdgpu_sync_fence(sync, ring->vmid_wait);
+	if (!dma_fence_is_signaled(ring->vmid_wait)) {
+		*fence = dma_fence_get(ring->vmid_wait);
+		return 0;
+	}
 
 	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
 	if (!fences)
@@ -228,10 +229,10 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 			return -ENOMEM;
 		}
 
-		r = amdgpu_sync_fence(sync, &array->base);
+		*fence = dma_fence_get(&array->base);
 		dma_fence_put(ring->vmid_wait);
 		ring->vmid_wait = &array->base;
-		return r;
+		return 0;
 	}
 	kfree(fences);
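
The wait fence handed out by the idle path above is a dma_fence_array combining the fences of the busy IDs, cached in ring->vmid_wait so repeated grabs reuse it. A hedged sketch of building such a combined fence with the dma-fence-array API (the context/seqno bookkeeping is simplified here; the driver keeps its own):

#include <linux/dma-fence-array.h>

/* Fold several fence references into one fence. The fences[] array
 * must be kmalloc'd; on success dma_fence_array_create() takes
 * ownership of it and of the references it holds (on failure the
 * caller still owns both). signal_on_any=true makes the result
 * signal as soon as the first fence does, i.e. "some ID became
 * idle". */
static struct dma_fence *combine(struct dma_fence **fences,
				 unsigned int count)
{
	struct dma_fence_array *array;

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1), 1,
				       true);
	return array ? &array->base : NULL;
}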
@@ -243,17 +244,17 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @job: job who wants to use the VMID
  * @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to assign a reserved VMID.
  */
 static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 				     struct amdgpu_ring *ring,
-				     struct amdgpu_sync *sync,
 				     struct amdgpu_job *job,
-				     struct amdgpu_vmid **id)
+				     struct amdgpu_vmid **id,
+				     struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
@@ -280,7 +281,8 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
 		if (tmp) {
 			*id = NULL;
-			return amdgpu_sync_fence(sync, tmp);
+			*fence = dma_fence_get(tmp);
+			return 0;
 		}
 		needs_flush = true;
 	}
@@ -302,17 +304,17 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @job: job who wants to use the VMID
  * @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to reuse a VMID for this submission.
  */
 static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 				 struct amdgpu_ring *ring,
-				 struct amdgpu_sync *sync,
 				 struct amdgpu_job *job,
-				 struct amdgpu_vmid **id)
+				 struct amdgpu_vmid **id,
+				 struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
@@ -367,13 +369,13 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @job: job who wants to use the VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  */
 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		     struct amdgpu_sync *sync, struct amdgpu_job *job)
+		     struct amdgpu_job *job, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
@@ -383,16 +385,16 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	int r = 0;
 
 	mutex_lock(&id_mgr->lock);
-	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
+	r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence);
 	if (r || !idle)
 		goto error;
 
 	if (vm->reserved_vmid[vmhub]) {
-		r = amdgpu_vmid_grab_reserved(vm, ring, sync, job, &id);
+		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
 		if (r || !id)
 			goto error;
 	} else {
-		r = amdgpu_vmid_grab_used(vm, ring, sync, job, &id);
+		r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
 		if (r)
 			goto error;
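
With that, amdgpu_vmid_grab() has a simple ownership contract: on success it has either assigned the job a VMID or stored exactly one dma_fence reference in *fence, and the caller must eventually drop that reference. A sketch of a synchronous caller honoring the contract (the real caller, in amdgpu_job.c below, instead returns the fence to the scheduler):

	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_vmid_grab(vm, ring, job, &fence);
	if (r) {
		/* hard error; no fence reference was handed out */
	} else if (fence) {
		/* no ID available yet: wait, then drop our reference */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	} else {
		/* job->vmid is valid, go ahead and submit */
	}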
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -84,7 +84,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
 			       struct amdgpu_vm *vm,
 			       unsigned vmhub);
 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		     struct amdgpu_sync *sync, struct amdgpu_job *job);
+		     struct amdgpu_job *job, struct dma_fence **fence);
 void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
 		       unsigned vmid);
 void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -239,12 +239,12 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 	return 0;
 }
 
-static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
-					       struct drm_sched_entity *s_entity)
+static struct dma_fence *
+amdgpu_job_dependency(struct drm_sched_job *sched_job,
+		      struct drm_sched_entity *s_entity)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
-	struct amdgpu_vm *vm = job->vm;
 	struct dma_fence *fence;
 	int r;
 
@@ -255,12 +255,10 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 			DRM_ERROR("Error adding fence (%d)\n", r);
 	}
 
-	while (fence == NULL && vm && !job->vmid) {
-		r = amdgpu_vmid_grab(vm, ring, &job->sync, job);
+	while (!fence && job->vm && !job->vmid) {
+		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
-
-		fence = amdgpu_sync_get_fence(&job->sync);
 	}
 
 	if (!fence && job->gang_submit)
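
For context, drm_sched dependency callbacks follow a pull model: the scheduler invokes the hook, takes over any fence reference it returns, waits for that fence to signal and then asks again; NULL means the job has no remaining dependencies. That is why the loop above can simply retry amdgpu_vmid_grab() until it either fills job->vmid or produces a fence. A hypothetical minimal callback of this shape (the my_* names are illustrative, not driver code):

static struct dma_fence *
my_dependency(struct drm_sched_job *sched_job,
	      struct drm_sched_entity *s_entity)
{
	struct my_job *job = to_my_job(sched_job);
	struct dma_fence *fence = NULL;

	/* Retry until an ID is assigned or there is a fence to wait
	 * on (error handling elided). */
	while (!fence && !job->vmid)
		my_vmid_grab(job, &fence);

	return fence;	/* scheduler consumes this reference; NULL = ready */
}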