Commit 0e28b10f authored by Christian König, committed by Alex Deucher

drm/amdgpu: remove ring parameter from amdgpu_job_submit

We know the ring through the entity anyway.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent eb3961a5
...@@ -117,21 +117,20 @@ void amdgpu_job_free(struct amdgpu_job *job) ...@@ -117,21 +117,20 @@ void amdgpu_job_free(struct amdgpu_job *job)
kfree(job); kfree(job);
} }
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
struct drm_sched_entity *entity, void *owner, void *owner, struct dma_fence **f)
struct dma_fence **f)
{ {
int r; int r;
job->ring = ring;
if (!f) if (!f)
return -EINVAL; return -EINVAL;
r = drm_sched_job_init(&job->base, &ring->sched, entity, owner); r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
if (r) if (r)
return r; return r;
job->owner = owner; job->owner = owner;
job->ring = to_amdgpu_ring(entity->sched);
*f = dma_fence_get(&job->base.s_fence->finished); *f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job); amdgpu_job_free_resources(job);
amdgpu_ring_priority_get(job->ring, job->base.s_priority); amdgpu_ring_priority_get(job->ring, job->base.s_priority);
......
...@@ -67,7 +67,6 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, ...@@ -67,7 +67,6 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job); void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job); void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
struct drm_sched_entity *entity, void *owner, void *owner, struct dma_fence **f);
struct dma_fence **f);
#endif #endif
...@@ -44,6 +44,8 @@ ...@@ -44,6 +44,8 @@
#define AMDGPU_FENCE_FLAG_INT (1 << 1) #define AMDGPU_FENCE_FLAG_INT (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2) #define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
enum amdgpu_ring_type { enum amdgpu_ring_type {
AMDGPU_RING_TYPE_GFX, AMDGPU_RING_TYPE_GFX,
AMDGPU_RING_TYPE_COMPUTE, AMDGPU_RING_TYPE_COMPUTE,
......
...@@ -2006,7 +2006,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo, ...@@ -2006,7 +2006,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
if (r) if (r)
goto error_free; goto error_free;
r = amdgpu_job_submit(job, ring, &adev->mman.entity, r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &fence); AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
if (r) if (r)
goto error_free; goto error_free;
...@@ -2083,7 +2083,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, ...@@ -2083,7 +2083,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
DRM_ERROR("Error scheduling IBs (%d)\n", r); DRM_ERROR("Error scheduling IBs (%d)\n", r);
amdgpu_job_free(job); amdgpu_job_free(job);
} else { } else {
r = amdgpu_job_submit(job, ring, &adev->mman.entity, r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, fence); AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r) if (r)
goto error_free; goto error_free;
...@@ -2175,7 +2175,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, ...@@ -2175,7 +2175,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
amdgpu_ring_pad_ib(ring, &job->ibs[0]); amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw); WARN_ON(job->ibs[0].length_dw > num_dw);
r = amdgpu_job_submit(job, ring, &adev->mman.entity, r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, fence); AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r) if (r)
goto error_free; goto error_free;
......
...@@ -1074,7 +1074,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ...@@ -1074,7 +1074,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r) if (r)
goto err_free; goto err_free;
r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity, r = amdgpu_job_submit(job, &adev->uvd.inst[ring->me].entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f); AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r) if (r)
goto err_free; goto err_free;
......
...@@ -539,7 +539,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ...@@ -539,7 +539,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
amdgpu_job_free(job); amdgpu_job_free(job);
} else { } else {
r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity, r = amdgpu_job_submit(job, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f); AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r) if (r)
goto err; goto err;
......
...@@ -425,8 +425,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, ...@@ -425,8 +425,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r) if (r)
goto error_free; goto error_free;
r = amdgpu_job_submit(job, ring, &vm->entity, r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
AMDGPU_FENCE_OWNER_UNDEFINED, &fence); &fence);
if (r) if (r)
goto error_free; goto error_free;
...@@ -1120,8 +1120,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, ...@@ -1120,8 +1120,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
amdgpu_sync_resv(adev, &job->sync, root->tbo.resv, amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
AMDGPU_FENCE_OWNER_VM, false); AMDGPU_FENCE_OWNER_VM, false);
WARN_ON(params.ib->length_dw > ndw); WARN_ON(params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity, r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
AMDGPU_FENCE_OWNER_VM, &fence); &fence);
if (r) if (r)
goto error; goto error;
...@@ -1485,8 +1485,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, ...@@ -1485,8 +1485,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
amdgpu_ring_pad_ib(ring, params.ib); amdgpu_ring_pad_ib(ring, params.ib);
WARN_ON(params.ib->length_dw > ndw); WARN_ON(params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity, r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
AMDGPU_FENCE_OWNER_VM, &f);
if (r) if (r)
goto error_free; goto error_free;
......
...@@ -320,7 +320,7 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, ...@@ -320,7 +320,7 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
amdgpu_job_free(job); amdgpu_job_free(job);
} else { } else {
r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity, r = amdgpu_job_submit(job, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f); AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r) if (r)
goto err; goto err;
......
...@@ -321,7 +321,7 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ...@@ -321,7 +321,7 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
amdgpu_job_free(job); amdgpu_job_free(job);
} else { } else {
r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity, r = amdgpu_job_submit(job, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f); AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r) if (r)
goto err; goto err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment