Commit 8344c53f authored by Nayan Deshmukh, committed by Alex Deucher

drm/scheduler: remove unused parameter

This patch also affects the amdgpu and etnaviv drivers, which
use the function drm_sched_entity_init.
Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Acked-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 1297bf2e
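For callers, the change is mechanical: the max-jobs argument is simply dropped from drm_sched_entity_init. A minimal sketch of an affected call site, assuming a hypothetical driver ring "ring", entity "entity", and former job-count value "max_jobs" (illustrative names, not taken from this patch):

/* Before this patch: a job-count hint was passed, but the
 * scheduler never used it.
 */
r = drm_sched_entity_init(&ring->sched, &entity,
			  &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
			  max_jobs, NULL);

/* After this patch: the unused jobs parameter is gone; all other
 * arguments stay the same.
 */
r = drm_sched_entity_init(&ring->sched, &entity,
			  &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
			  NULL);
if (r)
	return r;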
@@ -91,7 +91,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			continue;
 
 		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-					  rq, amdgpu_sched_jobs, &ctx->guilty);
+					  rq, &ctx->guilty);
 		if (r)
 			goto failed;
 	}
@@ -111,7 +111,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r) {
 		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
 		goto error_entity;
@@ -242,7 +242,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	ring = &adev->uvd.ring;
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 	r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up UVD run queue.\n");
 		return r;
@@ -186,7 +186,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 	ring = &adev->vce.ring[0];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 	r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCE run queue.\n");
 		return r;
@@ -105,7 +105,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	ring = &adev->vcn.ring_dec;
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCN dec run queue.\n");
 		return r;
@@ -114,7 +114,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	ring = &adev->vcn.ring_enc[0];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCN enc run queue.\n");
 		return r;
@@ -2404,7 +2404,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 	r = drm_sched_entity_init(&ring->sched, &vm->entity,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r)
 		return r;
 
@@ -429,7 +429,7 @@ static int uvd_v6_0_sw_init(void *handle)
 		ring = &adev->uvd.ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 		r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
-					  rq, amdgpu_sched_jobs, NULL);
+					  rq, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up UVD ENC run queue.\n");
 			return r;
@@ -418,7 +418,7 @@ static int uvd_v7_0_sw_init(void *handle)
 		ring = &adev->uvd.ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 		r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
-					  rq, amdgpu_sched_jobs, NULL);
+					  rq, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up UVD ENC run queue.\n");
 			return r;
@@ -116,7 +116,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 			drm_sched_entity_init(&gpu->sched,
 					      &ctx->sched_entity[i],
 					      &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-					      32, NULL);
+					      NULL);
 		}
 	}
 
@@ -117,7 +117,6 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
  * @sched	The pointer to the scheduler
  * @entity	The pointer to a valid drm_sched_entity
  * @rq		The run queue this entity belongs
- * @jobs	The max number of jobs in the job queue
  * @guilty	atomic_t set to 1 when a job on this queue
  *		is found to be guilty causing a timeout
  *
@@ -126,7 +125,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
 int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 			  struct drm_sched_entity *entity,
 			  struct drm_sched_rq *rq,
-			  uint32_t jobs, atomic_t *guilty)
+			  atomic_t *guilty)
 {
 	if (!(sched && entity && rq))
 		return -EINVAL;
@@ -188,7 +188,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 			  struct drm_sched_entity *entity,
 			  struct drm_sched_rq *rq,
-			  uint32_t jobs, atomic_t *guilty);
+			  atomic_t *guilty);
 void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
 				 struct drm_sched_entity *entity);
 void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,