Commit cdc50176 authored by Nayan Deshmukh, committed by Alex Deucher

drm/scheduler: modify API to avoid redundancy

The entity already has a scheduler field, so we don't need a separate
sched argument in any of the functions where an entity is provided.
Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent bf314ca3
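
A minimal sketch of what a driver call site looks like with the reduced API, assuming a driver that already has an initialized drm_sched_entity. The helper name my_queue_job and its parameters are hypothetical placeholders, not symbols introduced by this patch; only the drm_sched_* calls are taken from the changed API below.

#include <drm/gpu_scheduler.h>

/* Hypothetical driver helper: queue one job on an entity. */
static int my_queue_job(struct drm_sched_job *job,
                        struct drm_sched_entity *entity,
                        void *owner)
{
        int r;

        /*
         * Old API: drm_sched_job_init(job, &ring->sched, entity, owner);
         * the explicit scheduler pointer duplicated what entity->sched
         * already stores.
         */
        r = drm_sched_job_init(job, entity, owner);
        if (r)
                return r;

        drm_sched_entity_push_job(job, entity);
        return 0;
}

Teardown follows the same pattern: drm_sched_entity_destroy(entity) now flushes and finalizes the entity using entity->sched internally, so callers no longer pass the scheduler.
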
@@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
         job = p->job;
         p->job = NULL;
 
-        r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+        r = drm_sched_job_init(&job->base, entity, p->filp);
         if (r) {
                 amdgpu_job_free(job);
                 amdgpu_mn_unlock(p->mn);
...
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 failed:
         for (j = 0; j < i; j++)
-                drm_sched_entity_destroy(&adev->rings[j]->sched,
-                                         &ctx->rings[j].entity);
+                drm_sched_entity_destroy(&ctx->rings[j].entity);
         kfree(ctx->fences);
         ctx->fences = NULL;
         return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
                 if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
                         continue;
 
-                drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
-                                         &ctx->rings[i].entity);
+                drm_sched_entity_destroy(&ctx->rings[i].entity);
         }
 
         amdgpu_ctx_fini(ref);
@@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
                         if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
                                 continue;
 
-                        max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
-                                          &ctx->rings[i].entity, max_wait);
+                        max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+                                                          max_wait);
                 }
         }
         mutex_unlock(&mgr->lock);
@@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
                                 continue;
 
                         if (kref_read(&ctx->refcount) == 1)
-                                drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
-                                                      &ctx->rings[i].entity);
+                                drm_sched_entity_fini(&ctx->rings[i].entity);
                         else
                                 DRM_ERROR("ctx %p is still alive\n", ctx);
                 }
...
@@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
         if (!f)
                 return -EINVAL;
 
-        r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+        r = drm_sched_job_init(&job->base, entity, owner);
         if (r)
                 return r;
...
@@ -1925,8 +1925,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                         return;
                 }
         } else {
-                drm_sched_entity_destroy(adev->mman.entity.sched,
-                                         &adev->mman.entity);
+                drm_sched_entity_destroy(&adev->mman.entity);
                 dma_fence_put(man->move);
                 man->move = NULL;
         }
...
@@ -305,8 +305,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
         int i, j;
 
-        drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
-                                 &adev->uvd.entity);
+        drm_sched_entity_destroy(&adev->uvd.entity);
 
         for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
                 kfree(adev->uvd.inst[j].saved_bo);
...
@@ -221,7 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
         if (adev->vce.vcpu_bo == NULL)
                 return 0;
 
-        drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
+        drm_sched_entity_destroy(&adev->vce.entity);
 
         amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
                 (void **)&adev->vce.cpu_addr);
...
@@ -2642,7 +2642,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-        drm_sched_entity_destroy(&ring->sched, &vm->entity);
+        drm_sched_entity_destroy(&vm->entity);
 
         return r;
 }
@@ -2779,7 +2779,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
         }
 
-        drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
+        drm_sched_entity_destroy(&vm->entity);
 
         if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
                 dev_err(adev->dev, "still active bo inside vm\n");
...
@@ -78,8 +78,7 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
                         gpu->lastctx = NULL;
                         mutex_unlock(&gpu->lock);
 
-                        drm_sched_entity_destroy(&gpu->sched,
-                                                 &ctx->sched_entity[i]);
+                        drm_sched_entity_destroy(&ctx->sched_entity[i]);
                 }
         }
...
@@ -118,8 +118,8 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
 {
         int ret;
 
-        ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
-                                 sched_entity, submit->cmdbuf.ctx);
+        ret = drm_sched_job_init(&submit->sched_job, sched_entity,
+                                 submit->cmdbuf.ctx);
         if (ret)
                 return ret;
...
@@ -273,11 +273,12 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
  *
  * Returns the remaining time in jiffies left from the input timeout
  */
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
-                            struct drm_sched_entity *entity, long timeout)
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 {
+        struct drm_gpu_scheduler *sched;
         long ret = timeout;
 
+        sched = entity->sched;
         if (!drm_sched_entity_is_initialized(sched, entity))
                 return ret;
         /**
@@ -312,10 +313,11 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
  * entity and signals all jobs with an error code if the process was killed.
  *
  */
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-                           struct drm_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
 {
+        struct drm_gpu_scheduler *sched;
+        sched = entity->sched;
         drm_sched_entity_set_rq(entity, NULL);
 
         /* Consumption of existing IBs wasn't completed. Forcefully
@@ -373,11 +375,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
  *
  * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
  */
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
-                              struct drm_sched_entity *entity)
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
 {
-        drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
-        drm_sched_entity_fini(sched, entity);
+        drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+        drm_sched_entity_fini(entity);
 }
 EXPORT_SYMBOL(drm_sched_entity_destroy);
@@ -740,10 +741,11 @@ EXPORT_SYMBOL(drm_sched_job_recovery);
  * Returns 0 for success, negative error code otherwise.
  */
 int drm_sched_job_init(struct drm_sched_job *job,
-                       struct drm_gpu_scheduler *sched,
                        struct drm_sched_entity *entity,
                        void *owner)
 {
+        struct drm_gpu_scheduler *sched = entity->sched;
+
         job->sched = sched;
         job->entity = entity;
         job->s_priority = entity->rq - sched->sched_rq;
...
@@ -145,13 +145,11 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 static void
 v3d_postclose(struct drm_device *dev, struct drm_file *file)
 {
-        struct v3d_dev *v3d = to_v3d_dev(dev);
         struct v3d_file_priv *v3d_priv = file->driver_priv;
         enum v3d_queue q;
 
         for (q = 0; q < V3D_MAX_QUEUES; q++) {
-                drm_sched_entity_destroy(&v3d->queue[q].sched,
-                                         &v3d_priv->sched_entity[q]);
+                drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
         }
 
         kfree(v3d_priv);
...
@@ -553,7 +553,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
         mutex_lock(&v3d->sched_lock);
         if (exec->bin.start != exec->bin.end) {
                 ret = drm_sched_job_init(&exec->bin.base,
-                                         &v3d->queue[V3D_BIN].sched,
                                          &v3d_priv->sched_entity[V3D_BIN],
                                          v3d_priv);
                 if (ret)
@@ -568,7 +567,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
         }
 
         ret = drm_sched_job_init(&exec->render.base,
-                                 &v3d->queue[V3D_RENDER].sched,
                                  &v3d_priv->sched_entity[V3D_RENDER],
                                  v3d_priv);
         if (ret)
...
@@ -286,12 +286,9 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
                           struct drm_sched_rq **rq_list,
                           unsigned int num_rq_list,
                           atomic_t *guilty);
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
-                            struct drm_sched_entity *entity, long timeout);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-                           struct drm_sched_entity *entity);
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
-                              struct drm_sched_entity *entity);
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
+void drm_sched_entity_fini(struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                                struct drm_sched_entity *entity);
 void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
@@ -302,7 +299,6 @@ struct drm_sched_fence *drm_sched_fence_create(
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
 void drm_sched_fence_finished(struct drm_sched_fence *fence);
 int drm_sched_job_init(struct drm_sched_job *job,
-                       struct drm_gpu_scheduler *sched,
                        struct drm_sched_entity *entity,
                        void *owner);
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
...