Commit 7abbbe26 authored by Tvrtko Ursulin's avatar Tvrtko Ursulin Committed by Luben Tuikov

drm/sched: Rename drm_sched_get_cleanup_job to be more descriptive

"Get cleanup job" makes it sound like helper is returning a job which will
execute some cleanup, or something, while the kerneldoc itself accurately
says "fetch the next _finished_ job". So let's rename the helper to be self
documenting.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Luben Tuikov <ltuikov89@gmail.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231102105538.391648-2-tvrtko.ursulin@linux.intel.com
Reviewed-by: Luben Tuikov <ltuikov89@gmail.com>
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
parent 3b511278
...@@ -448,7 +448,7 @@ static void drm_sched_job_timedout(struct work_struct *work) ...@@ -448,7 +448,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
/* Protects against concurrent deletion in drm_sched_get_cleanup_job */ /* Protects against concurrent deletion in drm_sched_get_finished_job */
spin_lock(&sched->job_list_lock); spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->pending_list, job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list); struct drm_sched_job, list);
...@@ -500,9 +500,9 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) ...@@ -500,9 +500,9 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
/* /*
* Reinsert back the bad job here - now it's safe as * Reinsert back the bad job here - now it's safe as
* drm_sched_get_cleanup_job cannot race against us and release the * drm_sched_get_finished_job cannot race against us and release the
* bad job at this point - we parked (waited for) any in progress * bad job at this point - we parked (waited for) any in progress
* (earlier) cleanups and drm_sched_get_cleanup_job will not be called * (earlier) cleanups and drm_sched_get_finished_job will not be called
* now until the scheduler thread is unparked. * now until the scheduler thread is unparked.
*/ */
if (bad && bad->sched == sched) if (bad && bad->sched == sched)
...@@ -960,7 +960,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) ...@@ -960,7 +960,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
} }
/** /**
* drm_sched_get_cleanup_job - fetch the next finished job to be destroyed * drm_sched_get_finished_job - fetch the next finished job to be destroyed
* *
* @sched: scheduler instance * @sched: scheduler instance
* *
...@@ -968,7 +968,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) ...@@ -968,7 +968,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
* ready for it to be destroyed. * ready for it to be destroyed.
*/ */
static struct drm_sched_job * static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
{ {
struct drm_sched_job *job, *next; struct drm_sched_job *job, *next;
...@@ -1059,14 +1059,14 @@ static void drm_sched_free_job_work(struct work_struct *w) ...@@ -1059,14 +1059,14 @@ static void drm_sched_free_job_work(struct work_struct *w)
{ {
struct drm_gpu_scheduler *sched = struct drm_gpu_scheduler *sched =
container_of(w, struct drm_gpu_scheduler, work_free_job); container_of(w, struct drm_gpu_scheduler, work_free_job);
struct drm_sched_job *cleanup_job; struct drm_sched_job *job;
if (READ_ONCE(sched->pause_submit)) if (READ_ONCE(sched->pause_submit))
return; return;
cleanup_job = drm_sched_get_cleanup_job(sched); job = drm_sched_get_finished_job(sched);
if (cleanup_job) { if (job) {
sched->ops->free_job(cleanup_job); sched->ops->free_job(job);
drm_sched_free_job_queue_if_done(sched); drm_sched_free_job_queue_if_done(sched);
drm_sched_run_job_queue_if_ready(sched); drm_sched_run_job_queue_if_ready(sched);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment