Commit 35963cf2 authored by Matthew Brost, committed by Luben Tuikov

drm/sched: Add drm_sched_wqueue_* helpers

Add scheduler wqueue ready, stop, and start helpers to hide the
implementation details of the scheduler from the drivers.

v2:
  - s/sched_submit/sched_wqueue/ (Luben)
  - Remove the extra blank line after the return statement (Luben)
  - Update the drm_sched_wqueue_ready comment (Luben)

Cc: Luben Tuikov <luben.tuikov@amd.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://lore.kernel.org/r/20231031032439.1558703-2-matthew.brost@intel.com
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
parent 0da611a8
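
For context, the driver-side pattern these helpers establish is sketched below. This is only an illustration assembled from the amdgpu hunks in this diff (the loop shape, `adev`, `AMDGPU_MAX_RINGS`, and the reset placeholder are taken from or modeled on those hunks), not an additional change carried by the commit:

	/* Pause and resume submission around a reset-style operation, using the
	 * new helpers instead of touching sched->thread directly. */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		/* Skip rings whose scheduler never became ready. */
		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
			continue;

		drm_sched_wqueue_stop(&ring->sched);	/* stop submission */
	}

	/* ... perform the reset or debug operation ... */

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
			continue;

		drm_sched_wqueue_start(&ring->sched);	/* resume submission */
	}

With drm_sched_wqueue_ready() standing in for the old `!ring->sched.thread` check, drivers stop depending on how the scheduler's submission path is implemented, which keeps them insulated if the backing mechanism changes.
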
@@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

-		if (!(ring && ring->sched.thread))
+		if (!(ring && drm_sched_wqueue_ready(&ring->sched)))
 			continue;

 		/* stop secheduler and drain ring. */
@@ -1659,9 +1659,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];

-		if (!ring || !ring->sched.thread)
+		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
 			continue;
-		kthread_park(ring->sched.thread);
+		drm_sched_wqueue_stop(&ring->sched);
 	}

 	seq_puts(m, "run ib test:\n");
@@ -1675,9 +1675,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];

-		if (!ring || !ring->sched.thread)
+		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
 			continue;
-		kthread_unpark(ring->sched.thread);
+		drm_sched_wqueue_start(&ring->sched);
 	}

 	up_write(&adev->reset_domain->sem);
@@ -1897,7 +1897,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 	ring = adev->rings[val];

-	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
+	if (!ring || !ring->funcs->preempt_ib ||
+	    !drm_sched_wqueue_ready(&ring->sched))
 		return -EINVAL;

 	/* the last preemption failed */
@@ -1915,7 +1916,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 		goto pro_end;

 	/* stop the scheduler */
-	kthread_park(ring->sched.thread);
+	drm_sched_wqueue_stop(&ring->sched);

 	/* preempt the IB */
 	r = amdgpu_ring_preempt_ib(ring);
@@ -1949,7 +1950,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 failure:
 	/* restart the scheduler */
-	kthread_unpark(ring->sched.thread);
+	drm_sched_wqueue_start(&ring->sched);

 	up_read(&adev->reset_domain->sem);
@@ -4601,7 +4601,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];

-		if (!ring || !ring->sched.thread)
+		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
 			continue;

 		spin_lock(&ring->sched.job_list_lock);
@@ -4740,7 +4740,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];

-		if (!ring || !ring->sched.thread)
+		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
 			continue;

 		/* Clear job fence from fence drv to avoid force_completion
@@ -5282,7 +5282,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = tmp_adev->rings[i];

-			if (!ring || !ring->sched.thread)
+			if (!ring || !drm_sched_wqueue_ready(&ring->sched))
 				continue;

 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
@@ -5357,7 +5357,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = tmp_adev->rings[i];

-			if (!ring || !ring->sched.thread)
+			if (!ring || !drm_sched_wqueue_ready(&ring->sched))
 				continue;

 			drm_sched_start(&ring->sched, true);
@@ -5683,7 +5683,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = adev->rings[i];

-			if (!ring || !ring->sched.thread)
+			if (!ring || !drm_sched_wqueue_ready(&ring->sched))
 				continue;

 			drm_sched_stop(&ring->sched, NULL);
@@ -5811,7 +5811,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];

-		if (!ring || !ring->sched.thread)
+		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
 			continue;

 		drm_sched_start(&ring->sched, true);
@@ -809,7 +809,8 @@ static void suspend_scheduler(struct msm_gpu *gpu)
 	 */
 	for (i = 0; i < gpu->nr_rings; i++) {
 		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
-		kthread_park(sched->thread);
+
+		drm_sched_wqueue_stop(sched);
 	}
 }
@@ -819,7 +820,8 @@ static void resume_scheduler(struct msm_gpu *gpu)
 	for (i = 0; i < gpu->nr_rings; i++) {
 		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
-		kthread_unpark(sched->thread);
+
+		drm_sched_wqueue_start(sched);
 	}
 }
@@ -439,7 +439,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 {
 	struct drm_sched_job *s_job, *tmp;

-	kthread_park(sched->thread);
+	drm_sched_wqueue_stop(sched);

 	/*
 	 * Reinsert back the bad job here - now it's safe as
@@ -552,7 +552,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 		spin_unlock(&sched->job_list_lock);
 	}

-	kthread_unpark(sched->thread);
+	drm_sched_wqueue_start(sched);
 }
 EXPORT_SYMBOL(drm_sched_start);
@@ -1252,3 +1252,38 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
 	}
 }
 EXPORT_SYMBOL(drm_sched_increase_karma);
+
+/**
+ * drm_sched_wqueue_ready - Is the scheduler ready for submission
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if submission is ready
+ */
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
+{
+	return !!sched->thread;
+}
+EXPORT_SYMBOL(drm_sched_wqueue_ready);
+
+/**
+ * drm_sched_wqueue_stop - stop scheduler submission
+ *
+ * @sched: scheduler instance
+ */
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
+{
+	kthread_park(sched->thread);
+}
+EXPORT_SYMBOL(drm_sched_wqueue_stop);
+
+/**
+ * drm_sched_wqueue_start - start scheduler submission
+ *
+ * @sched: scheduler instance
+ */
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
+{
+	kthread_unpark(sched->thread);
+}
+EXPORT_SYMBOL(drm_sched_wqueue_start);
@@ -553,6 +553,9 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
 void drm_sched_job_cleanup(struct drm_sched_job *job);
 void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);