Commit 440d52b3 authored by Rob Clark, committed by Danilo Krummrich

drm/sched: Fix dynamic job-flow control race

Fixes a race condition reported here: https://github.com/AsahiLinux/linux/issues/309#issuecomment-2238968609

The whole premise of lockless access to a single-producer-single-
consumer queue is that there is just a single producer and single
consumer.  That means we can't call drm_sched_can_queue() (which is
about queueing more work to the hw, not to the spsc queue) from
anywhere other than the consumer (wq).

This call in the producer is just an optimization to avoid scheduling
the consuming worker if it cannot yet queue more work to the hw.  It
is safe to drop this optimization to avoid the race condition.
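
To make the hazard concrete, here is a minimal userspace sketch of the
pattern being removed (not the scheduler code itself; the queue, credit
pool, and helper names below are made up for illustration): a producer
peeks an SPSC queue that only the consumer may safely inspect.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Illustrative single-producer/single-consumer node and queue. */
struct job {
	struct job *next;
	unsigned int credits;		/* hw credits this job needs */
};

struct spsc_queue {
	struct job *_Atomic head;	/* only the consumer removes entries */
};

static atomic_uint hw_credits_free = 8;	/* made-up hw credit pool */

/*
 * Peeking is only safe from the consumer: it is the sole context that
 * pops (and may free) the head entry.
 */
static struct job *queue_peek(struct spsc_queue *q)
{
	return atomic_load(&q->head);
}

/*
 * Pre-patch shape: the producer peeks the queue right after pushing, to
 * decide whether to schedule the worker.  The consumer can pop and free
 * the peeked job concurrently, so the dereference below races with it.
 */
static bool can_queue_from_producer(struct spsc_queue *q)
{
	struct job *job = queue_peek(q);	/* racy: we are the producer */

	return job && job->credits <= atomic_load(&hw_credits_free);
}

/*
 * Post-patch shape: the producer just kicks the worker unconditionally;
 * the credit check happens only on the consumer side, where peeking the
 * SPSC queue is safe by construction.
 */
static void wakeup_from_producer(void (*run_job_worker)(void))
{
	run_job_worker();
}

The worker still checks whether more work can be queued to the hw before
running jobs, so dropping the producer-side check costs at most an
occasional spurious worker wakeup.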
Suggested-by: Asahi Lina <lina@asahilina.net>
Fixes: a78422e9 ("drm/sched: implement dynamic job-flow control")
Closes: https://github.com/AsahiLinux/linux/issues/309
Cc: stable@vger.kernel.org
Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Danilo Krummrich <dakr@kernel.org>
Tested-by: Janne Grunau <j@jannau.net>
Signed-off-by: Danilo Krummrich <dakr@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20240913202301.16772-1-robdclark@gmail.com
parent a3096328
@@ -380,7 +380,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
 		container_of(cb, struct drm_sched_entity, cb);
 
 	drm_sched_entity_clear_dep(f, cb);
-	drm_sched_wakeup(entity->rq->sched, entity);
+	drm_sched_wakeup(entity->rq->sched);
 }
 
 /**
@@ -612,7 +612,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
 			drm_sched_rq_update_fifo(entity, submit_ts);
 
-		drm_sched_wakeup(entity->rq->sched, entity);
+		drm_sched_wakeup(entity->rq->sched);
 	}
 }
 EXPORT_SYMBOL(drm_sched_entity_push_job);
@@ -1022,14 +1022,11 @@ EXPORT_SYMBOL(drm_sched_job_cleanup);
 /**
  * drm_sched_wakeup - Wake up the scheduler if it is ready to queue
  * @sched: scheduler instance
- * @entity: the scheduler entity
  *
  * Wake up the scheduler if we can queue jobs.
  */
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched,
-		      struct drm_sched_entity *entity)
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 {
-	if (drm_sched_can_queue(sched, entity))
-		drm_sched_run_job_queue(sched);
+	drm_sched_run_job_queue(sched);
 }
...
@@ -574,7 +574,7 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
 void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
 void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
...