Commit 2b184d8d authored by Christian König, committed by Alex Deucher

drm/amdgpu: use a spinlock instead of a mutex for the rq

More appropriate and fixes some nasty lockdep warnings.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent a3348bb8
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -30,27 +30,27 @@
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
+	spin_lock_init(&rq->lock);
 	INIT_LIST_HEAD(&rq->entities);
-	mutex_init(&rq->lock);
 	rq->current_entity = NULL;
 }
 
 static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
 				    struct amd_sched_entity *entity)
 {
-	mutex_lock(&rq->lock);
+	spin_lock(&rq->lock);
 	list_add_tail(&entity->list, &rq->entities);
-	mutex_unlock(&rq->lock);
+	spin_unlock(&rq->lock);
 }
 
 static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 				       struct amd_sched_entity *entity)
 {
-	mutex_lock(&rq->lock);
+	spin_lock(&rq->lock);
 	list_del_init(&entity->list);
 	if (rq->current_entity == entity)
 		rq->current_entity = NULL;
-	mutex_unlock(&rq->lock);
+	spin_unlock(&rq->lock);
 }
 
 /**
@@ -61,12 +61,16 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 static struct amd_sched_entity *
 amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 {
-	struct amd_sched_entity *entity = rq->current_entity;
+	struct amd_sched_entity *entity;
 
+	spin_lock(&rq->lock);
+
+	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
 			if (!kfifo_is_empty(&entity->job_queue)) {
 				rq->current_entity = entity;
+				spin_unlock(&rq->lock);
 				return rq->current_entity;
 			}
 		}
@@ -76,6 +80,7 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 
 		if (!kfifo_is_empty(&entity->job_queue)) {
 			rq->current_entity = entity;
+			spin_unlock(&rq->lock);
 			return rq->current_entity;
 		}
 
@@ -83,6 +88,8 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 			break;
 	}
 
+	spin_unlock(&rq->lock);
+
 	return NULL;
 }
 
@@ -108,22 +115,6 @@ static bool is_scheduler_ready(struct amd_gpu_scheduler *sched)
 	return full;
 }
 
-/**
- * Select next entity from the kernel run queue, if not available, return null.
-*/
-static struct amd_sched_entity *
-kernel_rq_select_context(struct amd_gpu_scheduler *sched)
-{
-	struct amd_sched_entity *sched_entity;
-	struct amd_sched_rq *rq = &sched->kernel_rq;
-
-	mutex_lock(&rq->lock);
-	sched_entity = amd_sched_rq_select_entity(rq);
-	mutex_unlock(&rq->lock);
-
-	return sched_entity;
-}
-
 /**
  * Select next entity containing real IB submissions
 */
@@ -132,21 +123,15 @@ select_context(struct amd_gpu_scheduler *sched)
 {
 	struct amd_sched_entity *wake_entity = NULL;
 	struct amd_sched_entity *tmp;
-	struct amd_sched_rq *rq;
 
 	if (!is_scheduler_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	tmp = kernel_rq_select_context(sched);
-	if (tmp != NULL)
-		goto exit;
-
-	rq = &sched->sched_rq;
-	mutex_lock(&rq->lock);
-	tmp = amd_sched_rq_select_entity(rq);
-	mutex_unlock(&rq->lock);
-exit:
+	tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
+	if (tmp == NULL)
+		tmp = amd_sched_rq_select_entity(&sched->sched_rq);
+
 	if (sched->current_entity && (sched->current_entity != tmp))
 		wake_entity = sched->current_entity;
 	sched->current_entity = tmp;
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -63,7 +63,7 @@ struct amd_sched_entity {
  * the next entity to emit commands from.
 */
 struct amd_sched_rq {
-	struct mutex		lock;
+	spinlock_t		lock;
 	struct list_head	entities;
 	struct amd_sched_entity	*current_entity;
 };
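
For context, here is a condensed sketch of the run-queue locking pattern after this patch, put together from the new code in the diff above. It is not a complete, buildable file: the entity struct is reduced to a stub and the wrap-around part of the selection loop is elided. The point it illustrates is that rq->lock is now taken and released entirely inside the run-queue helpers, including on every early-return path of the selection loop, so callers no longer lock the run queue themselves.

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/kfifo.h>

/* Simplified stub of the scheduler entity; the real struct has more members. */
struct amd_sched_entity {
	struct list_head	list;		/* link in the run queue */
	struct kfifo		job_queue;	/* pending jobs */
};

struct amd_sched_rq {
	spinlock_t			lock;
	struct list_head		entities;
	struct amd_sched_entity		*current_entity;
};

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);		/* short, non-sleeping critical section */
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (!kfifo_is_empty(&entity->job_queue)) {
				rq->current_entity = entity;
				/* drop the lock on every return path */
				spin_unlock(&rq->lock);
				return rq->current_entity;
			}
		}
	}

	/* (wrap-around scan over all entities elided; it unlocks the same way) */

	spin_unlock(&rq->lock);
	return NULL;
}

With the locking folded into the helpers, the kernel_rq_select_context() wrapper becomes redundant, and select_context() simply tries the kernel run queue first and falls back to the normal run queue, as the last gpu_scheduler.c hunk above shows.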