Commit e2d732fd authored by Luben Tuikov, committed by Alex Deucher

drm/scheduler: Scheduler priority fixes (v2)

Remove DRM_SCHED_PRIORITY_LOW, as it was used
in only one place.

Rename DRM_SCHED_PRIORITY_MAX to DRM_SCHED_PRIORITY_COUNT
and set it off with a blank line, as it represents the
(total) count of the priorities and is used as such in
loops throughout the code. (With 0-based indexing, the
count is the first out-of-range index.)
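
For illustration only, here is a minimal standalone sketch (not part
of this patch) of why a 0-based count works as both the array size and
the loop bound; the enum mirrors the one this patch produces:

#include <stdio.h>

enum drm_sched_priority {
        DRM_SCHED_PRIORITY_MIN,
        DRM_SCHED_PRIORITY_NORMAL,
        DRM_SCHED_PRIORITY_HIGH,
        DRM_SCHED_PRIORITY_KERNEL,

        DRM_SCHED_PRIORITY_COUNT,
        DRM_SCHED_PRIORITY_INVALID = -1,
        DRM_SCHED_PRIORITY_UNSET = -2
};

int main(void)
{
        int num_jobs[DRM_SCHED_PRIORITY_COUNT] = { 0 }; /* COUNT sizes the array */
        int i;

        /* Ascending walk: COUNT is the first out-of-range index. */
        for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
                num_jobs[i] = i + 1;

        /* Descending walk (highest priority first), as in
         * drm_sched_select_entity(): start at COUNT - 1. */
        for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--)
                printf("priority %d: %d job(s)\n", i, num_jobs[i]);

        return 0;
}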

Remove the redundant word HIGH from the priority names,
and rename *KERNEL* to *HIGH*, as that is what it really
means: high.

v2: Add back KERNEL, and replace SW and HW with
    a single HIGH between NORMAL and KERNEL.
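
As a further sketch (the CTX_* names and values below are assumptions
for illustration, standing in for the AMDGPU_CTX_PRIORITY_* levels;
see amdgpu_to_sched_priority() in the diff), the practical effect of
v2 is that both userspace "high" levels collapse onto the single
scheduler HIGH:

#include <stdio.h>

enum drm_sched_priority {
        DRM_SCHED_PRIORITY_MIN,
        DRM_SCHED_PRIORITY_NORMAL,
        DRM_SCHED_PRIORITY_HIGH,
        DRM_SCHED_PRIORITY_KERNEL,
        DRM_SCHED_PRIORITY_COUNT
};

/* Hypothetical stand-ins for the AMDGPU_CTX_PRIORITY_* values. */
enum ctx_priority { CTX_VERY_LOW, CTX_LOW, CTX_NORMAL, CTX_HIGH, CTX_VERY_HIGH };

static enum drm_sched_priority to_sched_priority(enum ctx_priority prio)
{
        switch (prio) {
        case CTX_VERY_HIGH:
        case CTX_HIGH:
                return DRM_SCHED_PRIORITY_HIGH;  /* one HIGH level for both */
        case CTX_LOW:
        case CTX_VERY_LOW:
                return DRM_SCHED_PRIORITY_MIN;   /* LOW is gone; MIN stands in */
        default:
                return DRM_SCHED_PRIORITY_NORMAL;
        }
}

int main(void)
{
        printf("HIGH -> %d, VERY_HIGH -> %d\n",
               to_sched_priority(CTX_HIGH), to_sched_priority(CTX_VERY_HIGH));
        return 0;
}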
Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c80e966b
@@ -46,7 +46,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 static int amdgpu_ctx_priority_permit(struct drm_file *filp,
                                       enum drm_sched_priority priority)
 {
-        if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
+        if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
                 return -EINVAL;
 
         /* NORMAL and below are accessible by everyone */
@@ -65,7 +65,7 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
 {
         switch (prio) {
-        case DRM_SCHED_PRIORITY_HIGH_HW:
+        case DRM_SCHED_PRIORITY_HIGH:
         case DRM_SCHED_PRIORITY_KERNEL:
                 return AMDGPU_GFX_PIPE_PRIO_HIGH;
         default:
@@ -251,7 +251,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
         int i;
 
         /* Signal all jobs not yet scheduled */
-        for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+        for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                 struct drm_sched_rq *rq = &sched->sched_rq[i];
 
                 if (!rq)
@@ -267,7 +267,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                         &ring->sched;
         }
 
-        for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
+        for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
                 atomic_set(&ring->num_jobs[i], 0);
 
         return 0;
@@ -243,7 +243,7 @@ struct amdgpu_ring {
         bool                    has_compute_vm_bug;
         bool                    no_scheduler;
 
-        atomic_t                num_jobs[DRM_SCHED_PRIORITY_MAX];
+        atomic_t                num_jobs[DRM_SCHED_PRIORITY_COUNT];
         struct mutex            priority_mutex;
         /* protected by priority_mutex */
         int                     priority;
@@ -36,14 +36,14 @@ enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
 {
         switch (amdgpu_priority) {
         case AMDGPU_CTX_PRIORITY_VERY_HIGH:
-                return DRM_SCHED_PRIORITY_HIGH_HW;
+                return DRM_SCHED_PRIORITY_HIGH;
         case AMDGPU_CTX_PRIORITY_HIGH:
-                return DRM_SCHED_PRIORITY_HIGH_SW;
+                return DRM_SCHED_PRIORITY_HIGH;
         case AMDGPU_CTX_PRIORITY_NORMAL:
                 return DRM_SCHED_PRIORITY_NORMAL;
         case AMDGPU_CTX_PRIORITY_LOW:
         case AMDGPU_CTX_PRIORITY_VERY_LOW:
-                return DRM_SCHED_PRIORITY_LOW;
+                return DRM_SCHED_PRIORITY_MIN;
         case AMDGPU_CTX_PRIORITY_UNSET:
                 return DRM_SCHED_PRIORITY_UNSET;
         default:
@@ -2109,7 +2109,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                 ring = adev->mman.buffer_funcs_ring;
                 sched = &ring->sched;
                 r = drm_sched_entity_init(&adev->mman.entity,
-                                          DRM_SCHED_PRIORITY_KERNEL, &sched,
+                                          DRM_SCHED_PRIORITY_KERNEL, &sched,
                                           1, NULL);
                 if (r) {
                         DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
@@ -623,7 +623,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
                 return NULL;
 
         /* Kernel run queue has higher priority than normal run queue*/
-        for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+        for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
                 if (entity)
                         break;
@@ -851,7 +851,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
         sched->name = name;
         sched->timeout = timeout;
         sched->hang_limit = hang_limit;
-        for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
+        for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
                 drm_sched_rq_init(sched, &sched->sched_rq[i]);
 
         init_waitqueue_head(&sched->wake_up_worker);
@@ -33,14 +33,16 @@
 struct drm_gpu_scheduler;
 struct drm_sched_rq;
 
+/* These are often used as an (initial) index
+ * to an array, and as such should start at 0.
+ */
 enum drm_sched_priority {
         DRM_SCHED_PRIORITY_MIN,
-        DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
         DRM_SCHED_PRIORITY_NORMAL,
-        DRM_SCHED_PRIORITY_HIGH_SW,
-        DRM_SCHED_PRIORITY_HIGH_HW,
+        DRM_SCHED_PRIORITY_HIGH,
         DRM_SCHED_PRIORITY_KERNEL,
-        DRM_SCHED_PRIORITY_MAX,
+
+        DRM_SCHED_PRIORITY_COUNT,
         DRM_SCHED_PRIORITY_INVALID = -1,
         DRM_SCHED_PRIORITY_UNSET = -2
 };
@@ -274,7 +276,7 @@ struct drm_gpu_scheduler {
         uint32_t                        hw_submission_limit;
         long                            timeout;
         const char                      *name;
-        struct drm_sched_rq             sched_rq[DRM_SCHED_PRIORITY_MAX];
+        struct drm_sched_rq             sched_rq[DRM_SCHED_PRIORITY_COUNT];
         wait_queue_head_t               wake_up_worker;
         wait_queue_head_t               job_scheduled;
         atomic_t                        hw_rq_count;