Commit d1f6dc1a authored by Andrey Grodzovsky, committed by Alex Deucher

drm/amdgpu: Avoid accessing job->entity after the job is scheduled.

Bug: amdgpu_job_free_cb was accessing s_job->s_entity after the
amdgpu_ctx (and the entity embedded in it) had already been deallocated
from amdgpu_cs_parser_fini.

Fix: Save the job's priority at creation time instead of reading it from
s_entity later on.
Signed-off-by: Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
Reviewed-by: Andres Rodriguez <andresx7@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent ca797d29
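
A minimal sketch of the lifetime hazard being fixed, reconstructed from the commit message and the diff below (the call sequence is illustrative, not verbatim driver code):

/*
 * amdgpu_cs_submit()
 *     amd_sched_entity_push_job(&job->base);  // job handed off to scheduler
 * amdgpu_cs_parser_fini()
 *     // may release the amdgpu_ctx, freeing the amd_sched_entity
 *     // embedded in it
 * ... later, on the scheduler thread ...
 * amdgpu_job_free_cb(s_job)
 *     amd_sched_get_job_priority(s_job);      // dereferences the freed
 *                                             // s_job->s_entity
 *
 * The fix: amd_sched_job_init() caches the priority in job->s_priority
 * while the entity is still alive, and all later readers use the cache.
 */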
@@ -1194,8 +1194,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job->uf_sequence = seq;
 	amdgpu_job_free_resources(job);
-	amdgpu_ring_priority_get(job->ring,
-				 amd_sched_get_job_priority(&job->base));
+	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
 	trace_amdgpu_cs_ioctl(job);
 	amd_sched_entity_push_job(&job->base);
...
@@ -104,7 +104,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 {
 	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-	amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
+	amdgpu_ring_priority_put(job->ring, s_job->s_priority);
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->dep_sync);
@@ -141,8 +141,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 	job->fence_ctx = entity->fence_context;
 	*f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
-	amdgpu_ring_priority_get(job->ring,
-				 amd_sched_get_job_priority(&job->base));
+	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
 	amd_sched_entity_push_job(&job->base);
 
 	return 0;
...
@@ -529,6 +529,7 @@ int amd_sched_job_init(struct amd_sched_job *job,
 {
 	job->sched = sched;
 	job->s_entity = entity;
+	job->s_priority = entity->rq - sched->sched_rq;
 	job->s_fence = amd_sched_fence_create(entity, owner);
 	if (!job->s_fence)
 		return -ENOMEM;
...
@@ -30,6 +30,19 @@
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
 
+enum amd_sched_priority {
+	AMD_SCHED_PRIORITY_MIN,
+	AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
+	AMD_SCHED_PRIORITY_NORMAL,
+	AMD_SCHED_PRIORITY_HIGH_SW,
+	AMD_SCHED_PRIORITY_HIGH_HW,
+	AMD_SCHED_PRIORITY_KERNEL,
+	AMD_SCHED_PRIORITY_MAX,
+	AMD_SCHED_PRIORITY_INVALID = -1,
+	AMD_SCHED_PRIORITY_UNSET = -2
+};
+
 /**
  * A scheduler entity is a wrapper around a job queue or a group
  * of other entities. Entities take turns emitting jobs from their
@@ -83,6 +96,7 @@ struct amd_sched_job {
 	struct delayed_work		work_tdr;
 	uint64_t			id;
 	atomic_t			karma;
+	enum amd_sched_priority		s_priority;
 };
 
 extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
@@ -114,18 +128,6 @@ struct amd_sched_backend_ops {
 	void (*free_job)(struct amd_sched_job *sched_job);
 };
 
-enum amd_sched_priority {
-	AMD_SCHED_PRIORITY_MIN,
-	AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
-	AMD_SCHED_PRIORITY_NORMAL,
-	AMD_SCHED_PRIORITY_HIGH_SW,
-	AMD_SCHED_PRIORITY_HIGH_HW,
-	AMD_SCHED_PRIORITY_KERNEL,
-	AMD_SCHED_PRIORITY_MAX,
-	AMD_SCHED_PRIORITY_INVALID = -1,
-	AMD_SCHED_PRIORITY_UNSET = -2
-};
-
 /**
  * One scheduler is implemented for each hardware ring
  */
@@ -176,10 +178,4 @@ bool amd_sched_dependency_optimized(struct dma_fence* fence,
 				    struct amd_sched_entity *entity);
 void amd_sched_job_kickout(struct amd_sched_job *s_job);
 
-static inline enum amd_sched_priority
-amd_sched_get_job_priority(struct amd_sched_job *job)
-{
-	return (job->s_entity->rq - job->sched->sched_rq);
-}
-
 #endif
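
The new assignment in amd_sched_job_init() relies on the scheduler keeping one run queue per priority level in a contiguous array, so subtracting the array base from the entity's run-queue pointer recovers the priority index; the deleted amd_sched_get_job_priority() helper did the same arithmetic, only later and through the possibly-freed entity. A self-contained toy sketch of the idiom (all names below are hypothetical stand-ins, not the driver's types):

#include <assert.h>

enum prio { PRIO_LOW, PRIO_NORMAL, PRIO_HIGH, PRIO_COUNT };

struct rq { int jobs; };                     /* stand-in for amd_sched_rq */

struct sched {
	struct rq rq[PRIO_COUNT];            /* one run queue per priority */
};

struct entity {
	struct rq *rq;                       /* points into sched->rq[] */
};

struct job {
	enum prio s_priority;                /* snapshot taken at init time */
};

static void job_init(struct job *job, struct sched *s, struct entity *e)
{
	/* e->rq == &s->rq[p], so pointer subtraction recovers p; the same
	 * arithmetic as the commit's
	 * job->s_priority = entity->rq - sched->sched_rq; */
	job->s_priority = (enum prio)(e->rq - s->rq);
}

int main(void)
{
	struct sched s = { 0 };
	struct entity e = { .rq = &s.rq[PRIO_HIGH] };
	struct job j;

	job_init(&j, &s, &e);
	assert(j.s_priority == PRIO_HIGH);   /* index survives entity teardown */
	return 0;
}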