Commit 1d7dd229 authored by Christian König, committed by Alex Deucher

drm/amdgpu: clean up amd sched wait_ts and wait_signal

Remove code not used at the moment.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent 7fc11959
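
In short: the scheduler previously exported amd_sched_check_ts(), amd_sched_wait_signal() and amd_sched_wait_emit(), the latter two as thin wrappers around a shared amd_sched_wait() helper. None of the signal-side paths have users at the moment, so only amd_sched_wait_emit() survives, and the last_signaled_v_seq counter goes away with them. A condensed before/after of the exported prototypes, pulled from the header hunk at the end of the diff (declaration order and spacing here are illustrative, not the exact file layout):

/* Before this patch: three entry points, two of them unused. */
int amd_sched_check_ts(struct amd_context_entity *c_entity, uint64_t seq);
int amd_sched_wait_signal(struct amd_context_entity *c_entity,
			  uint64_t seq, bool intr, long timeout);
int amd_sched_wait_emit(struct amd_context_entity *c_entity,
			uint64_t seq, bool intr, long timeout);

/* After this patch: only the emit-side wait remains. */
int amd_sched_wait_emit(struct amd_context_entity *c_entity,
			uint64_t seq, bool intr, long timeout);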
@@ -90,12 +90,6 @@ static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
 	ring = fence->ring;
 	adev = ring->adev;
-	if (sched_job->ctx) {
-		c_entity = &sched_job->ctx->rings[ring->idx].c_entity;
-		atomic64_set(&c_entity->last_signaled_v_seq,
-			     sched_job->ibs[sched_job->num_ibs - 1].sequence);
-	}
 	/* wake up users waiting for time stamp */
 	wake_up_all(&c_entity->wait_queue);

@@ -208,7 +208,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
 	entity->context_id = context_id;
 	atomic64_set(&entity->last_emitted_v_seq, seq_ring);
 	atomic64_set(&entity->last_queued_v_seq, seq_ring);
-	atomic64_set(&entity->last_signaled_v_seq, seq_ring);
 	/* Add the entity to the run queue */
 	mutex_lock(&rq->lock);

@@ -317,20 +316,7 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 }
 /**
- * Check the virtual sequence number for specified context
- *
- * @seq The virtual sequence number to check
- * @c_entity The pointer to a valid amd_context_entity
- *
- * return 0 if signaled, -1 else.
- */
-int amd_sched_check_ts(struct amd_context_entity *c_entity, uint64_t seq)
-{
-	return (seq <= atomic64_read(&c_entity->last_signaled_v_seq)) ? 0 : -1;
-}
-
-/**
- * Wait for a virtual sequence number to be signaled or timeout
+ * Wait for a virtual sequence number to be emitted.
  *
  * @c_entity The pointer to a valid context entity
  * @seq The virtual sequence number to wait

@@ -340,16 +326,13 @@ int amd_sched_check_ts(struct amd_context_entity *c_entity, uint64_t seq)
  *
  * return =0 signaled , <0 failed
  */
-static int amd_sched_wait(struct amd_context_entity *c_entity,
-			  uint64_t seq,
-			  bool intr,
-			  long timeout,
-			  bool emit)
+int amd_sched_wait_emit(struct amd_context_entity *c_entity,
+			uint64_t seq,
+			bool intr,
+			long timeout)
 {
-	atomic64_t *v_seq = emit ? &c_entity->last_emitted_v_seq :
-			    &c_entity->last_signaled_v_seq;
-	wait_queue_head_t *wait_queue = emit ? &c_entity->wait_emit :
-			    &c_entity->wait_queue;
+	atomic64_t *v_seq = &c_entity->last_emitted_v_seq;
+	wait_queue_head_t *wait_queue = &c_entity->wait_emit;
 	if (intr && (timeout < 0)) {
 		wait_event_interruptible(

@@ -379,22 +362,6 @@ static int amd_sched_wait(struct amd_context_entity *c_entity,
 	return 0;
 }
-int amd_sched_wait_signal(struct amd_context_entity *c_entity,
-			  uint64_t seq,
-			  bool intr,
-			  long timeout)
-{
-	return amd_sched_wait(c_entity, seq, intr, timeout, false);
-}
-
-int amd_sched_wait_emit(struct amd_context_entity *c_entity,
-			uint64_t seq,
-			bool intr,
-			long timeout)
-{
-	return amd_sched_wait(c_entity, seq, intr, timeout, true);
-}
 static int amd_sched_main(void *param)
 {
 	int r;

@@ -74,7 +74,6 @@ struct amd_context_entity {
 	/* the virtual_seq is unique per context per ring */
 	atomic64_t last_queued_v_seq;
 	atomic64_t last_emitted_v_seq;
-	atomic64_t last_signaled_v_seq;
 	pid_t tgid;
 	uint32_t context_id;
 	/* the job_queue maintains the jobs submitted by clients */

@@ -134,10 +133,6 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 			struct amd_context_entity *c_entity,
 			void *job);
-int amd_sched_check_ts(struct amd_context_entity *c_entity, uint64_t seq);
-int amd_sched_wait_signal(struct amd_context_entity *c_entity,
-			  uint64_t seq, bool intr, long timeout);
 int amd_sched_wait_emit(struct amd_context_entity *c_entity,
 			uint64_t seq,
 			bool intr,
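
For reference, the single wait helper left behind by this cleanup, pieced together from the hunks above. The struct fields (last_emitted_v_seq, wait_emit) and the interruptible, no-timeout branch are visible in the diff; wait_event_interruptible() and wait_event_timeout() are the standard <linux/wait.h> macros; the bounded branch and the return handling below are assumptions, not the code as committed:

int amd_sched_wait_emit(struct amd_context_entity *c_entity,
			uint64_t seq,
			bool intr,
			long timeout)
{
	/* Only the emit-side sequence/wait-queue pair survives this patch. */
	atomic64_t *v_seq = &c_entity->last_emitted_v_seq;
	wait_queue_head_t *wait_queue = &c_entity->wait_emit;
	long r = 0;

	if (intr && (timeout < 0)) {
		/* Interruptible wait with no upper bound (visible in the diff). */
		r = wait_event_interruptible(*wait_queue,
					     seq <= atomic64_read(v_seq));
	} else {
		/* Assumed: bounded wait; timeout taken to already be in jiffies. */
		r = wait_event_timeout(*wait_queue,
				       seq <= atomic64_read(v_seq),
				       timeout);
	}

	/* Per the kerneldoc above: 0 once the sequence was emitted, <0 on failure. */
	return r < 0 ? r : 0;
}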