Commit ce882e6d authored by Christian König, committed by Alex Deucher

drm/amdgpu: remove v_seq handling from the scheduler v2

The v_seq handling is simply not used any more; only keep a 32-bit atomic for fence sequence numbering.

v2: trivial rebase
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (v1)
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com> (v1)
Reviewed-by: Chunming Zhou <david1.zhou@amd.com> (v1)
parent 4ce9891e
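In short, after this patch every scheduler entity hands out fence sequence numbers from a plain 32-bit atomic counter, and the 64-bit virtual sequence numbers (seeded per entity with ring_id << 60) go away. Below is a minimal, standalone sketch of that numbering scheme, using C11 atomics as a stand-in for the kernel's atomic_t; the struct and function names are illustrative, not the driver's.

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in for struct amd_sched_entity: one 32-bit counter,
 * replacing the old atomic64_t last_queued_v_seq / last_signaled_v_seq pair */
struct sketch_entity {
        atomic_uint fence_seq;
        uint64_t    fence_context;
};

/* roughly what amd_sched_fence_create() does now: each new fence simply
 * takes the next value of the entity's counter (atomic_inc_return style) */
static unsigned int sketch_next_seq(struct sketch_entity *e)
{
        return atomic_fetch_add(&e->fence_seq, 1) + 1;
}

int main(void)
{
        struct sketch_entity e = { .fence_context = 42 };

        atomic_init(&e.fence_seq, 0);
        for (int i = 0; i < 3; i++)
                printf("fence seqno %u on context %" PRIu64 "\n",
                       sketch_next_seq(&e), e.fence_context);
        return 0;
}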
@@ -1047,7 +1047,7 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 struct amdgpu_ctx *amdgpu_ctx_get_ref(struct amdgpu_ctx *ctx);
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-                              struct fence *fence, uint64_t queued_seq);
+                              struct fence *fence);
 struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                    struct amdgpu_ring *ring, uint64_t seq);
...
@@ -866,11 +866,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        kfree(job);
                        goto out;
                }
-               job->ibs[parser->num_ibs - 1].sequence =
+               cs->out.handle =
                        amdgpu_ctx_add_fence(job->ctx, ring,
-                                            &job->base.s_fence->base,
-                                            job->base.s_fence->v_seq);
-               cs->out.handle = job->base.s_fence->v_seq;
+                                            &job->base.s_fence->base);
                list_sort(NULL, &parser->validated, cmp_size_smaller_first);
                ttm_eu_fence_buffer_objects(&parser->ticket,
                                            &parser->validated,
...
@@ -236,17 +236,13 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 }
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-                              struct fence *fence, uint64_t queued_seq)
+                              struct fence *fence)
 {
        struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
-       uint64_t seq = 0;
+       uint64_t seq = cring->sequence;
        unsigned idx = 0;
        struct fence *other = NULL;
 
-       if (amdgpu_enable_scheduler)
-               seq = queued_seq;
-       else
-               seq = cring->sequence;
        idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
        other = cring->fences[idx];
        if (other) {
@@ -260,7 +256,6 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
        spin_lock(&ctx->ring_lock);
        cring->fences[idx] = fence;
-       if (!amdgpu_enable_scheduler)
-               cring->sequence++;
+       cring->sequence++;
        spin_unlock(&ctx->ring_lock);
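The two hunks above leave amdgpu_ctx_add_fence() with a single code path: the fence is stored in a small per-context, per-ring array indexed by cring->sequence modulo AMDGPU_CTX_MAX_CS_PENDING, the sequence is bumped, and the pre-increment value becomes the handle the CS ioctl reports back to userspace. A simplified sketch of that ring of pending fences follows; locking, reference counting and the wait on the displaced fence are omitted, and all names are illustrative.

#include <stdint.h>

#define SKETCH_MAX_PENDING 16   /* stand-in for AMDGPU_CTX_MAX_CS_PENDING */

/* illustrative stand-in for struct amdgpu_ctx_ring */
struct sketch_ctx_ring {
        uint64_t sequence;                      /* next handle to hand out */
        void *fences[SKETCH_MAX_PENDING];       /* last SKETCH_MAX_PENDING fences */
};

/* roughly the shape of amdgpu_ctx_add_fence() after this patch: the slot for
 * seq % SKETCH_MAX_PENDING is recycled (the real driver first deals with the
 * fence that used to live there), the new fence is stored, and the sequence
 * number is returned so the CS ioctl can report it as cs->out.handle */
uint64_t sketch_add_fence(struct sketch_ctx_ring *r, void *fence)
{
        uint64_t seq = r->sequence;
        unsigned int idx = seq % SKETCH_MAX_PENDING;

        r->fences[idx] = fence;
        r->sequence++;

        return seq;
}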
@@ -274,21 +269,16 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 {
        struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
        struct fence *fence;
-       uint64_t queued_seq;
 
        spin_lock(&ctx->ring_lock);
-       if (amdgpu_enable_scheduler)
-               queued_seq = amd_sched_next_queued_seq(&cring->entity);
-       else
-               queued_seq = cring->sequence;
 
-       if (seq >= queued_seq) {
+       if (seq >= cring->sequence) {
                spin_unlock(&ctx->ring_lock);
                return ERR_PTR(-EINVAL);
        }
 
-       if (seq + AMDGPU_CTX_MAX_CS_PENDING < queued_seq) {
+       if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }
...
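With amd_sched_next_queued_seq() gone, amdgpu_ctx_get_fence() validates a handle purely against cring->sequence: anything at or beyond it was never handed out (-EINVAL), and anything more than AMDGPU_CTX_MAX_CS_PENDING behind it has already been recycled out of the array (NULL, i.e. long since signaled). A matching sketch of that lookup, kept self-contained and with locking and fence reference handling omitted; names are illustrative.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define SKETCH_MAX_PENDING 16   /* stand-in for AMDGPU_CTX_MAX_CS_PENDING */

struct sketch_ctx_ring {
        uint64_t sequence;                      /* next handle to be handed out */
        void *fences[SKETCH_MAX_PENDING];
};

/* roughly the checks amdgpu_ctx_get_fence() performs after this patch */
void *sketch_get_fence(const struct sketch_ctx_ring *r, uint64_t seq, int *err)
{
        *err = 0;

        if (seq >= r->sequence) {               /* never issued: invalid handle */
                *err = -EINVAL;
                return NULL;
        }
        if (seq + SKETCH_MAX_PENDING < r->sequence)
                return NULL;                    /* recycled already: treat as long signaled */

        return r->fences[seq % SKETCH_MAX_PENDING];
}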
@@ -126,7 +126,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
        struct amdgpu_ring *ring;
        struct amdgpu_ctx *ctx, *old_ctx;
        struct amdgpu_vm *vm;
-       uint64_t sequence;
        unsigned i;
        int r = 0;
@@ -199,12 +198,9 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
                return r;
        }
 
-       sequence = amdgpu_enable_scheduler ? ib->sequence : 0;
-
        if (!amdgpu_enable_scheduler && ib->ctx)
                ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
-                                                   &ib->fence->base,
-                                                   sequence);
+                                                   &ib->fence->base);
 
        /* wrap the last IB with fence */
        if (ib->user) {
...
@@ -435,8 +435,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                        seq_printf(m, " protected by 0x%016llx on ring %d",
                                   a_fence->seq, a_fence->ring->idx);
                if (s_fence)
-                       seq_printf(m, " protected by 0x%016llx on ring %d",
-                                  s_fence->v_seq,
+                       seq_printf(m, " protected by 0x%016x on ring %d",
+                                  s_fence->base.seqno,
                                   s_fence->entity->scheduler->ring_id);
        }
...
@@ -111,7 +111,6 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                        kfree(job);
                        return r;
                }
-               ibs[num_ibs - 1].sequence = job->base.s_fence->v_seq;
                *f = fence_get(&job->base.s_fence->base);
                mutex_unlock(&job->job_lock);
        } else {
...
@@ -156,14 +156,12 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
 {
-       uint64_t seq_ring = 0;
        char name[20];
 
        if (!(sched && entity && rq))
                return -EINVAL;
 
        memset(entity, 0, sizeof(struct amd_sched_entity));
-       seq_ring = ((uint64_t)sched->ring_id) << 60;
        spin_lock_init(&entity->lock);
        entity->belongto_rq = rq;
        entity->scheduler = sched;
@@ -179,8 +177,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                return -EINVAL;
 
        spin_lock_init(&entity->queue_lock);
-       atomic64_set(&entity->last_queued_v_seq, seq_ring);
-       atomic64_set(&entity->last_signaled_v_seq, seq_ring);
+       atomic_set(&entity->fence_seq, 0);
 
        /* Add the entity to the run queue */
        amd_sched_rq_add_entity(rq, entity);
@@ -299,8 +296,6 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
        unsigned long flags;
 
        sched = sched_job->sched;
-       atomic64_set(&sched_job->s_entity->last_signaled_v_seq,
-                    sched_job->s_fence->v_seq);
        amd_sched_fence_signal(sched_job->s_fence);
        spin_lock_irqsave(&sched->queue_lock, flags);
        list_del(&sched_job->list);
@@ -421,15 +416,3 @@ int amd_sched_destroy(struct amd_gpu_scheduler *sched)
        kfree(sched);
        return 0;
 }
-
-/**
- * Get next queued sequence number
- *
- * @entity The context entity
- *
- * return the next queued sequence number
- */
-uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity)
-{
-       return atomic64_read(&c_entity->last_queued_v_seq) + 1;
-}
@@ -42,9 +42,7 @@ struct amd_sched_entity {
        struct list_head                list;
        struct amd_sched_rq             *belongto_rq;
        spinlock_t                      lock;
-       /* the virtual_seq is unique per context per ring */
-       atomic64_t                      last_queued_v_seq;
-       atomic64_t                      last_signaled_v_seq;
+       atomic_t                        fence_seq;
        /* the job_queue maintains the jobs submitted by clients */
        struct kfifo                    job_queue;
        spinlock_t                      queue_lock;
@@ -72,7 +70,6 @@ struct amd_sched_fence {
        struct fence                    base;
        struct fence_cb                 cb;
        struct amd_sched_entity         *entity;
-       uint64_t                        v_seq;
        spinlock_t                      lock;
 };
@@ -148,8 +145,6 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity);
 
-uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity);
-
 struct amd_sched_fence *amd_sched_fence_create(
        struct amd_sched_entity *s_entity);
 void amd_sched_fence_signal(struct amd_sched_fence *fence);
...
@@ -30,16 +30,19 @@
 struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity)
 {
        struct amd_sched_fence *fence = NULL;
+       unsigned seq;
+
        fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
        if (fence == NULL)
                return NULL;
-       fence->v_seq = atomic64_inc_return(&s_entity->last_queued_v_seq);
        fence->entity = s_entity;
        spin_lock_init(&fence->lock);
-       fence_init(&fence->base, &amd_sched_fence_ops,
-                  &fence->lock,
-                  s_entity->fence_context,
-                  fence->v_seq);
+
+       seq = atomic_inc_return(&s_entity->fence_seq);
+       fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
+                  s_entity->fence_context, seq);
+
        return fence;
 }
...
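A side note on why dropping from a 64-bit to a 32-bit sequence number is acceptable here: sequence numbers within one fence context are only ever compared over a short window (the driver itself keeps no more than AMDGPU_CTX_MAX_CS_PENDING fences around), and later-than checks on such counters are conventionally written with wrap-around-safe signed arithmetic. The snippet below is a generic illustration of that idiom, not a quote of the kernel's fence code.

#include <stdbool.h>

/* wrap-safe "a was issued after b" for 32-bit sequence numbers: correct as
 * long as the two values are less than 2^31 apart, even across wrap-around */
static inline bool seq_is_later(unsigned int a, unsigned int b)
{
        return (int)(a - b) > 0;
}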