Commit 3320b8d2 authored by Christian König, committed by Alex Deucher

drm/amdgpu: remove job->ring

We can easily get that from the scheduler.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 0e28b10f
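
The lookup the commit message refers to is a plain container_of() mapping: the drm_gpu_scheduler instance is embedded in each amdgpu_ring, and the drm_sched_job is embedded in each amdgpu_job, so either container can be recovered from the pointer the scheduler hands back. A minimal sketch of the two helpers the diff relies on, assuming definitions along the lines of those in amdgpu_ring.h and amdgpu_job.h:

	/* Map a scheduler back to the ring that embeds it. */
	#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)

	/* Map a scheduler job back to the amdgpu_job that embeds it. */
	#define to_amdgpu_job(sched_job) \
		container_of((sched_job), struct amdgpu_job, base)

With these helpers, job->ring carries no information of its own: everything that holds the job, its entity, or its scheduler also has the ring.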
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1027,6 +1027,7 @@ struct amdgpu_cs_parser {

 	/* scheduler job object */
 	struct amdgpu_job	*job;
+	struct amdgpu_ring	*ring;

 	/* buffer objects */
 	struct ww_acquire_ctx	ticket;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -912,11 +912,11 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	struct amdgpu_ring *ring = p->job->ring;
+	struct amdgpu_ring *ring = p->ring;
 	int r;

 	/* Only for UVD/VCE VM emulation */
-	if (p->job->ring->funcs->parse_cs) {
+	if (p->ring->funcs->parse_cs) {
 		unsigned i, j;

 		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -1030,10 +1030,10 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			}
 		}

-		if (parser->job->ring && parser->job->ring != ring)
+		if (parser->ring && parser->ring != ring)
 			return -EINVAL;

-		parser->job->ring = ring;
+		parser->ring = ring;

 		r = amdgpu_ib_get(adev, vm,
 				   ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
@@ -1052,11 +1052,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,

 	/* UVD & VCE fw doesn't support user fences */
 	if (parser->job->uf_addr && (
-	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
-	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+	    parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+	    parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
 		return -EINVAL;

-	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
+	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
 }

 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1207,7 +1207,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 			    union drm_amdgpu_cs *cs)
 {
-	struct amdgpu_ring *ring = p->job->ring;
+	struct amdgpu_ring *ring = p->ring;
 	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
 	struct amdgpu_job *job;
 	unsigned i;
@@ -1256,7 +1256,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job->uf_sequence = seq;
 	amdgpu_job_free_resources(job);

-	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+	amdgpu_ring_priority_get(p->ring, job->base.s_priority);
 	trace_amdgpu_cs_ioctl(job);
 	drm_sched_entity_push_job(&job->base, entity);
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3253,7 +3253,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,

 		kthread_park(ring->sched.thread);

-		if (job && job->ring->idx != i)
+		if (job && job->base.sched != &ring->sched)
 			continue;

 		drm_sched_hw_job_reset(&ring->sched, &job->base);
@@ -3277,7 +3277,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		 * or all rings (in the case @job is NULL)
 		 * after above amdgpu_reset accomplished
 		 */
-		if ((!job || job->ring->idx == i) && !r)
+		if ((!job || job->base.sched == &ring->sched) && !r)
 			drm_sched_job_recovery(&ring->sched);

 		kthread_unpark(ring->sched.thread);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -30,12 +30,12 @@

 static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
-	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+	struct amdgpu_job *job = to_amdgpu_job(s_job);

 	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
-		  job->base.sched->name,
-		  atomic_read(&job->ring->fence_drv.last_seq),
-		  job->ring->fence_drv.sync_seq);
+		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+		  ring->fence_drv.sync_seq);

 	amdgpu_device_gpu_recover(job->adev, job, false);
 }
@@ -98,9 +98,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)

 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 {
-	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+	struct amdgpu_job *job = to_amdgpu_job(s_job);

-	amdgpu_ring_priority_put(job->ring, s_job->s_priority);
+	amdgpu_ring_priority_put(ring, s_job->s_priority);
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
@@ -120,6 +121,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 		      void *owner, struct dma_fence **f)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(entity->sched);
 	int r;

 	if (!f)
@@ -130,10 +132,9 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 		return r;

 	job->owner = owner;
-	job->ring = to_amdgpu_ring(entity->sched);
 	*f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
-	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+	amdgpu_ring_priority_get(ring, job->base.s_priority);
 	drm_sched_entity_push_job(&job->base, entity);

 	return 0;
@@ -142,6 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 					       struct drm_sched_entity *s_entity)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
 	bool explicit = false;
@@ -157,8 +159,6 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 	}

 	while (fence == NULL && vm && !job->vmid) {
-		struct amdgpu_ring *ring = job->ring;
-
 		r = amdgpu_vmid_grab(vm, ring, &job->sync,
 				     &job->base.s_fence->finished,
 				     job);
@@ -173,6 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
 	struct dma_fence *fence = NULL, *finished;
 	struct amdgpu_device *adev;
 	struct amdgpu_job *job;
@@ -196,7 +197,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 	if (finished->error < 0) {
 		DRM_INFO("Skip scheduling IBs!\n");
 	} else {
-		r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
 				       &fence);
 		if (r)
 			DRM_ERROR("Error scheduling IBs (%d)\n", r);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -37,7 +37,6 @@ struct amdgpu_job {
 	struct drm_sched_job	base;
 	struct amdgpu_device	*adev;
 	struct amdgpu_vm	*vm;
-	struct amdgpu_ring	*ring;
 	struct amdgpu_sync	sync;
 	struct amdgpu_sync	sched_sync;
 	struct amdgpu_ib	*ibs;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs,

 	    TP_fast_assign(
 			   __entry->bo_list = p->bo_list;
-			   __entry->ring = p->job->ring->idx;
+			   __entry->ring = p->ring->idx;
 			   __entry->dw = p->job->ibs[i].length_dw;
 			   __entry->fences = amdgpu_fence_count_emitted(
-				p->job->ring);
+				p->ring);
 			   ),
 	    TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
 		      __entry->bo_list, __entry->ring, __entry->dw,
@@ -178,7 +178,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
 			   __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
 			   __entry->context = job->base.s_fence->finished.context;
 			   __entry->seqno = job->base.s_fence->finished.seqno;
-			   __entry->ring_name = job->ring->name;
+			   __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
 			   __entry->num_ibs = job->num_ibs;
 			   ),
 	    TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -203,7 +203,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
 			   __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
 			   __entry->context = job->base.s_fence->finished.context;
 			   __entry->seqno = job->base.s_fence->finished.seqno;
-			   __entry->ring_name = job->ring->name;
+			   __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
 			   __entry->num_ibs = job->num_ibs;
 			   ),
 	    TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -692,11 +692,11 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 			     struct amdgpu_bo *bo, unsigned offset)
 {
 	struct amdgpu_device *adev = ctx->parser->adev;
+	uint32_t ip_instance = ctx->parser->ring->me;
 	int32_t *msg, msg_type, handle;
 	void *ptr;
 	long r;
 	int i;
-	uint32_t ip_instance = ctx->parser->job->ring->me;

 	if (offset & 0x3F) {
 		DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
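
Taken together, the conversion is mechanical: each job->ring dereference becomes a lookup through the scheduler back-pointer. A sketch of the before/after pattern, using the helpers assumed above:

	/* before: the ring was cached in the job at submit time */
	struct amdgpu_ring *ring = job->ring;

	/* after: derive it from the scheduler the job was pushed to */
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);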