Commit b07c60c0 authored by Christian König, committed by Alex Deucher

drm/amdgpu: move ring from IBs into job

We can't submit to multiple rings at the same time anyway.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 9e5d5309
...@@ -771,7 +771,6 @@ struct amdgpu_ib { ...@@ -771,7 +771,6 @@ struct amdgpu_ib {
uint32_t length_dw; uint32_t length_dw;
uint64_t gpu_addr; uint64_t gpu_addr;
uint32_t *ptr; uint32_t *ptr;
struct amdgpu_ring *ring;
struct amdgpu_fence *fence; struct amdgpu_fence *fence;
struct amdgpu_user_fence *user; struct amdgpu_user_fence *user;
bool grabbed_vmid; bool grabbed_vmid;
...@@ -1178,10 +1177,10 @@ struct amdgpu_gfx { ...@@ -1178,10 +1177,10 @@ struct amdgpu_gfx {
unsigned ce_ram_size; unsigned ce_ram_size;
}; };
int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib); unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib); void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ib, void *owner); struct amdgpu_ib *ib, void *owner);
int amdgpu_ib_pool_init(struct amdgpu_device *adev); int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
...@@ -1239,6 +1238,7 @@ struct amdgpu_cs_parser { ...@@ -1239,6 +1238,7 @@ struct amdgpu_cs_parser {
struct amdgpu_job { struct amdgpu_job {
struct amd_sched_job base; struct amd_sched_job base;
struct amdgpu_device *adev; struct amdgpu_device *adev;
struct amdgpu_ring *ring;
struct amdgpu_ib *ibs; struct amdgpu_ib *ibs;
uint32_t num_ibs; uint32_t num_ibs;
void *owner; void *owner;
......
...@@ -542,26 +542,25 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, ...@@ -542,26 +542,25 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
} }
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
struct amdgpu_cs_parser *parser) struct amdgpu_cs_parser *p)
{ {
struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_ring *ring; struct amdgpu_ring *ring = p->job->ring;
int i, r; int i, r;
/* Only for UVD/VCE VM emulation */ /* Only for UVD/VCE VM emulation */
for (i = 0; i < parser->job->num_ibs; i++) { if (ring->funcs->parse_cs) {
ring = parser->job->ibs[i].ring; for (i = 0; i < p->job->num_ibs; i++) {
if (ring->funcs->parse_cs) { r = amdgpu_ring_parse_cs(ring, p, i);
r = amdgpu_ring_parse_cs(ring, parser, i);
if (r) if (r)
return r; return r;
} }
} }
r = amdgpu_bo_vm_update_pte(parser, vm); r = amdgpu_bo_vm_update_pte(p, vm);
if (!r) if (!r)
amdgpu_cs_sync_rings(parser); amdgpu_cs_sync_rings(p);
return r; return r;
} }
...@@ -603,6 +602,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, ...@@ -603,6 +602,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
if (r) if (r)
return r; return r;
if (parser->job->ring && parser->job->ring != ring)
return -EINVAL;
parser->job->ring = ring;
if (ring->funcs->parse_cs) { if (ring->funcs->parse_cs) {
struct amdgpu_bo_va_mapping *m; struct amdgpu_bo_va_mapping *m;
struct amdgpu_bo *aobj = NULL; struct amdgpu_bo *aobj = NULL;
...@@ -631,7 +635,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, ...@@ -631,7 +635,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE; offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset; kptr += chunk_ib->va_start - offset;
r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib); r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
if (r) { if (r) {
DRM_ERROR("Failed to get ib !\n"); DRM_ERROR("Failed to get ib !\n");
return r; return r;
...@@ -640,7 +644,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, ...@@ -640,7 +644,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
amdgpu_bo_kunmap(aobj); amdgpu_bo_kunmap(aobj);
} else { } else {
r = amdgpu_ib_get(ring, vm, 0, ib); r = amdgpu_ib_get(adev, vm, 0, ib);
if (r) { if (r) {
DRM_ERROR("Failed to get ib !\n"); DRM_ERROR("Failed to get ib !\n");
return r; return r;
...@@ -680,8 +684,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, ...@@ -680,8 +684,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1]; struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
/* UVD & VCE fw doesn't support user fences */ /* UVD & VCE fw doesn't support user fences */
if (ib->ring->type == AMDGPU_RING_TYPE_UVD || if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
ib->ring->type == AMDGPU_RING_TYPE_VCE) parser->job->ring->type == AMDGPU_RING_TYPE_VCE)
return -EINVAL; return -EINVAL;
ib->user = &parser->job->uf; ib->user = &parser->job->uf;
...@@ -757,7 +761,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job) ...@@ -757,7 +761,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job)
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs) union drm_amdgpu_cs *cs)
{ {
struct amdgpu_ring * ring = p->job->ibs->ring; struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_fence *fence; struct amd_sched_fence *fence;
struct amdgpu_job *job; struct amdgpu_job *job;
...@@ -766,7 +770,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, ...@@ -766,7 +770,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->base.sched = &ring->sched; job->base.sched = &ring->sched;
job->base.s_entity = &p->ctx->rings[ring->idx].entity; job->base.s_entity = &p->ctx->rings[ring->idx].entity;
job->adev = p->adev;
job->owner = p->filp; job->owner = p->filp;
job->free_job = amdgpu_cs_free_job; job->free_job = amdgpu_cs_free_job;
......
...@@ -55,10 +55,9 @@ static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev); ...@@ -55,10 +55,9 @@ static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
* suballocator. * suballocator.
* Returns 0 on success, error on failure. * Returns 0 on success, error on failure.
*/ */
int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib) unsigned size, struct amdgpu_ib *ib)
{ {
struct amdgpu_device *adev = ring->adev;
int r; int r;
if (size) { if (size) {
...@@ -77,7 +76,6 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, ...@@ -77,7 +76,6 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
amdgpu_sync_create(&ib->sync); amdgpu_sync_create(&ib->sync);
ib->ring = ring;
ib->vm = vm; ib->vm = vm;
return 0; return 0;
...@@ -120,11 +118,11 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib) ...@@ -120,11 +118,11 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
* a CONST_IB), it will be put on the ring prior to the DE IB. Prior * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
* to SI there was just a DE IB. * to SI there was just a DE IB.
*/ */
int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ibs, void *owner) struct amdgpu_ib *ibs, void *owner)
{ {
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0]; struct amdgpu_ib *ib = &ibs[0];
struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx, *old_ctx; struct amdgpu_ctx *ctx, *old_ctx;
struct amdgpu_vm *vm; struct amdgpu_vm *vm;
unsigned i; unsigned i;
...@@ -133,7 +131,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, ...@@ -133,7 +131,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
if (num_ibs == 0) if (num_ibs == 0)
return -EINVAL; return -EINVAL;
ring = ibs->ring;
ctx = ibs->ctx; ctx = ibs->ctx;
vm = ibs->vm; vm = ibs->vm;
...@@ -178,7 +175,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, ...@@ -178,7 +175,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
for (i = 0; i < num_ibs; ++i) { for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i]; ib = &ibs[i];
if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) { if (ib->ctx != ctx || ib->vm != vm) {
ring->current_ctx = old_ctx; ring->current_ctx = old_ctx;
amdgpu_ring_undo(ring); amdgpu_ring_undo(ring);
return -EINVAL; return -EINVAL;
......
...@@ -70,7 +70,7 @@ static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) ...@@ -70,7 +70,7 @@ static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
struct fence *fence = amdgpu_sync_get_fence(sync); struct fence *fence = amdgpu_sync_get_fence(sync);
if (fence == NULL && vm && !job->ibs->grabbed_vmid) { if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
struct amdgpu_ring *ring = job->ibs->ring; struct amdgpu_ring *ring = job->ring;
int r; int r;
r = amdgpu_vm_grab_id(vm, ring, sync, r = amdgpu_vm_grab_id(vm, ring, sync,
...@@ -98,7 +98,7 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) ...@@ -98,7 +98,7 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
} }
job = to_amdgpu_job(sched_job); job = to_amdgpu_job(sched_job);
trace_amdgpu_sched_run_job(job); trace_amdgpu_sched_run_job(job);
r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner); r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->owner);
if (r) { if (r) {
DRM_ERROR("Error scheduling IBs (%d)\n", r); DRM_ERROR("Error scheduling IBs (%d)\n", r);
goto err; goto err;
...@@ -142,6 +142,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, ...@@ -142,6 +142,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
*f = fence_get(&job->base.s_fence->base); *f = fence_get(&job->base.s_fence->base);
job->adev = adev; job->adev = adev;
job->ring = ring;
job->ibs = ibs; job->ibs = ibs;
job->num_ibs = num_ibs; job->num_ibs = num_ibs;
job->owner = owner; job->owner = owner;
......
...@@ -38,10 +38,10 @@ TRACE_EVENT(amdgpu_cs, ...@@ -38,10 +38,10 @@ TRACE_EVENT(amdgpu_cs,
TP_fast_assign( TP_fast_assign(
__entry->bo_list = p->bo_list; __entry->bo_list = p->bo_list;
__entry->ring = p->job->ibs[i].ring->idx; __entry->ring = p->job->ring->idx;
__entry->dw = p->job->ibs[i].length_dw; __entry->dw = p->job->ibs[i].length_dw;
__entry->fences = amdgpu_fence_count_emitted( __entry->fences = amdgpu_fence_count_emitted(
p->job->ibs[i].ring); p->job->ring);
), ),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
__entry->bo_list, __entry->ring, __entry->dw, __entry->bo_list, __entry->ring, __entry->dw,
...@@ -65,7 +65,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, ...@@ -65,7 +65,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__entry->sched_job = &job->base; __entry->sched_job = &job->base;
__entry->ib = job->ibs; __entry->ib = job->ibs;
__entry->fence = &job->base.s_fence->base; __entry->fence = &job->base.s_fence->base;
__entry->ring_name = job->ibs[0].ring->name; __entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs; __entry->num_ibs = job->num_ibs;
), ),
TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
...@@ -90,7 +90,7 @@ TRACE_EVENT(amdgpu_sched_run_job, ...@@ -90,7 +90,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__entry->sched_job = &job->base; __entry->sched_job = &job->base;
__entry->ib = job->ibs; __entry->ib = job->ibs;
__entry->fence = &job->base.s_fence->base; __entry->fence = &job->base.s_fence->base;
__entry->ring_name = job->ibs[0].ring->name; __entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs; __entry->num_ibs = job->num_ibs;
), ),
TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
......
...@@ -1030,7 +1030,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, ...@@ -1030,7 +1030,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
if (!ib) if (!ib)
return -ENOMEM; return -ENOMEM;
r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib); r = amdgpu_ib_get(adev, NULL, num_dw * 4, ib);
if (r) { if (r) {
kfree(ib); kfree(ib);
return r; return r;
......
...@@ -867,7 +867,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, ...@@ -867,7 +867,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
r = -ENOMEM; r = -ENOMEM;
goto err; goto err;
} }
r = amdgpu_ib_get(ring, NULL, 64, ib); r = amdgpu_ib_get(adev, NULL, 64, ib);
if (r) if (r)
goto err1; goto err1;
......
...@@ -377,7 +377,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ...@@ -377,7 +377,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!ib) if (!ib)
return -ENOMEM; return -ENOMEM;
r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib); r = amdgpu_ib_get(adev, NULL, ib_size_dw * 4, ib);
if (r) { if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
kfree(ib); kfree(ib);
...@@ -463,7 +463,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ...@@ -463,7 +463,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
if (!ib) if (!ib)
return -ENOMEM; return -ENOMEM;
r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib); r = amdgpu_ib_get(adev, NULL, ib_size_dw * 4, ib);
if (r) { if (r) {
kfree(ib); kfree(ib);
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
......
...@@ -355,7 +355,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, ...@@ -355,7 +355,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (!ib) if (!ib)
goto error; goto error;
r = amdgpu_ib_get(ring, NULL, 64, ib); r = amdgpu_ib_get(adev, NULL, 64, ib);
if (r) if (r)
goto error_free; goto error_free;
...@@ -448,7 +448,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, ...@@ -448,7 +448,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
if (!ib) if (!ib)
return -ENOMEM; return -ENOMEM;
r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); r = amdgpu_ib_get(adev, NULL, ndw * 4, ib);
if (r) { if (r) {
kfree(ib); kfree(ib);
return r; return r;
...@@ -737,7 +737,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, ...@@ -737,7 +737,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (!ib) if (!ib)
return -ENOMEM; return -ENOMEM;
r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); r = amdgpu_ib_get(adev, NULL, ndw * 4, ib);
if (r) { if (r) {
kfree(ib); kfree(ib);
return r; return r;
......
...@@ -621,7 +621,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) ...@@ -621,7 +621,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
tmp = 0xCAFEDEAD; tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp); adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib)); memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(ring, NULL, 256, &ib); r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) { if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
goto err0; goto err0;
......
...@@ -2631,7 +2631,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) ...@@ -2631,7 +2631,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
} }
WREG32(scratch, 0xCAFEDEAD); WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib)); memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(ring, NULL, 256, &ib); r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) { if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
goto err1; goto err1;
......
...@@ -699,7 +699,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) ...@@ -699,7 +699,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
} }
WREG32(scratch, 0xCAFEDEAD); WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib)); memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(ring, NULL, 256, &ib); r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) { if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
goto err1; goto err1;
...@@ -1171,7 +1171,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) ...@@ -1171,7 +1171,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* allocate an indirect buffer to put the commands in */ /* allocate an indirect buffer to put the commands in */
memset(&ib, 0, sizeof(ib)); memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(ring, NULL, total_size, &ib); r = amdgpu_ib_get(adev, NULL, total_size, &ib);
if (r) { if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
return r; return r;
......
...@@ -674,7 +674,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) ...@@ -674,7 +674,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
tmp = 0xCAFEDEAD; tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp); adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib)); memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(ring, NULL, 256, &ib); r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) { if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
goto err0; goto err0;
......
...@@ -825,7 +825,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) ...@@ -825,7 +825,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
tmp = 0xCAFEDEAD; tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp); adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib)); memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(ring, NULL, 256, &ib); r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) { if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
goto err0; goto err0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment