Commit 2bb795f5 authored by James Zhu, committed by Alex Deucher

drm/amdgpu/vg20: restructure UVD to support multiple UVD instances

Vega20 has dual UVD. amdgpu_device::uvd needs to be restructured to
support multiple UVD instances. There are no logical changes here.
Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 602ed6c6
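
For orientation, the core of the change is splitting struct amdgpu_uvd into shared state and per-instance state. The condensed layout below is transcribed from the amdgpu_uvd.h hunk later in this diff (field names and placement follow the diff; indentation is approximated and surrounding declarations are omitted):

	#define AMDGPU_MAX_UVD_INSTANCES	2

	/* per-instance state: one entry per UVD hardware block */
	struct amdgpu_uvd_inst {
		struct amdgpu_bo	*vcpu_bo;
		void			*cpu_addr;
		uint64_t		gpu_addr;
		void			*saved_bo;
		atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
		struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
		struct delayed_work	idle_work;
		struct amdgpu_ring	ring;
		struct amdgpu_ring	ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
		struct amdgpu_irq_src	irq;
		struct drm_sched_entity	entity;
		struct drm_sched_entity	entity_enc;
		uint32_t		srbm_soft_reset;
	};

	/* shared state stays at the top level */
	struct amdgpu_uvd {
		const struct firmware	*fw;	/* UVD firmware */
		unsigned		fw_version;
		unsigned		max_handles;
		unsigned		num_enc_rings;
		uint8_t			num_uvd_inst;
		bool			address_64_bit;
		bool			use_ctx_buf;
		struct amdgpu_uvd_inst	inst[AMDGPU_MAX_UVD_INSTANCES];
	};

Call sites change accordingly, e.g. adev->uvd.ring becomes adev->uvd.inst->ring, which is equivalent to addressing instance 0.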
...@@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, ...@@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
uint64_t index; uint64_t index;
if (ring != &adev->uvd.ring) { if (ring != &adev->uvd.inst->ring) {
ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs]; ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4); ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
} else { } else {
/* put fence directly behind firmware */ /* put fence directly behind firmware */
index = ALIGN(adev->uvd.fw->size, 8); index = ALIGN(adev->uvd.fw->size, 8);
ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index; ring->fence_drv.cpu_addr = adev->uvd.inst->cpu_addr + index;
ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index; ring->fence_drv.gpu_addr = adev->uvd.inst->gpu_addr + index;
} }
amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
amdgpu_irq_get(adev, irq_src, irq_type); amdgpu_irq_get(adev, irq_src, irq_type);
......
...@@ -348,7 +348,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ...@@ -348,7 +348,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
break; break;
case AMDGPU_HW_IP_UVD: case AMDGPU_HW_IP_UVD:
type = AMD_IP_BLOCK_TYPE_UVD; type = AMD_IP_BLOCK_TYPE_UVD;
ring_mask = adev->uvd.ring.ready ? 1 : 0; ring_mask = adev->uvd.inst->ring.ready ? 1 : 0;
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
ib_size_alignment = 16; ib_size_alignment = 16;
break; break;
...@@ -362,7 +362,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ...@@ -362,7 +362,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_HW_IP_UVD_ENC: case AMDGPU_HW_IP_UVD_ENC:
type = AMD_IP_BLOCK_TYPE_UVD; type = AMD_IP_BLOCK_TYPE_UVD;
for (i = 0; i < adev->uvd.num_enc_rings; i++) for (i = 0; i < adev->uvd.num_enc_rings; i++)
ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i); ring_mask |= ((adev->uvd.inst->ring_enc[i].ready ? 1 : 0) << i);
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
ib_size_alignment = 1; ib_size_alignment = 1;
break; break;
......
...@@ -77,13 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev, ...@@ -77,13 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
*out_ring = &adev->sdma.instance[ring].ring; *out_ring = &adev->sdma.instance[ring].ring;
break; break;
case AMDGPU_HW_IP_UVD: case AMDGPU_HW_IP_UVD:
*out_ring = &adev->uvd.ring; *out_ring = &adev->uvd.inst->ring;
break; break;
case AMDGPU_HW_IP_VCE: case AMDGPU_HW_IP_VCE:
*out_ring = &adev->vce.ring[ring]; *out_ring = &adev->vce.ring[ring];
break; break;
case AMDGPU_HW_IP_UVD_ENC: case AMDGPU_HW_IP_UVD_ENC:
*out_ring = &adev->uvd.ring_enc[ring]; *out_ring = &adev->uvd.inst->ring_enc[ring];
break; break;
case AMDGPU_HW_IP_VCN_DEC: case AMDGPU_HW_IP_VCN_DEC:
*out_ring = &adev->vcn.ring_dec; *out_ring = &adev->vcn.ring_dec;
......
...@@ -129,7 +129,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) ...@@ -129,7 +129,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
unsigned version_major, version_minor, family_id; unsigned version_major, version_minor, family_id;
int i, r; int i, r;
INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
switch (adev->asic_type) { switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK #ifdef CONFIG_DRM_AMDGPU_CIK
...@@ -237,16 +237,16 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) ...@@ -237,16 +237,16 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst->vcpu_bo,
&adev->uvd.gpu_addr, &adev->uvd.cpu_addr); &adev->uvd.inst->gpu_addr, &adev->uvd.inst->cpu_addr);
if (r) { if (r) {
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r; return r;
} }
ring = &adev->uvd.ring; ring = &adev->uvd.inst->ring;
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity, r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity,
rq, NULL); rq, NULL);
if (r != 0) { if (r != 0) {
DRM_ERROR("Failed setting up UVD run queue.\n"); DRM_ERROR("Failed setting up UVD run queue.\n");
...@@ -254,8 +254,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) ...@@ -254,8 +254,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
} }
for (i = 0; i < adev->uvd.max_handles; ++i) { for (i = 0; i < adev->uvd.max_handles; ++i) {
atomic_set(&adev->uvd.handles[i], 0); atomic_set(&adev->uvd.inst->handles[i], 0);
adev->uvd.filp[i] = NULL; adev->uvd.inst->filp[i] = NULL;
} }
/* from uvd v5.0 HW addressing capacity increased to 64 bits */ /* from uvd v5.0 HW addressing capacity increased to 64 bits */
...@@ -285,18 +285,18 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) ...@@ -285,18 +285,18 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{ {
int i; int i;
kfree(adev->uvd.saved_bo); kfree(adev->uvd.inst->saved_bo);
drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); drm_sched_entity_fini(&adev->uvd.inst->ring.sched, &adev->uvd.inst->entity);
amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo, amdgpu_bo_free_kernel(&adev->uvd.inst->vcpu_bo,
&adev->uvd.gpu_addr, &adev->uvd.inst->gpu_addr,
(void **)&adev->uvd.cpu_addr); (void **)&adev->uvd.inst->cpu_addr);
amdgpu_ring_fini(&adev->uvd.ring); amdgpu_ring_fini(&adev->uvd.inst->ring);
for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]); amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
release_firmware(adev->uvd.fw); release_firmware(adev->uvd.fw);
...@@ -309,29 +309,29 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) ...@@ -309,29 +309,29 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
void *ptr; void *ptr;
int i; int i;
if (adev->uvd.vcpu_bo == NULL) if (adev->uvd.inst->vcpu_bo == NULL)
return 0; return 0;
cancel_delayed_work_sync(&adev->uvd.idle_work); cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
/* only valid for physical mode */ /* only valid for physical mode */
if (adev->asic_type < CHIP_POLARIS10) { if (adev->asic_type < CHIP_POLARIS10) {
for (i = 0; i < adev->uvd.max_handles; ++i) for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i])) if (atomic_read(&adev->uvd.inst->handles[i]))
break; break;
if (i == adev->uvd.max_handles) if (i == adev->uvd.max_handles)
return 0; return 0;
} }
size = amdgpu_bo_size(adev->uvd.vcpu_bo); size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
ptr = adev->uvd.cpu_addr; ptr = adev->uvd.inst->cpu_addr;
adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); adev->uvd.inst->saved_bo = kmalloc(size, GFP_KERNEL);
if (!adev->uvd.saved_bo) if (!adev->uvd.inst->saved_bo)
return -ENOMEM; return -ENOMEM;
memcpy_fromio(adev->uvd.saved_bo, ptr, size); memcpy_fromio(adev->uvd.inst->saved_bo, ptr, size);
return 0; return 0;
} }
...@@ -341,16 +341,16 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) ...@@ -341,16 +341,16 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
unsigned size; unsigned size;
void *ptr; void *ptr;
if (adev->uvd.vcpu_bo == NULL) if (adev->uvd.inst->vcpu_bo == NULL)
return -EINVAL; return -EINVAL;
size = amdgpu_bo_size(adev->uvd.vcpu_bo); size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
ptr = adev->uvd.cpu_addr; ptr = adev->uvd.inst->cpu_addr;
if (adev->uvd.saved_bo != NULL) { if (adev->uvd.inst->saved_bo != NULL) {
memcpy_toio(ptr, adev->uvd.saved_bo, size); memcpy_toio(ptr, adev->uvd.inst->saved_bo, size);
kfree(adev->uvd.saved_bo); kfree(adev->uvd.inst->saved_bo);
adev->uvd.saved_bo = NULL; adev->uvd.inst->saved_bo = NULL;
} else { } else {
const struct common_firmware_header *hdr; const struct common_firmware_header *hdr;
unsigned offset; unsigned offset;
...@@ -358,14 +358,14 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) ...@@ -358,14 +358,14 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->uvd.fw->data; hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes); offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset, memcpy_toio(adev->uvd.inst->cpu_addr, adev->uvd.fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes)); le32_to_cpu(hdr->ucode_size_bytes));
size -= le32_to_cpu(hdr->ucode_size_bytes); size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr += le32_to_cpu(hdr->ucode_size_bytes); ptr += le32_to_cpu(hdr->ucode_size_bytes);
} }
memset_io(ptr, 0, size); memset_io(ptr, 0, size);
/* to restore uvd fence seq */ /* to restore uvd fence seq */
amdgpu_fence_driver_force_completion(&adev->uvd.ring); amdgpu_fence_driver_force_completion(&adev->uvd.inst->ring);
} }
return 0; return 0;
...@@ -373,12 +373,12 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) ...@@ -373,12 +373,12 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{ {
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
int i, r; int i, r;
for (i = 0; i < adev->uvd.max_handles; ++i) { for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]); uint32_t handle = atomic_read(&adev->uvd.inst->handles[i]);
if (handle != 0 && adev->uvd.filp[i] == filp) { if (handle != 0 && adev->uvd.inst->filp[i] == filp) {
struct dma_fence *fence; struct dma_fence *fence;
r = amdgpu_uvd_get_destroy_msg(ring, handle, r = amdgpu_uvd_get_destroy_msg(ring, handle,
...@@ -391,8 +391,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) ...@@ -391,8 +391,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
dma_fence_wait(fence, false); dma_fence_wait(fence, false);
dma_fence_put(fence); dma_fence_put(fence);
adev->uvd.filp[i] = NULL; adev->uvd.inst->filp[i] = NULL;
atomic_set(&adev->uvd.handles[i], 0); atomic_set(&adev->uvd.inst->handles[i], 0);
} }
} }
} }
...@@ -696,13 +696,13 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, ...@@ -696,13 +696,13 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
/* try to alloc a new handle */ /* try to alloc a new handle */
for (i = 0; i < adev->uvd.max_handles; ++i) { for (i = 0; i < adev->uvd.max_handles; ++i) {
if (atomic_read(&adev->uvd.handles[i]) == handle) { if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
DRM_ERROR("Handle 0x%x already in use!\n", handle); DRM_ERROR("Handle 0x%x already in use!\n", handle);
return -EINVAL; return -EINVAL;
} }
if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { if (!atomic_cmpxchg(&adev->uvd.inst->handles[i], 0, handle)) {
adev->uvd.filp[i] = ctx->parser->filp; adev->uvd.inst->filp[i] = ctx->parser->filp;
return 0; return 0;
} }
} }
...@@ -719,8 +719,8 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, ...@@ -719,8 +719,8 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
/* validate the handle */ /* validate the handle */
for (i = 0; i < adev->uvd.max_handles; ++i) { for (i = 0; i < adev->uvd.max_handles; ++i) {
if (atomic_read(&adev->uvd.handles[i]) == handle) { if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
if (adev->uvd.filp[i] != ctx->parser->filp) { if (adev->uvd.inst->filp[i] != ctx->parser->filp) {
DRM_ERROR("UVD handle collision detected!\n"); DRM_ERROR("UVD handle collision detected!\n");
return -EINVAL; return -EINVAL;
} }
...@@ -734,7 +734,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, ...@@ -734,7 +734,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
case 2: case 2:
/* it's a destroy msg, free the handle */ /* it's a destroy msg, free the handle */
for (i = 0; i < adev->uvd.max_handles; ++i) for (i = 0; i < adev->uvd.max_handles; ++i)
atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); atomic_cmpxchg(&adev->uvd.inst->handles[i], handle, 0);
amdgpu_bo_kunmap(bo); amdgpu_bo_kunmap(bo);
return 0; return 0;
...@@ -810,7 +810,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) ...@@ -810,7 +810,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
} }
if ((cmd == 0 || cmd == 0x3) && if ((cmd == 0 || cmd == 0x3) &&
(start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) { (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
start, end); start, end);
return -EINVAL; return -EINVAL;
...@@ -1043,7 +1043,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ...@@ -1043,7 +1043,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r) if (r)
goto err_free; goto err_free;
r = amdgpu_job_submit(job, ring, &adev->uvd.entity, r = amdgpu_job_submit(job, ring, &adev->uvd.inst->entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f); AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r) if (r)
goto err_free; goto err_free;
...@@ -1131,8 +1131,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ...@@ -1131,8 +1131,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
static void amdgpu_uvd_idle_work_handler(struct work_struct *work) static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{ {
struct amdgpu_device *adev = struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, uvd.idle_work.work); container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring); unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring);
if (fences == 0) { if (fences == 0) {
if (adev->pm.dpm_enabled) { if (adev->pm.dpm_enabled) {
...@@ -1146,7 +1146,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) ...@@ -1146,7 +1146,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
AMD_CG_STATE_GATE); AMD_CG_STATE_GATE);
} }
} else { } else {
schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
} }
} }
...@@ -1158,7 +1158,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) ...@@ -1158,7 +1158,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
if (amdgpu_sriov_vf(adev)) if (amdgpu_sriov_vf(adev))
return; return;
set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
if (set_clocks) { if (set_clocks) {
if (adev->pm.dpm_enabled) { if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, true); amdgpu_dpm_enable_uvd(adev, true);
...@@ -1175,7 +1175,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) ...@@ -1175,7 +1175,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{ {
if (!amdgpu_sriov_vf(ring->adev)) if (!amdgpu_sriov_vf(ring->adev))
schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
} }
/** /**
...@@ -1237,7 +1237,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev) ...@@ -1237,7 +1237,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
* necessarily linear. So we need to count * necessarily linear. So we need to count
* all non-zero handles. * all non-zero handles.
*/ */
if (atomic_read(&adev->uvd.handles[i])) if (atomic_read(&adev->uvd.inst->handles[i]))
used_handles++; used_handles++;
} }
......
...@@ -31,30 +31,37 @@ ...@@ -31,30 +31,37 @@
#define AMDGPU_UVD_SESSION_SIZE (50*1024) #define AMDGPU_UVD_SESSION_SIZE (50*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET 256 #define AMDGPU_UVD_FIRMWARE_OFFSET 256
#define AMDGPU_MAX_UVD_INSTANCES 2
#define AMDGPU_UVD_FIRMWARE_SIZE(adev) \ #define AMDGPU_UVD_FIRMWARE_SIZE(adev) \
(AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \ (AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \
8) - AMDGPU_UVD_FIRMWARE_OFFSET) 8) - AMDGPU_UVD_FIRMWARE_OFFSET)
struct amdgpu_uvd { struct amdgpu_uvd_inst {
struct amdgpu_bo *vcpu_bo; struct amdgpu_bo *vcpu_bo;
void *cpu_addr; void *cpu_addr;
uint64_t gpu_addr; uint64_t gpu_addr;
unsigned fw_version;
void *saved_bo; void *saved_bo;
unsigned max_handles;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work; struct delayed_work idle_work;
const struct firmware *fw; /* UVD firmware */
struct amdgpu_ring ring; struct amdgpu_ring ring;
struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
struct amdgpu_irq_src irq; struct amdgpu_irq_src irq;
bool address_64_bit;
bool use_ctx_buf;
struct drm_sched_entity entity; struct drm_sched_entity entity;
struct drm_sched_entity entity_enc; struct drm_sched_entity entity_enc;
uint32_t srbm_soft_reset; uint32_t srbm_soft_reset;
};
struct amdgpu_uvd {
const struct firmware *fw; /* UVD firmware */
unsigned fw_version;
unsigned max_handles;
unsigned num_enc_rings; unsigned num_enc_rings;
uint8_t num_uvd_inst;
bool address_64_bit;
bool use_ctx_buf;
struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
}; };
int amdgpu_uvd_sw_init(struct amdgpu_device *adev); int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
......
...@@ -93,6 +93,7 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring) ...@@ -93,6 +93,7 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
static int uvd_v4_2_early_init(void *handle) static int uvd_v4_2_early_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->uvd.num_uvd_inst = 1;
uvd_v4_2_set_ring_funcs(adev); uvd_v4_2_set_ring_funcs(adev);
uvd_v4_2_set_irq_funcs(adev); uvd_v4_2_set_irq_funcs(adev);
...@@ -107,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle) ...@@ -107,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle)
int r; int r;
/* UVD TRAP */ /* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
if (r) if (r)
return r; return r;
...@@ -119,9 +120,9 @@ static int uvd_v4_2_sw_init(void *handle) ...@@ -119,9 +120,9 @@ static int uvd_v4_2_sw_init(void *handle)
if (r) if (r)
return r; return r;
ring = &adev->uvd.ring; ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd"); sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
return r; return r;
} }
...@@ -150,7 +151,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, ...@@ -150,7 +151,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
static int uvd_v4_2_hw_init(void *handle) static int uvd_v4_2_hw_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp; uint32_t tmp;
int r; int r;
...@@ -208,7 +209,7 @@ static int uvd_v4_2_hw_init(void *handle) ...@@ -208,7 +209,7 @@ static int uvd_v4_2_hw_init(void *handle)
static int uvd_v4_2_hw_fini(void *handle) static int uvd_v4_2_hw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0) if (RREG32(mmUVD_STATUS) != 0)
uvd_v4_2_stop(adev); uvd_v4_2_stop(adev);
...@@ -251,7 +252,7 @@ static int uvd_v4_2_resume(void *handle) ...@@ -251,7 +252,7 @@ static int uvd_v4_2_resume(void *handle)
*/ */
static int uvd_v4_2_start(struct amdgpu_device *adev) static int uvd_v4_2_start(struct amdgpu_device *adev)
{ {
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t rb_bufsz; uint32_t rb_bufsz;
int i, j, r; int i, j, r;
u32 tmp; u32 tmp;
...@@ -536,7 +537,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) ...@@ -536,7 +537,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
uint32_t size; uint32_t size;
/* programm the VCPU memory controller bits 0-27 */ /* programm the VCPU memory controller bits 0-27 */
addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3; size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr); WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
WREG32(mmUVD_VCPU_CACHE_SIZE0, size); WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
...@@ -553,11 +554,11 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) ...@@ -553,11 +554,11 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CACHE_SIZE2, size); WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
/* bits 28-31 */ /* bits 28-31 */
addr = (adev->uvd.gpu_addr >> 28) & 0xF; addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
/* bits 32-39 */ /* bits 32-39 */
addr = (adev->uvd.gpu_addr >> 32) & 0xFF; addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
...@@ -664,7 +665,7 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, ...@@ -664,7 +665,7 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry) struct amdgpu_iv_entry *entry)
{ {
DRM_DEBUG("IH: UVD TRAP\n"); DRM_DEBUG("IH: UVD TRAP\n");
amdgpu_fence_process(&adev->uvd.ring); amdgpu_fence_process(&adev->uvd.inst->ring);
return 0; return 0;
} }
...@@ -753,7 +754,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { ...@@ -753,7 +754,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{ {
adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs; adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
} }
static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
...@@ -763,8 +764,8 @@ static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { ...@@ -763,8 +764,8 @@ static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev) static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{ {
adev->uvd.irq.num_types = 1; adev->uvd.inst->irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs; adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
} }
const struct amdgpu_ip_block_version uvd_v4_2_ip_block = const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
......
...@@ -89,6 +89,7 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) ...@@ -89,6 +89,7 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
static int uvd_v5_0_early_init(void *handle) static int uvd_v5_0_early_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->uvd.num_uvd_inst = 1;
uvd_v5_0_set_ring_funcs(adev); uvd_v5_0_set_ring_funcs(adev);
uvd_v5_0_set_irq_funcs(adev); uvd_v5_0_set_irq_funcs(adev);
...@@ -103,7 +104,7 @@ static int uvd_v5_0_sw_init(void *handle) ...@@ -103,7 +104,7 @@ static int uvd_v5_0_sw_init(void *handle)
int r; int r;
/* UVD TRAP */ /* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
if (r) if (r)
return r; return r;
...@@ -115,9 +116,9 @@ static int uvd_v5_0_sw_init(void *handle) ...@@ -115,9 +116,9 @@ static int uvd_v5_0_sw_init(void *handle)
if (r) if (r)
return r; return r;
ring = &adev->uvd.ring; ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd"); sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
return r; return r;
} }
...@@ -144,7 +145,7 @@ static int uvd_v5_0_sw_fini(void *handle) ...@@ -144,7 +145,7 @@ static int uvd_v5_0_sw_fini(void *handle)
static int uvd_v5_0_hw_init(void *handle) static int uvd_v5_0_hw_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp; uint32_t tmp;
int r; int r;
...@@ -204,7 +205,7 @@ static int uvd_v5_0_hw_init(void *handle) ...@@ -204,7 +205,7 @@ static int uvd_v5_0_hw_init(void *handle)
static int uvd_v5_0_hw_fini(void *handle) static int uvd_v5_0_hw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0) if (RREG32(mmUVD_STATUS) != 0)
uvd_v5_0_stop(adev); uvd_v5_0_stop(adev);
...@@ -253,9 +254,9 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) ...@@ -253,9 +254,9 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
/* programm memory controller bits 0-27 */ /* programm memory controller bits 0-27 */
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.gpu_addr)); lower_32_bits(adev->uvd.inst->gpu_addr));
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->uvd.gpu_addr)); upper_32_bits(adev->uvd.inst->gpu_addr));
offset = AMDGPU_UVD_FIRMWARE_OFFSET; offset = AMDGPU_UVD_FIRMWARE_OFFSET;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev); size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
...@@ -287,7 +288,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) ...@@ -287,7 +288,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
*/ */
static int uvd_v5_0_start(struct amdgpu_device *adev) static int uvd_v5_0_start(struct amdgpu_device *adev)
{ {
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t rb_bufsz, tmp; uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl; uint32_t lmi_swap_cntl;
uint32_t mp_swap_cntl; uint32_t mp_swap_cntl;
...@@ -586,7 +587,7 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, ...@@ -586,7 +587,7 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry) struct amdgpu_iv_entry *entry)
{ {
DRM_DEBUG("IH: UVD TRAP\n"); DRM_DEBUG("IH: UVD TRAP\n");
amdgpu_fence_process(&adev->uvd.ring); amdgpu_fence_process(&adev->uvd.inst->ring);
return 0; return 0;
} }
...@@ -861,7 +862,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { ...@@ -861,7 +862,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{ {
adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs; adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
} }
static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
...@@ -871,8 +872,8 @@ static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { ...@@ -871,8 +872,8 @@ static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev) static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{ {
adev->uvd.irq.num_types = 1; adev->uvd.inst->irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs; adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
} }
const struct amdgpu_ip_block_version uvd_v5_0_ip_block = const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
......
...@@ -91,7 +91,7 @@ static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring) ...@@ -91,7 +91,7 @@ static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
if (ring == &adev->uvd.ring_enc[0]) if (ring == &adev->uvd.inst->ring_enc[0])
return RREG32(mmUVD_RB_RPTR); return RREG32(mmUVD_RB_RPTR);
else else
return RREG32(mmUVD_RB_RPTR2); return RREG32(mmUVD_RB_RPTR2);
...@@ -121,7 +121,7 @@ static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring) ...@@ -121,7 +121,7 @@ static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
if (ring == &adev->uvd.ring_enc[0]) if (ring == &adev->uvd.inst->ring_enc[0])
return RREG32(mmUVD_RB_WPTR); return RREG32(mmUVD_RB_WPTR);
else else
return RREG32(mmUVD_RB_WPTR2); return RREG32(mmUVD_RB_WPTR2);
...@@ -152,7 +152,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring) ...@@ -152,7 +152,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
if (ring == &adev->uvd.ring_enc[0]) if (ring == &adev->uvd.inst->ring_enc[0])
WREG32(mmUVD_RB_WPTR, WREG32(mmUVD_RB_WPTR,
lower_32_bits(ring->wptr)); lower_32_bits(ring->wptr));
else else
...@@ -375,6 +375,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) ...@@ -375,6 +375,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
static int uvd_v6_0_early_init(void *handle) static int uvd_v6_0_early_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->uvd.num_uvd_inst = 1;
if (!(adev->flags & AMD_IS_APU) && if (!(adev->flags & AMD_IS_APU) &&
(RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK)) (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
...@@ -399,14 +400,14 @@ static int uvd_v6_0_sw_init(void *handle) ...@@ -399,14 +400,14 @@ static int uvd_v6_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */ /* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
if (r) if (r)
return r; return r;
/* UVD ENC TRAP */ /* UVD ENC TRAP */
if (uvd_v6_0_enc_support(adev)) { if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq); r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq);
if (r) if (r)
return r; return r;
} }
...@@ -418,17 +419,17 @@ static int uvd_v6_0_sw_init(void *handle) ...@@ -418,17 +419,17 @@ static int uvd_v6_0_sw_init(void *handle)
if (!uvd_v6_0_enc_support(adev)) { if (!uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) for (i = 0; i < adev->uvd.num_enc_rings; ++i)
adev->uvd.ring_enc[i].funcs = NULL; adev->uvd.inst->ring_enc[i].funcs = NULL;
adev->uvd.irq.num_types = 1; adev->uvd.inst->irq.num_types = 1;
adev->uvd.num_enc_rings = 0; adev->uvd.num_enc_rings = 0;
DRM_INFO("UVD ENC is disabled\n"); DRM_INFO("UVD ENC is disabled\n");
} else { } else {
struct drm_sched_rq *rq; struct drm_sched_rq *rq;
ring = &adev->uvd.ring_enc[0]; ring = &adev->uvd.inst->ring_enc[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
rq, NULL); rq, NULL);
if (r) { if (r) {
DRM_ERROR("Failed setting up UVD ENC run queue.\n"); DRM_ERROR("Failed setting up UVD ENC run queue.\n");
...@@ -440,17 +441,17 @@ static int uvd_v6_0_sw_init(void *handle) ...@@ -440,17 +441,17 @@ static int uvd_v6_0_sw_init(void *handle)
if (r) if (r)
return r; return r;
ring = &adev->uvd.ring; ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd"); sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r) if (r)
return r; return r;
if (uvd_v6_0_enc_support(adev)) { if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i]; ring = &adev->uvd.inst->ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i); sprintf(ring->name, "uvd_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r) if (r)
return r; return r;
} }
...@@ -469,10 +470,10 @@ static int uvd_v6_0_sw_fini(void *handle) ...@@ -469,10 +470,10 @@ static int uvd_v6_0_sw_fini(void *handle)
return r; return r;
if (uvd_v6_0_enc_support(adev)) { if (uvd_v6_0_enc_support(adev)) {
drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
for (i = 0; i < adev->uvd.num_enc_rings; ++i) for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]); amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
} }
return amdgpu_uvd_sw_fini(adev); return amdgpu_uvd_sw_fini(adev);
...@@ -488,7 +489,7 @@ static int uvd_v6_0_sw_fini(void *handle) ...@@ -488,7 +489,7 @@ static int uvd_v6_0_sw_fini(void *handle)
static int uvd_v6_0_hw_init(void *handle) static int uvd_v6_0_hw_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp; uint32_t tmp;
int i, r; int i, r;
...@@ -532,7 +533,7 @@ static int uvd_v6_0_hw_init(void *handle) ...@@ -532,7 +533,7 @@ static int uvd_v6_0_hw_init(void *handle)
if (uvd_v6_0_enc_support(adev)) { if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i]; ring = &adev->uvd.inst->ring_enc[i];
ring->ready = true; ring->ready = true;
r = amdgpu_ring_test_ring(ring); r = amdgpu_ring_test_ring(ring);
if (r) { if (r) {
...@@ -563,7 +564,7 @@ static int uvd_v6_0_hw_init(void *handle) ...@@ -563,7 +564,7 @@ static int uvd_v6_0_hw_init(void *handle)
static int uvd_v6_0_hw_fini(void *handle) static int uvd_v6_0_hw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0) if (RREG32(mmUVD_STATUS) != 0)
uvd_v6_0_stop(adev); uvd_v6_0_stop(adev);
...@@ -611,9 +612,9 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) ...@@ -611,9 +612,9 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
/* programm memory controller bits 0-27 */ /* programm memory controller bits 0-27 */
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.gpu_addr)); lower_32_bits(adev->uvd.inst->gpu_addr));
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->uvd.gpu_addr)); upper_32_bits(adev->uvd.inst->gpu_addr));
offset = AMDGPU_UVD_FIRMWARE_OFFSET; offset = AMDGPU_UVD_FIRMWARE_OFFSET;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev); size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
...@@ -726,7 +727,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, ...@@ -726,7 +727,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
*/ */
static int uvd_v6_0_start(struct amdgpu_device *adev) static int uvd_v6_0_start(struct amdgpu_device *adev)
{ {
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t rb_bufsz, tmp; uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl; uint32_t lmi_swap_cntl;
uint32_t mp_swap_cntl; uint32_t mp_swap_cntl;
...@@ -866,14 +867,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) ...@@ -866,14 +867,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0); WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
if (uvd_v6_0_enc_support(adev)) { if (uvd_v6_0_enc_support(adev)) {
ring = &adev->uvd.ring_enc[0]; ring = &adev->uvd.inst->ring_enc[0];
WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32(mmUVD_RB_SIZE, ring->ring_size / 4); WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
ring = &adev->uvd.ring_enc[1]; ring = &adev->uvd.inst->ring_enc[1];
WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
...@@ -1158,10 +1159,10 @@ static bool uvd_v6_0_check_soft_reset(void *handle) ...@@ -1158,10 +1159,10 @@ static bool uvd_v6_0_check_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
if (srbm_soft_reset) { if (srbm_soft_reset) {
adev->uvd.srbm_soft_reset = srbm_soft_reset; adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
return true; return true;
} else { } else {
adev->uvd.srbm_soft_reset = 0; adev->uvd.inst->srbm_soft_reset = 0;
return false; return false;
} }
} }
...@@ -1170,7 +1171,7 @@ static int uvd_v6_0_pre_soft_reset(void *handle) ...@@ -1170,7 +1171,7 @@ static int uvd_v6_0_pre_soft_reset(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->uvd.srbm_soft_reset) if (!adev->uvd.inst->srbm_soft_reset)
return 0; return 0;
uvd_v6_0_stop(adev); uvd_v6_0_stop(adev);
...@@ -1182,9 +1183,9 @@ static int uvd_v6_0_soft_reset(void *handle) ...@@ -1182,9 +1183,9 @@ static int uvd_v6_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset; u32 srbm_soft_reset;
if (!adev->uvd.srbm_soft_reset) if (!adev->uvd.inst->srbm_soft_reset)
return 0; return 0;
srbm_soft_reset = adev->uvd.srbm_soft_reset; srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;
if (srbm_soft_reset) { if (srbm_soft_reset) {
u32 tmp; u32 tmp;
...@@ -1212,7 +1213,7 @@ static int uvd_v6_0_post_soft_reset(void *handle) ...@@ -1212,7 +1213,7 @@ static int uvd_v6_0_post_soft_reset(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->uvd.srbm_soft_reset) if (!adev->uvd.inst->srbm_soft_reset)
return 0; return 0;
mdelay(5); mdelay(5);
...@@ -1238,17 +1239,17 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, ...@@ -1238,17 +1239,17 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
switch (entry->src_id) { switch (entry->src_id) {
case 124: case 124:
amdgpu_fence_process(&adev->uvd.ring); amdgpu_fence_process(&adev->uvd.inst->ring);
break; break;
case 119: case 119:
if (likely(uvd_v6_0_enc_support(adev))) if (likely(uvd_v6_0_enc_support(adev)))
amdgpu_fence_process(&adev->uvd.ring_enc[0]); amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
else else
int_handled = false; int_handled = false;
break; break;
case 120: case 120:
if (likely(uvd_v6_0_enc_support(adev))) if (likely(uvd_v6_0_enc_support(adev)))
amdgpu_fence_process(&adev->uvd.ring_enc[1]); amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
else else
int_handled = false; int_handled = false;
break; break;
...@@ -1612,10 +1613,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = { ...@@ -1612,10 +1613,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{ {
if (adev->asic_type >= CHIP_POLARIS10) { if (adev->asic_type >= CHIP_POLARIS10) {
adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs; adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
DRM_INFO("UVD is enabled in VM mode\n"); DRM_INFO("UVD is enabled in VM mode\n");
} else { } else {
adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs; adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
DRM_INFO("UVD is enabled in physical mode\n"); DRM_INFO("UVD is enabled in physical mode\n");
} }
} }
...@@ -1625,7 +1626,7 @@ static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev) ...@@ -1625,7 +1626,7 @@ static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
int i; int i;
for (i = 0; i < adev->uvd.num_enc_rings; ++i) for (i = 0; i < adev->uvd.num_enc_rings; ++i)
adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs; adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
DRM_INFO("UVD ENC is enabled in VM mode\n"); DRM_INFO("UVD ENC is enabled in VM mode\n");
} }
...@@ -1638,11 +1639,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { ...@@ -1638,11 +1639,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev) static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{ {
if (uvd_v6_0_enc_support(adev)) if (uvd_v6_0_enc_support(adev))
adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1; adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
else else
adev->uvd.irq.num_types = 1; adev->uvd.inst->irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs; adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
} }
const struct amdgpu_ip_block_version uvd_v6_0_ip_block = const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
......
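
With the per-instance fields grouped in struct amdgpu_uvd_inst and num_uvd_inst recorded in early_init, follow-up patches can iterate over instances instead of assuming a single UVD. A minimal sketch of that pattern (illustrative only; this commit still addresses instance 0 everywhere via the inst-> shorthand):

	/* Illustrative sketch, not part of this commit: per-instance iteration
	 * once num_uvd_inst can be greater than 1 (e.g. on Vega20).
	 */
	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		struct amdgpu_ring *ring = &adev->uvd.inst[j].ring;

		/* per-instance ring/irq setup or teardown would go here */
	}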