Commit 5d4af988 authored by Alex Deucher

drm/amdgpu/vce: simplify vce instance setup

Set the me instance in early init and use that rather than
calculating the instance based on the ring pointer.
Reviewed-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 1cf0abb6
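
The change follows one pattern across all three VCE generations in the diff below: the instance index is written into ring->me once, when the ring function tables are assigned, and the register accessors then branch on that index instead of comparing the ring pointer against &adev->vce.ring[0]. The following is a minimal, self-contained sketch of that pattern, not the real driver code; the struct layout, register offsets, VCE_NUM_RINGS, and the rreg32() helper are placeholders invented for illustration.

/*
 * Sketch: store the instance index on the ring at setup time,
 * then select the per-instance register from ring->me.
 */
#include <stdint.h>
#include <stdio.h>

#define VCE_NUM_RINGS   2
#define MM_VCE_RB_RPTR  0x8100  /* made-up register offsets */
#define MM_VCE_RB_RPTR2 0x8104

struct sketch_ring {
	unsigned int me;        /* VCE instance index, set once at init */
};

struct sketch_device {
	struct sketch_ring ring[VCE_NUM_RINGS];
};

/* stand-in for RREG32(): just echoes the offset so the example runs */
static uint32_t rreg32(uint32_t offset)
{
	return offset;
}

/* before: accessors compared 'ring' against &adev->vce.ring[0];
 * after: they only look at the instance index carried by the ring */
static uint32_t ring_get_rptr(struct sketch_ring *ring)
{
	if (ring->me == 0)
		return rreg32(MM_VCE_RB_RPTR);
	else
		return rreg32(MM_VCE_RB_RPTR2);
}

static void set_ring_funcs(struct sketch_device *adev)
{
	unsigned int i;

	/* record the instance once, analogous to adev->vce.ring[i].me = i */
	for (i = 0; i < VCE_NUM_RINGS; i++)
		adev->ring[i].me = i;
}

int main(void)
{
	struct sketch_device adev = { 0 };

	set_ring_funcs(&adev);
	printf("ring1 rptr register: 0x%x\n",
	       (unsigned)ring_get_rptr(&adev.ring[1]));
	return 0;
}

Storing the index on the ring keeps the accessor logic identical for every instance and avoids repeating the pointer comparison in each register read/write helper.
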
@@ -56,7 +56,7 @@ static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
 		return RREG32(mmVCE_RB_RPTR);
 	else
 		return RREG32(mmVCE_RB_RPTR2);
@@ -73,7 +73,7 @@ static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
 		return RREG32(mmVCE_RB_WPTR);
 	else
 		return RREG32(mmVCE_RB_WPTR2);
@@ -90,7 +90,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
 		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
 	else
 		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
@@ -627,8 +627,10 @@ static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
 {
 	int i;
 
-	for (i = 0; i < adev->vce.num_rings; i++)
+	for (i = 0; i < adev->vce.num_rings; i++) {
 		adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
+		adev->vce.ring[i].me = i;
+	}
 }
 
 static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
...
@@ -86,9 +86,9 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
 		v = RREG32(mmVCE_RB_RPTR);
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
 		v = RREG32(mmVCE_RB_RPTR2);
 	else
 		v = RREG32(mmVCE_RB_RPTR3);
@@ -118,9 +118,9 @@ static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
 		v = RREG32(mmVCE_RB_WPTR);
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
 		v = RREG32(mmVCE_RB_WPTR2);
 	else
 		v = RREG32(mmVCE_RB_WPTR3);
@@ -149,9 +149,9 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
 		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
 		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
 	else
 		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
@@ -942,12 +942,16 @@ static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
 	int i;
 
 	if (adev->asic_type >= CHIP_STONEY) {
-		for (i = 0; i < adev->vce.num_rings; i++)
+		for (i = 0; i < adev->vce.num_rings; i++) {
 			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
+			adev->vce.ring[i].me = i;
+		}
 		DRM_INFO("VCE enabled in VM mode\n");
 	} else {
-		for (i = 0; i < adev->vce.num_rings; i++)
+		for (i = 0; i < adev->vce.num_rings; i++) {
 			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
+			adev->vce.ring[i].me = i;
+		}
 		DRM_INFO("VCE enabled in physical mode\n");
 	}
 }
...
@@ -60,9 +60,9 @@ static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
 		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
 		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
 	else
 		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
@@ -82,9 +82,9 @@ static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
 	if (ring->use_doorbell)
 		return adev->wb.wb[ring->wptr_offs];
 
-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
 		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
 		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
 	else
 		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
@@ -108,10 +108,10 @@ static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
 		return;
 	}
 
-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
 		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
 			lower_32_bits(ring->wptr));
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
 		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
 			lower_32_bits(ring->wptr));
 	else
@@ -1088,8 +1088,10 @@ static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 {
 	int i;
 
-	for (i = 0; i < adev->vce.num_rings; i++)
+	for (i = 0; i < adev->vce.num_rings; i++) {
 		adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
+		adev->vce.ring[i].me = i;
+	}
 
 	DRM_INFO("VCE enabled in VM mode\n");
 }
...