Commit f6828e0c authored by Jordan Crouse, committed by Rob Clark

drm/msm: Disable the RPTR shadow

Disable the RPTR shadow across all targets. It will be selectively
re-enabled later for targets that need it.

Cc: stable@vger.kernel.org
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 7b3f3948
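For context: the CP exposes a read pointer (RPTR) that tells the driver how far the GPU has consumed the ringbuffer, and the "RPTR shadow" is a copy of that pointer which the CP writes into memory so the host can poll it without a register read. The driver needs RPTR to know how much ring space is free before it writes new commands. A minimal stand-alone sketch of that bookkeeping follows; it is not msm code, and the helper name and values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, not part of msm: free dwords between the
 * hardware read pointer and the software write pointer, leaving one
 * slot empty so a full ring is distinguishable from an empty one.
 */
static uint32_t ring_free_dwords(uint32_t rptr, uint32_t wptr, uint32_t size)
{
	return (rptr + size - wptr - 1) % size;
}

int main(void)
{
	/* 512-dword ring: CP has consumed up to 0x40, CPU has written up to 0x80 */
	printf("free dwords: %u\n", (unsigned)ring_free_dwords(0x40, 0x80, 512));
	return 0;
}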
@@ -164,6 +164,11 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
 	/* NOTE: PM4/micro-engine firmware registers look to be the same
 	 * for a2xx and a3xx.. we could possibly push that part down to
 	 * adreno_gpu base class. Or push both PM4 and PFP but
...
@@ -211,6 +211,16 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	/*
+	 * Use the default ringbuffer size and block size but disable the RPTR
+	 * shadow
+	 */
+	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	/* Set the ringbuffer address */
+	gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
 	/* setup access protection: */
 	gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
...
@@ -267,6 +267,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	/*
+	 * Use the default ringbuffer size and block size but disable the RPTR
+	 * shadow
+	 */
+	gpu_write(gpu, REG_A4XX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	/* Set the ringbuffer address */
+	gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
 	/* Load PM4: */
 	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
 	len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
...
@@ -703,8 +703,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
-	a5xx_preempt_hw_init(gpu);
-
 	if (!adreno_is_a510(adreno_gpu))
 		a5xx_gpmu_ucode_init(gpu);
 
@@ -712,6 +710,15 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	/* Set the ringbuffer address */
+	gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
+		gpu->rb[0]->iova);
+
+	gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	a5xx_preempt_hw_init(gpu);
+
 	/* Disable the interrupts through the initial bringup stage */
 	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
...
@@ -695,6 +695,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		goto out;
 
+	/* Set the ringbuffer address */
+	gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
+		gpu->rb[0]->iova);
+
+	gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
 	/* Always come up on rb 0 */
 	a6xx_gpu->cur_ring = gpu->rb[0];
...
@@ -400,26 +400,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
 		ring->memptrs->rptr = 0;
 	}
 
-	/*
-	 * Setup REG_CP_RB_CNTL. The same value is used across targets (with
-	 * the excpetion of A430 that disables the RPTR shadow) - the cacluation
-	 * for the ringbuffer size and block size is moved to msm_gpu.h for the
-	 * pre-processor to deal with and the A430 variant is ORed in here
-	 */
-	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
-		MSM_GPU_RB_CNTL_DEFAULT |
-		(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
-
-	/* Setup ringbuffer address - use ringbuffer[0] for GPU init */
-	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
-		REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
-
-	if (!adreno_is_a430(adreno_gpu)) {
-		adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
-			REG_ADRENO_CP_RB_RPTR_ADDR_HI,
-			rbmemptr(gpu->rb[0], rptr));
-	}
-
 	return 0;
 }
 
@@ -427,11 +407,8 @@ int adreno_hw_init(struct msm_gpu *gpu)
 static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
 		struct msm_ringbuffer *ring)
 {
-	if (adreno_is_a430(adreno_gpu))
-		return ring->memptrs->rptr = adreno_gpu_read(
-			adreno_gpu, REG_ADRENO_CP_RB_RPTR);
-	else
-		return ring->memptrs->rptr;
+	return ring->memptrs->rptr = adreno_gpu_read(
+		adreno_gpu, REG_ADRENO_CP_RB_RPTR);
 }
 
 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
...
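The get_rptr() change above is the visible half of this: with the shadow disabled, the driver can no longer return the memory copy that the CP would have kept up to date, so it reads REG_ADRENO_CP_RB_RPTR from the hardware each time and caches the result in memptrs->rptr. Below is a stand-alone sketch of the two paths, using stub types and a fake register value purely for illustration (not the real msm structures or MMIO access):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the real msm memptrs structure. */
struct fake_memptrs {
	uint32_t rptr;	/* shadow slot the CP would normally write into */
};

static uint32_t fake_cp_rb_rptr_reg = 0x40;	/* pretend register value */

/* Shadow enabled: the CPU just returns the memory copy. */
static uint32_t get_rptr_shadow(struct fake_memptrs *m)
{
	return m->rptr;
}

/* Shadow disabled (this commit): read the "register" each time and
 * cache it, mirroring the new get_rptr() in adreno_gpu.c.
 */
static uint32_t get_rptr_register(struct fake_memptrs *m)
{
	return m->rptr = fake_cp_rb_rptr_reg;
}

int main(void)
{
	struct fake_memptrs m = { .rptr = 0 };

	printf("shadow path:   0x%x\n", (unsigned)get_rptr_shadow(&m));
	printf("register path: 0x%x\n", (unsigned)get_rptr_register(&m));
	return 0;
}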