Commit 8052ff43 authored by Dave Airlie

Merge tag 'drm-msm-fixes-2020-09-04' of https://gitlab.freedesktop.org/drm/msm into drm-fixes

A few fixes for a potential RPTR corruption issue.
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvnr6Nhz2J0sjv2G+j7iceVtaDiJDT8T88uW6jiBfOGKQ@mail.gmail.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
parents f4d51dff f6828e0c
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -164,6 +164,11 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
 	/* NOTE: PM4/micro-engine firmware registers look to be the same
 	 * for a2xx and a3xx.. we could possibly push that part down to
 	 * adreno_gpu base class. Or push both PM4 and PFP but
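Context for the per-target hunks in this series: each one ORs AXXX_CP_RB_CNTL_NO_UPDATE into MSM_GPU_RB_CNTL_DEFAULT, which stops the CP from periodically writing its read pointer back to a shadow location in memory. As a sketch of where the default value comes from, mirroring the definitions in msm_gpu.h (the BUFSZ/BLKSZ field helpers are from the generated AXXX register headers; treat the exact encoding here as a reference note, not part of this diff):

	/* Ringbuffer size and block size, encoded as log2 of 64-bit
	 * units, the way the CP expects them in REG_AXXX_CP_RB_CNTL. */
	#define MSM_GPU_RINGBUFFER_SZ       SZ_32K
	#define MSM_GPU_RINGBUFFER_BLKSIZE  32

	#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		 AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))

ORing in AXXX_CP_RB_CNTL_NO_UPDATE keeps the same size/block encoding but disables the memory-backed RPTR shadow; the same pattern repeats for a3xx, a4xx, a5xx and a6xx below.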
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -211,6 +211,16 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	/*
+	 * Use the default ringbuffer size and block size but disable the RPTR
+	 * shadow
+	 */
+	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	/* Set the ringbuffer address */
+	gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
 	/* setup access protection: */
 	gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -267,6 +267,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	/*
+	 * Use the default ringbuffer size and block size but disable the RPTR
+	 * shadow
+	 */
+	gpu_write(gpu, REG_A4XX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	/* Set the ringbuffer address */
+	gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
 	/* Load PM4: */
 	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
 	len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -703,8 +703,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
-	a5xx_preempt_hw_init(gpu);
-
 	if (!adreno_is_a510(adreno_gpu))
 		a5xx_gpmu_ucode_init(gpu);
@@ -712,6 +710,15 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	/* Set the ringbuffer address */
+	gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
+		gpu->rb[0]->iova);
+
+	gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	a5xx_preempt_hw_init(gpu);
+
 	/* Disable the interrupts through the initial bringup stage */
 	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
@@ -1511,7 +1518,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
 	check_speed_bin(&pdev->dev);
 
-	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+	/* Restricting nr_rings to 1 to temporarily disable preemption */
+	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
 	if (ret) {
 		a5xx_destroy(&(a5xx_gpu->base.base));
 		return ERR_PTR(ret);
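Dropping nr_rings from 4 to 1 is enough to disable preemption because the a5xx preemption code bails out when there is only one ring. A trimmed sketch of that early-out, paraphrasing a5xx_preempt_init() in a5xx_preempt.c (not part of this diff):

	/* With a single ring there is nothing to preempt between, so the
	 * state machine is never armed and no preempt records are set up. */
	void a5xx_preempt_init(struct msm_gpu *gpu)
	{
		struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
		struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
		int i;

		/* No preemption if we only have one ring */
		if (gpu->nr_rings <= 1)
			return;

		for (i = 0; i < gpu->nr_rings; i++) {
			if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) {
				/* On failure, clean up and force preemption off */
				a5xx_preempt_fini(gpu);
				gpu->nr_rings = 1;
				return;
			}
		}

		timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
	}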
drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -31,6 +31,7 @@ struct a5xx_gpu {
 	struct msm_ringbuffer *next_ring;
 
 	struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+	struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
 	struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
 	uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -226,19 +226,31 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
 	struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
 	struct msm_gpu *gpu = &adreno_gpu->base;
 	struct a5xx_preempt_record *ptr;
-	struct drm_gem_object *bo = NULL;
-	u64 iova = 0;
+	void *counters;
+	struct drm_gem_object *bo = NULL, *counters_bo = NULL;
+	u64 iova = 0, counters_iova = 0;
 
 	ptr = msm_gem_kernel_new(gpu->dev,
 		A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
-		MSM_BO_UNCACHED, gpu->aspace, &bo, &iova);
+		MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
 
 	if (IS_ERR(ptr))
 		return PTR_ERR(ptr);
 
+	/* The buffer to store counters needs to be unprivileged */
+	counters = msm_gem_kernel_new(gpu->dev,
+		A5XX_PREEMPT_COUNTER_SIZE,
+		MSM_BO_UNCACHED, gpu->aspace, &counters_bo, &counters_iova);
+	if (IS_ERR(counters)) {
+		msm_gem_kernel_put(bo, gpu->aspace, true);
+		return PTR_ERR(counters);
+	}
+
 	msm_gem_object_set_name(bo, "preempt");
+	msm_gem_object_set_name(counters_bo, "preempt_counters");
 
 	a5xx_gpu->preempt_bo[ring->id] = bo;
+	a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo;
 	a5xx_gpu->preempt_iova[ring->id] = iova;
 	a5xx_gpu->preempt[ring->id] = ptr;
@@ -249,7 +261,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
 	ptr->data = 0;
 	ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
 	ptr->rptr_addr = rbmemptr(ring, rptr);
-	ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
+	ptr->counter = counters_iova;
 
 	return 0;
 }
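The point of the split: the preemption record itself is now allocated with MSM_BO_MAP_PRIV so unprivileged GPU accesses cannot scribble over it, while ptr->counter must keep pointing at memory that unprivileged counter writes can still reach, hence the second buffer object. For reference, a sketch of the record layout, mirroring struct a5xx_preempt_record in a5xx_gpu.h (quoted here as context, not changed by this patch):

	/* The CP reads and writes this record during a preemption switch;
	 * 'counter' is a GPU address, and after this patch it points into
	 * the separate unprivileged counters BO. */
	struct a5xx_preempt_record {
		uint32_t magic;
		uint32_t info;
		uint32_t data;
		uint32_t cntl;
		uint32_t rptr;
		uint32_t wptr;
		uint64_t rptr_addr;
		uint64_t rbase;
		uint64_t counter;
	};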
@@ -260,8 +272,11 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
 	int i;
 
-	for (i = 0; i < gpu->nr_rings; i++)
+	for (i = 0; i < gpu->nr_rings; i++) {
 		msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
+		msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i],
+			gpu->aspace, true);
+	}
 }
 
 void a5xx_preempt_init(struct msm_gpu *gpu)
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -678,7 +678,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 		A6XX_PROTECT_RDONLY(0x980, 0x4));
 	gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
 
-	if (adreno_is_a650(adreno_gpu)) {
+	/* Enable expanded apriv for targets that support it */
+	if (gpu->hw_apriv) {
 		gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
 			(1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
 	}
@@ -694,6 +695,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		goto out;
 
+	/* Set the ringbuffer address */
+	gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
+		gpu->rb[0]->iova);
+
+	gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
 	/* Always come up on rb 0 */
 	a6xx_gpu->cur_ring = gpu->rb[0];
@@ -1056,6 +1064,9 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 	adreno_gpu->registers = NULL;
 	adreno_gpu->reg_offsets = a6xx_register_offsets;
 
+	if (adreno_is_a650(adreno_gpu))
+		adreno_gpu->base.hw_apriv = true;
+
 	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
 	if (ret) {
 		a6xx_destroy(&(a6xx_gpu->base.base));
drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -400,26 +400,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
 		ring->memptrs->rptr = 0;
 	}
 
-	/*
-	 * Setup REG_CP_RB_CNTL. The same value is used across targets (with
-	 * the excpetion of A430 that disables the RPTR shadow) - the cacluation
-	 * for the ringbuffer size and block size is moved to msm_gpu.h for the
-	 * pre-processor to deal with and the A430 variant is ORed in here
-	 */
-	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
-		MSM_GPU_RB_CNTL_DEFAULT |
-		(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
-
-	/* Setup ringbuffer address - use ringbuffer[0] for GPU init */
-	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
-		REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
-
-	if (!adreno_is_a430(adreno_gpu)) {
-		adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
-			REG_ADRENO_CP_RB_RPTR_ADDR_HI,
-			rbmemptr(gpu->rb[0], rptr));
-	}
-
 	return 0;
 }
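What the removed block did: besides programming RB_CNTL and RB_BASE generically for all targets (now done per target in the hunks above), it pointed REG_ADRENO_CP_RB_RPTR_ADDR at the rptr slot of the shared memptrs buffer, which is exactly the shadow this series retires. The address handed to the CP came from the rbmemptr() helper; a sketch of it, as defined in msm_ringbuffer.h:

	/* GPU iova of one member of the per-ring struct msm_rbmemptrs.
	 * The removed code passed rbmemptr(gpu->rb[0], rptr) to the CP,
	 * telling it where to mirror its read pointer in memory. */
	#define rbmemptr(ring, member) \
		((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))

With the shadow gone, get_rptr() below must read the read pointer straight from the register instead.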
@@ -427,11 +407,8 @@ static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
 		struct msm_ringbuffer *ring)
 {
-	if (adreno_is_a430(adreno_gpu))
-		return ring->memptrs->rptr = adreno_gpu_read(
-			adreno_gpu, REG_ADRENO_CP_RB_RPTR);
-	else
-		return ring->memptrs->rptr;
+	return ring->memptrs->rptr = adreno_gpu_read(
+		adreno_gpu, REG_ADRENO_CP_RB_RPTR);
 }
 
 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
drivers/gpu/drm/msm/msm_gpu.c
@@ -908,7 +908,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	memptrs = msm_gem_kernel_new(drm,
 		sizeof(struct msm_rbmemptrs) * nr_rings,
-		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
+		check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
 		&memptrs_iova);
 
 	if (IS_ERR(memptrs)) {
drivers/gpu/drm/msm/msm_gpu.h
@@ -15,6 +15,7 @@
 #include "msm_drv.h"
 #include "msm_fence.h"
 #include "msm_ringbuffer.h"
+#include "msm_gem.h"
 
 struct msm_gem_submit;
 struct msm_gpu_perfcntr;
@@ -139,6 +140,8 @@ struct msm_gpu {
 	} devfreq;
 
 	struct msm_gpu_state *crashstate;
+
+	/* True if the hardware supports expanded apriv (a650 and newer) */
+	bool hw_apriv;
 };
 
 /* It turns out that all targets use the same ringbuffer size */
@@ -327,4 +330,12 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
 	mutex_unlock(&gpu->dev->struct_mutex);
 }
 
+/*
+ * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
+ * support expanded privileges
+ */
+#define check_apriv(gpu, flags) \
+	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
+
+
 #endif /* __MSM_GPU_H__ */
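Usage note: check_apriv() is a plain flag filter, so the call sites stay one-liners. A worked expansion of the msm_ringbuffer.c allocation in the hunk below, for both cases (all names as in this patch):

	/* On a650 (gpu->hw_apriv == true) the ringbuffer allocation becomes: */
	ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
		MSM_BO_WC | MSM_BO_GPU_READONLY | MSM_BO_MAP_PRIV,
		gpu->aspace, &ring->bo, &ring->iova);

	/* On everything else (gpu->hw_apriv == false) it is unchanged: */
	ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
		MSM_BO_WC | MSM_BO_GPU_READONLY,
		gpu->aspace, &ring->bo, &ring->iova);

The same filter guards the memptrs allocation in msm_gpu.c above, so on apriv-capable hardware both the ringbuffer and the rptr/fence memptrs become privileged mappings that user submits cannot overwrite.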
drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -27,8 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
 	ring->id = id;
 
 	ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
-		MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo,
-		&ring->iova);
+		check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
+		gpu->aspace, &ring->bo, &ring->iova);
 
 	if (IS_ERR(ring->start)) {
 		ret = PTR_ERR(ring->start);