Commit 6f6a73c8 authored by Linus Torvalds

Merge tag 'drm-fixes-2020-09-08' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "The i915 reverts are going to be a bit of a conflict mess for next, so
  I decided to dequeue them now, along with some msm fixes for a ring
  corruption issue, that Rob sent over the weekend.

  Summary:

  i915:
   - revert gpu relocation changes due to regression

  msm:
   - fixes for RPTR corruption issue"

* tag 'drm-fixes-2020-09-08' of git://anongit.freedesktop.org/drm/drm:
  Revert "drm/i915/gem: Delete unused code"
  Revert "drm/i915/gem: Async GPU relocations only"
  Revert "drm/i915: Remove i915_gem_object_get_dirty_page()"
  drm/msm: Disable the RPTR shadow
  drm/msm: Disable preemption on all 5xx targets
  drm/msm: Enable expanded apriv support for a650
  drm/msm: Split the a5xx preemption record
parents 612ab8ad 20561da3
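
The msm side of this pull follows one pattern: instead of the shared adreno_hw_init() programming the ringbuffer registers for every target, each aNxx_hw_init() now does it itself and sets AXXX_CP_RB_CNTL_NO_UPDATE so the CP stops updating the RPTR shadow in memory. A minimal sketch of that per-target setup, distilled from the hunks below (the wrapper function is hypothetical; the registers, macros, and the gpu_write()/lower_32_bits() helpers are the ones used in the diff):

	/*
	 * Illustrative sketch only -- not part of the commit.  Mirrors the
	 * ringbuffer setup each aNxx_hw_init() gains in the hunks below.
	 */
	static void example_rb_setup_no_rptr_shadow(struct msm_gpu *gpu)
	{
		/* Keep the default ring size/block size, but disable the RPTR shadow */
		gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
			MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);

		/* Point the CP at ringbuffer 0, which is used for GPU init */
		gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
	}

On a5xx and a6xx the ring base register is 64 bits wide, so those hunks use gpu_write64() with the corresponding *_RB_BASE_HI register instead of the single 32-bit write shown here.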
@@ -258,6 +258,10 @@ struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
unsigned int n);
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
unsigned int n);
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
@@ -548,6 +548,20 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
return nth_page(sg_page(sg), offset);
}
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
unsigned int n)
{
struct page *page;
page = i915_gem_object_get_page(obj, n);
if (!obj->mm.dirty)
set_page_dirty(page);
return page;
}
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
@@ -37,14 +37,20 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
return err;
/* 8-Byte aligned */
err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
if (err)
if (!__reloc_entry_gpu(eb, vma,
offsets[0] * sizeof(u32),
0)) {
err = -EIO;
goto unpin_vma;
}
/* !8-Byte aligned */
err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
if (err)
if (!__reloc_entry_gpu(eb, vma,
offsets[1] * sizeof(u32),
1)) {
err = -EIO;
goto unpin_vma;
}
/* Skip to the end of the cmd page */
i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
@@ -54,9 +60,12 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
eb->reloc_cache.rq_size += i;
/* Force batch chaining */
err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
if (err)
if (!__reloc_entry_gpu(eb, vma,
offsets[2] * sizeof(u32),
2)) {
err = -EIO;
goto unpin_vma;
}
GEM_BUG_ON(!eb->reloc_cache.rq);
rq = i915_request_get(eb->reloc_cache.rq);
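
The two selftest hunks above track the reverted calling convention: with the async-relocation patch undone, __reloc_entry_gpu() reports success as a boolean again instead of returning an error code, so each of the three call sites synthesizes -EIO on failure itself. A rough before/after paraphrase of one call site (illustrative, not a verbatim quote of the hunk):

	/* before the revert: the helper returned an error code */
	err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
	if (err)
		goto unpin_vma;

	/* after the revert: the helper returns true on success */
	if (!__reloc_entry_gpu(eb, vma,
			       offsets[0] * sizeof(u32),
			       0)) {
		err = -EIO;
		goto unpin_vma;
	}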
@@ -164,6 +164,11 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
/* NOTE: PM4/micro-engine firmware registers look to be the same
* for a2xx and a3xx.. we could possibly push that part down to
* adreno_gpu base class. Or push both PM4 and PFP but
@@ -211,6 +211,16 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
/*
* Use the default ringbuffer size and block size but disable the RPTR
* shadow
*/
gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
/* Set the ringbuffer address */
gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
/* setup access protection: */
gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
@@ -267,6 +267,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
/*
* Use the default ringbuffer size and block size but disable the RPTR
* shadow
*/
gpu_write(gpu, REG_A4XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
/* Set the ringbuffer address */
gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
/* Load PM4: */
ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
@@ -703,8 +703,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
a5xx_preempt_hw_init(gpu);
if (!adreno_is_a510(adreno_gpu))
a5xx_gpmu_ucode_init(gpu);
@@ -712,6 +710,15 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
/* Set the ringbuffer address */
gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
gpu->rb[0]->iova);
gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
a5xx_preempt_hw_init(gpu);
/* Disable the interrupts through the initial bringup stage */
gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
@@ -1511,7 +1518,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
check_speed_bin(&pdev->dev);
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
/* Restricting nr_rings to 1 to temporarily disable preemption */
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
@@ -31,6 +31,7 @@ struct a5xx_gpu {
struct msm_ringbuffer *next_ring;
struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
@@ -226,19 +226,31 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
struct a5xx_preempt_record *ptr;
struct drm_gem_object *bo = NULL;
u64 iova = 0;
void *counters;
struct drm_gem_object *bo = NULL, *counters_bo = NULL;
u64 iova = 0, counters_iova = 0;
ptr = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
MSM_BO_UNCACHED, gpu->aspace, &bo, &iova);
MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
/* The buffer to store counters needs to be unprivileged */
counters = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_COUNTER_SIZE,
MSM_BO_UNCACHED, gpu->aspace, &counters_bo, &counters_iova);
if (IS_ERR(counters)) {
msm_gem_kernel_put(bo, gpu->aspace, true);
return PTR_ERR(counters);
}
msm_gem_object_set_name(bo, "preempt");
msm_gem_object_set_name(counters_bo, "preempt_counters");
a5xx_gpu->preempt_bo[ring->id] = bo;
a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
@@ -249,7 +261,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr->data = 0;
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
ptr->rptr_addr = rbmemptr(ring, rptr);
ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
ptr->counter = counters_iova;
return 0;
}
@@ -260,8 +272,11 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
for (i = 0; i < gpu->nr_rings; i++)
for (i = 0; i < gpu->nr_rings; i++) {
msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i],
gpu->aspace, true);
}
}
void a5xx_preempt_init(struct msm_gpu *gpu)
@@ -678,7 +678,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
A6XX_PROTECT_RDONLY(0x980, 0x4));
gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
if (adreno_is_a650(adreno_gpu)) {
/* Enable expanded apriv for targets that support it */
if (gpu->hw_apriv) {
gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
(1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
}
@@ -694,6 +695,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
if (ret)
goto out;
/* Set the ringbuffer address */
gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
gpu->rb[0]->iova);
gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
@@ -1056,6 +1064,9 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = NULL;
adreno_gpu->reg_offsets = a6xx_register_offsets;
if (adreno_is_a650(adreno_gpu))
adreno_gpu->base.hw_apriv = true;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
@@ -400,26 +400,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
ring->memptrs->rptr = 0;
}
/*
* Setup REG_CP_RB_CNTL. The same value is used across targets (with
* the exception of A430 that disables the RPTR shadow) - the calculation
* for the ringbuffer size and block size is moved to msm_gpu.h for the
* pre-processor to deal with and the A430 variant is ORed in here
*/
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT |
(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
/* Setup ringbuffer address - use ringbuffer[0] for GPU init */
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
if (!adreno_is_a430(adreno_gpu)) {
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
REG_ADRENO_CP_RB_RPTR_ADDR_HI,
rbmemptr(gpu->rb[0], rptr));
}
return 0;
}
@@ -427,11 +407,8 @@ int adreno_hw_init(struct msm_gpu *gpu)
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
struct msm_ringbuffer *ring)
{
if (adreno_is_a430(adreno_gpu))
return ring->memptrs->rptr = adreno_gpu_read(
adreno_gpu, REG_ADRENO_CP_RB_RPTR);
else
return ring->memptrs->rptr;
}
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
@@ -908,7 +908,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
memptrs = msm_gem_kernel_new(drm,
sizeof(struct msm_rbmemptrs) * nr_rings,
MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
@@ -15,6 +15,7 @@
#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"
struct msm_gem_submit;
struct msm_gpu_perfcntr;
@@ -139,6 +140,8 @@ struct msm_gpu {
} devfreq;
struct msm_gpu_state *crashstate;
/* True if the hardware supports expanded apriv (a650 and newer) */
bool hw_apriv;
};
/* It turns out that all targets use the same ringbuffer size */
@@ -327,4 +330,12 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
mutex_unlock(&gpu->dev->struct_mutex);
}
/*
* Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
* support expanded privileges
*/
#define check_apriv(gpu, flags) \
(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
#endif /* __MSM_GPU_H__ */
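
The ringbuffer hunk that follows is one caller of this macro. As a quick illustration of how it composes (the flag values are the ones used in that hunk; the local variable is only for illustration):

	/*
	 * Illustration only: on a650 (gpu->hw_apriv == true) this evaluates to
	 * MSM_BO_WC | MSM_BO_GPU_READONLY | MSM_BO_MAP_PRIV; on older targets
	 * (hw_apriv == false) the flags pass through unchanged.
	 */
	uint32_t flags = check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY);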
@@ -27,8 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->id = id;
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo,
&ring->iova);
check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
gpu->aspace, &ring->bo, &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);