Commit e61cd5e2 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm radeon fixes from Dave Airlie:
 "One core fix, but mostly radeon fixes for s/r and big endian UVD
  support, and a fix to stop the GPU being reset for no good reason, and
  crashing people's machines."

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: update lockup tracking when scheduling in empty ring
  drm/prime: Honor requested file flags when exporting a buffer
  drm/radeon: fix UVD on big endian
  drm/radeon: fix write back suspend regression with uvd v2
  drm/radeon: do not try to uselessly update virtual memory pagetable
parents 64a2f30a 9aa36876
...@@ -190,8 +190,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev, ...@@ -190,8 +190,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
} }
return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
0600);
} }
EXPORT_SYMBOL(drm_gem_prime_export); EXPORT_SYMBOL(drm_gem_prime_export);
......
...@@ -2687,6 +2687,9 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev) ...@@ -2687,6 +2687,9 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev)
int r600_uvd_init(struct radeon_device *rdev) int r600_uvd_init(struct radeon_device *rdev)
{ {
int i, j, r; int i, j, r;
/* disable byte swapping */
u32 lmi_swap_cntl = 0;
u32 mp_swap_cntl = 0;
/* raise clocks while booting up the VCPU */ /* raise clocks while booting up the VCPU */
radeon_set_uvd_clocks(rdev, 53300, 40000); radeon_set_uvd_clocks(rdev, 53300, 40000);
...@@ -2711,9 +2714,13 @@ int r600_uvd_init(struct radeon_device *rdev) ...@@ -2711,9 +2714,13 @@ int r600_uvd_init(struct radeon_device *rdev)
WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
(1 << 21) | (1 << 9) | (1 << 20)); (1 << 21) | (1 << 9) | (1 << 20));
/* disable byte swapping */ #ifdef __BIG_ENDIAN
WREG32(UVD_LMI_SWAP_CNTL, 0); /* swap (8 in 32) RB and IB */
WREG32(UVD_MP_SWAP_CNTL, 0); lmi_swap_cntl = 0xa;
mp_swap_cntl = 0;
#endif
WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
WREG32(UVD_MPC_SET_MUXA0, 0x40c2040); WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
WREG32(UVD_MPC_SET_MUXA1, 0x0); WREG32(UVD_MPC_SET_MUXA1, 0x0);
......
...@@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) ...@@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
*/ */
void radeon_wb_disable(struct radeon_device *rdev) void radeon_wb_disable(struct radeon_device *rdev)
{ {
int r;
if (rdev->wb.wb_obj) {
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0))
return;
radeon_bo_kunmap(rdev->wb.wb_obj);
radeon_bo_unpin(rdev->wb.wb_obj);
radeon_bo_unreserve(rdev->wb.wb_obj);
}
rdev->wb.enabled = false; rdev->wb.enabled = false;
} }
...@@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev) ...@@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev)
{ {
radeon_wb_disable(rdev); radeon_wb_disable(rdev);
if (rdev->wb.wb_obj) { if (rdev->wb.wb_obj) {
if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
radeon_bo_kunmap(rdev->wb.wb_obj);
radeon_bo_unpin(rdev->wb.wb_obj);
radeon_bo_unreserve(rdev->wb.wb_obj);
}
radeon_bo_unref(&rdev->wb.wb_obj); radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL; rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL; rdev->wb.wb_obj = NULL;
...@@ -295,7 +290,6 @@ int radeon_wb_init(struct radeon_device *rdev) ...@@ -295,7 +290,6 @@ int radeon_wb_init(struct radeon_device *rdev)
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r; return r;
} }
}
r = radeon_bo_reserve(rdev->wb.wb_obj, false); r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0)) { if (unlikely(r != 0)) {
radeon_wb_fini(rdev); radeon_wb_fini(rdev);
...@@ -316,6 +310,7 @@ int radeon_wb_init(struct radeon_device *rdev) ...@@ -316,6 +310,7 @@ int radeon_wb_init(struct radeon_device *rdev)
radeon_wb_fini(rdev); radeon_wb_fini(rdev);
return r; return r;
} }
}
/* clear wb memory */ /* clear wb memory */
memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE); memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
......
...@@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) ...@@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{ {
struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
if (likely(rdev->wb.enabled || !drv->scratch_reg)) { if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
if (drv->cpu_addr) {
*drv->cpu_addr = cpu_to_le32(seq); *drv->cpu_addr = cpu_to_le32(seq);
}
} else { } else {
WREG32(drv->scratch_reg, seq); WREG32(drv->scratch_reg, seq);
} }
...@@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring) ...@@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
u32 seq = 0; u32 seq = 0;
if (likely(rdev->wb.enabled || !drv->scratch_reg)) { if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
if (drv->cpu_addr) {
seq = le32_to_cpu(*drv->cpu_addr); seq = le32_to_cpu(*drv->cpu_addr);
} else {
seq = lower_32_bits(atomic64_read(&drv->last_seq));
}
} else { } else {
seq = RREG32(drv->scratch_reg); seq = RREG32(drv->scratch_reg);
} }
......
...@@ -1197,11 +1197,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, ...@@ -1197,11 +1197,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
int radeon_vm_bo_rmv(struct radeon_device *rdev, int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va) struct radeon_bo_va *bo_va)
{ {
int r; int r = 0;
mutex_lock(&rdev->vm_manager.lock); mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&bo_va->vm->mutex); mutex_lock(&bo_va->vm->mutex);
if (bo_va->soffset) {
r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
}
mutex_unlock(&rdev->vm_manager.lock); mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list); list_del(&bo_va->vm_list);
mutex_unlock(&bo_va->vm->mutex); mutex_unlock(&bo_va->vm->mutex);
......
...@@ -402,6 +402,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi ...@@ -402,6 +402,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
return -ENOMEM; return -ENOMEM;
/* Align requested size with padding so unlock_commit can /* Align requested size with padding so unlock_commit can
* pad safely */ * pad safely */
radeon_ring_free_size(rdev, ring);
if (ring->ring_free_dw == (ring->ring_size / 4)) {
/* This is an empty ring update lockup info to avoid
* false positive.
*/
radeon_ring_lockup_update(ring);
}
ndw = (ndw + ring->align_mask) & ~ring->align_mask; ndw = (ndw + ring->align_mask) & ~ring->align_mask;
while (ndw > (ring->ring_free_dw - 1)) { while (ndw > (ring->ring_free_dw - 1)) {
radeon_ring_free_size(rdev, ring); radeon_ring_free_size(rdev, ring);
......
...@@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev) ...@@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
if (!r) { if (!r) {
radeon_bo_kunmap(rdev->uvd.vcpu_bo); radeon_bo_kunmap(rdev->uvd.vcpu_bo);
radeon_bo_unpin(rdev->uvd.vcpu_bo); radeon_bo_unpin(rdev->uvd.vcpu_bo);
rdev->uvd.cpu_addr = NULL;
if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
}
radeon_bo_unreserve(rdev->uvd.vcpu_bo); radeon_bo_unreserve(rdev->uvd.vcpu_bo);
if (rdev->uvd.cpu_addr) {
radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
} else {
rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
}
} }
return r; return r;
} }
...@@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev) ...@@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev)
return r; return r;
} }
/* Have been pin in cpu unmap unpin */
radeon_bo_kunmap(rdev->uvd.vcpu_bo);
radeon_bo_unpin(rdev->uvd.vcpu_bo);
r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
&rdev->uvd.gpu_addr); &rdev->uvd.gpu_addr);
if (r) { if (r) {
...@@ -613,19 +627,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring, ...@@ -613,19 +627,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
} }
/* stitch together an UVD create msg */ /* stitch together an UVD create msg */
msg[0] = 0x00000de4; msg[0] = cpu_to_le32(0x00000de4);
msg[1] = 0x00000000; msg[1] = cpu_to_le32(0x00000000);
msg[2] = handle; msg[2] = cpu_to_le32(handle);
msg[3] = 0x00000000; msg[3] = cpu_to_le32(0x00000000);
msg[4] = 0x00000000; msg[4] = cpu_to_le32(0x00000000);
msg[5] = 0x00000000; msg[5] = cpu_to_le32(0x00000000);
msg[6] = 0x00000000; msg[6] = cpu_to_le32(0x00000000);
msg[7] = 0x00000780; msg[7] = cpu_to_le32(0x00000780);
msg[8] = 0x00000440; msg[8] = cpu_to_le32(0x00000440);
msg[9] = 0x00000000; msg[9] = cpu_to_le32(0x00000000);
msg[10] = 0x01b37000; msg[10] = cpu_to_le32(0x01b37000);
for (i = 11; i < 1024; ++i) for (i = 11; i < 1024; ++i)
msg[i] = 0x0; msg[i] = cpu_to_le32(0x0);
radeon_bo_kunmap(bo); radeon_bo_kunmap(bo);
radeon_bo_unreserve(bo); radeon_bo_unreserve(bo);
...@@ -659,12 +673,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, ...@@ -659,12 +673,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
} }
/* stitch together an UVD destroy msg */ /* stitch together an UVD destroy msg */
msg[0] = 0x00000de4; msg[0] = cpu_to_le32(0x00000de4);
msg[1] = 0x00000002; msg[1] = cpu_to_le32(0x00000002);
msg[2] = handle; msg[2] = cpu_to_le32(handle);
msg[3] = 0x00000000; msg[3] = cpu_to_le32(0x00000000);
for (i = 4; i < 1024; ++i) for (i = 4; i < 1024; ++i)
msg[i] = 0x0; msg[i] = cpu_to_le32(0x0);
radeon_bo_kunmap(bo); radeon_bo_kunmap(bo);
radeon_bo_unreserve(bo); radeon_bo_unreserve(bo);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment