Commit 1ffd2652 authored by Christian König, committed by Alex Deucher

drm/amdgpu: fix waiting for all fences before flipping

Wait on the shared fences of the buffer's reservation object as well as the exclusive one before executing the flip; otherwise the flip can be programmed while other engines are still accessing the buffer and we might see corruption.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent 4127a59e
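
For readers skimming the diff: the core of the change is to gather every fence attached to the new buffer's reservation object and wait on each of them, instead of waiting only on the exclusive fence. Below is a minimal standalone sketch of that pattern, using the pre-4.10 fence/reservation_object kernel API that this commit targets; the helper name wait_all_resv_fences() and its error handling are illustrative, not part of the commit.

#include <linux/fence.h>	/* struct fence, fence_wait(), fence_put() */
#include <linux/reservation.h>	/* struct reservation_object */
#include <linux/slab.h>		/* kfree() */

/* Illustrative only: wait for the exclusive fence and all shared
 * fences of a reservation object, dropping each reference when done.
 * reservation_object_get_fences_rcu() snapshots the fences under RCU
 * and returns them with references held; the shared array is
 * allocated by the helper and must be freed by the caller.
 */
static int wait_all_resv_fences(struct reservation_object *resv)
{
	struct fence *excl;
	struct fence **shared;
	unsigned shared_count, i;
	int r;

	r = reservation_object_get_fences_rcu(resv, &excl,
					      &shared_count, &shared);
	if (r)
		return r;

	/* Waiting only on 'excl' is the bug this commit fixes: shared
	 * fences can signal later, so a flip gated only on the
	 * exclusive fence could scan out a buffer that other engines
	 * are still using.
	 */
	if (excl) {
		fence_wait(excl, false);	/* non-interruptible wait */
		fence_put(excl);
	}
	for (i = 0; i < shared_count; ++i) {
		fence_wait(shared[i], false);
		fence_put(shared[i]);
	}

	kfree(shared);
	return 0;
}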
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -828,7 +828,9 @@ struct amdgpu_flip_work {
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct amdgpu_bo		*old_rbo;
-	struct fence			*fence;
+	struct fence			*excl;
+	unsigned			shared_count;
+	struct fence			**shared;
 };
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,22 +35,16 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
-static void amdgpu_flip_work_func(struct work_struct *__work)
+static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
+				   struct fence **f)
 {
-	struct amdgpu_flip_work *work =
-		container_of(__work, struct amdgpu_flip_work, flip_work);
-	struct amdgpu_device *adev = work->adev;
-	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
-	struct drm_crtc *crtc = &amdgpuCrtc->base;
 	struct amdgpu_fence *fence;
-	unsigned long flags;
-	int r;
+	long r;
 
-	down_read(&adev->exclusive_lock);
-	if (work->fence) {
-		fence = to_amdgpu_fence(work->fence);
+	if (*f == NULL)
+		return;
+
+	fence = to_amdgpu_fence(*f);
 	if (fence) {
 		r = fence_wait(&fence->base, false);
 		if (r == -EDEADLK) {
@@ -59,19 +53,34 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 			down_read(&adev->exclusive_lock);
 		}
 	} else
-		r = fence_wait(work->fence, false);
+		r = fence_wait(*f, false);
 
 	if (r)
-		DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
+		DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r);
 
 	/* We continue with the page flip even if we failed to wait on
 	 * the fence, otherwise the DRM core and userspace will be
 	 * confused about which BO the CRTC is scanning out
 	 */
-	fence_put(work->fence);
-	work->fence = NULL;
-	}
+	fence_put(*f);
+	*f = NULL;
+}
+
+static void amdgpu_flip_work_func(struct work_struct *__work)
+{
+	struct amdgpu_flip_work *work =
+		container_of(__work, struct amdgpu_flip_work, flip_work);
+	struct amdgpu_device *adev = work->adev;
+	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
+	struct drm_crtc *crtc = &amdgpuCrtc->base;
+	unsigned long flags;
+	unsigned i;
+
+	down_read(&adev->exclusive_lock);
+	amdgpu_flip_wait_fence(adev, &work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		amdgpu_flip_wait_fence(adev, &work->shared[i]);
 
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -108,6 +117,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	kfree(work->shared);
 	kfree(work);
 }
@@ -127,7 +137,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
-	int r;
+	int i, r;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL)
@@ -167,7 +177,19 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 		goto cleanup;
 	}
 
-	work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+	r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+					      &work->shared_count,
+					      &work->shared);
+	if (unlikely(r != 0)) {
+		amdgpu_bo_unreserve(new_rbo);
+		DRM_ERROR("failed to get fences for buffer\n");
+		goto cleanup;
+	}
+
+	fence_get(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		fence_get(work->shared[i]);
+
 	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
 	amdgpu_bo_unreserve(new_rbo);
@@ -212,7 +234,10 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup:
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-	fence_put(work->fence);
+	fence_put(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		fence_put(work->shared[i]);
+	kfree(work->shared);
 	kfree(work);
 
 	return r;