Commit c3874b75 authored by Christian König, committed by Alex Deucher

drm/amdgpu: stop blocking for page flip fences

Just register a callback and reschedule the work item if necessary.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
parent 6800e2ea
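The idea of the patch: instead of blocking in the flip worker with fence_wait(), attach a fence callback that drops the fence reference and re-queues the work item once the fence signals. Below is a minimal sketch of that pattern using the same fence API (fence_add_callback() returns 0 when the callback is armed and -ENOENT when the fence has already signaled); the `my_`-prefixed struct and function names are illustrative stand-ins, not identifiers from the patch.

#include <linux/kernel.h>
#include <linux/fence.h>
#include <linux/workqueue.h>

/* Illustrative stand-in for the driver's flip work item. */
struct my_flip_work {
	struct work_struct	flip_work;
	struct fence		*excl;		/* exclusive fence */
	unsigned		shared_count;
	struct fence		**shared;	/* shared fences */
	struct fence_cb		cb;		/* single callback slot */
};

/* Runs when the fence signals: drop our reference, re-run the worker. */
static void my_flip_callback(struct fence *f, struct fence_cb *cb)
{
	struct my_flip_work *work = container_of(cb, struct my_flip_work, cb);

	fence_put(f);
	schedule_work(&work->flip_work);
}

/*
 * Returns true if a callback was armed and the worker must bail out now;
 * false if there was no fence or it had already signaled.
 */
static bool my_flip_handle_fence(struct my_flip_work *work, struct fence **f)
{
	struct fence *fence = *f;

	if (!fence)
		return false;

	*f = NULL;	/* this fence is dealt with either way */

	if (!fence_add_callback(fence, &work->cb, my_flip_callback))
		return true;	/* 0: callback armed, work re-queued later */

	fence_put(fence);	/* -ENOENT: already signaled, proceed now */
	return false;
}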
@@ -761,6 +761,7 @@ struct amdgpu_flip_work {
 	struct fence			*excl;
 	unsigned			shared_count;
 	struct fence			**shared;
+	struct fence_cb			cb;
 };
@@ -35,24 +35,32 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
-static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
-				   struct fence **f)
+static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
 {
-	long r;
+	struct amdgpu_flip_work *work =
+		container_of(cb, struct amdgpu_flip_work, cb);
+	struct amdgpu_device *adev = work->adev;
+	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
 
-	if (*f == NULL)
-		return;
+	fence_put(f);
+	queue_work(amdgpu_crtc->pflip_queue, &work->flip_work);
+}
 
-	r = fence_wait(*f, false);
-	if (r)
-		DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r);
+static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
+				     struct fence **f)
+{
+	struct fence *fence = *f;
 
-	/* We continue with the page flip even if we failed to wait on
-	 * the fence, otherwise the DRM core and userspace will be
-	 * confused about which BO the CRTC is scanning out
-	 */
-	fence_put(*f);
+	if (fence == NULL)
+		return false;
+
 	*f = NULL;
+
+	if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+		return true;
+
+	fence_put(fence);
+	return false;
 }
 
 static void amdgpu_flip_work_func(struct work_struct *__work)
@@ -68,9 +76,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	int vpos, hpos, stat, min_udelay;
 	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
-	amdgpu_flip_wait_fence(adev, &work->excl);
+	if (amdgpu_flip_handle_fence(work, &work->excl))
+		return;
+
 	for (i = 0; i < work->shared_count; ++i)
-		amdgpu_flip_wait_fence(adev, &work->shared[i]);
+		if (amdgpu_flip_handle_fence(work, &work->shared[i]))
+			return;
 
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
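With this change the worker never blocks: each fence is either already signaled (the helper returns false and the pointer is cleared) or a callback is armed and the worker returns early. When the callback later re-queues the work, the cleared pointers make the already-handled fences no-ops, so the worker makes forward progress each time it runs. A hedged sketch of that control flow, continuing the illustrative `my_` names from the example above:

static void my_flip_work_func(struct work_struct *__work)
{
	struct my_flip_work *work =
		container_of(__work, struct my_flip_work, flip_work);
	unsigned i;

	/* Exclusive fence first: bail out if a callback was armed. */
	if (my_flip_handle_fence(work, &work->excl))
		return;

	/* Then every shared fence; already-handled ones are NULL now. */
	for (i = 0; i < work->shared_count; ++i)
		if (my_flip_handle_fence(work, &work->shared[i]))
			return;

	/* All fences signaled: safe to program the page flip here. */
}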
@@ -234,7 +245,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 	/* update crtc fb */
 	crtc->primary->fb = fb;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	queue_work(amdgpu_crtc->pflip_queue, &work->flip_work);
+	amdgpu_flip_work_func(&work->flip_work);
 	return 0;
 
 vblank_cleanup:
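The last hunk changes the submission path to match: amdgpu_crtc_page_flip() now calls the work function directly instead of queueing it, so if every fence has already signaled the flip is programmed immediately, and otherwise the armed callback re-queues the work later. In the illustrative terms used above, the call site reduces to:

	/* Run the worker inline; it either flips now or arms a callback
	 * that re-queues &work->flip_work when the fence signals. */
	my_flip_work_func(&work->flip_work);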