Commit 45720959 authored by Maarten Lankhorst

drm/i915: Remove delayed FBC activation.

The only time we should start FBC is when we have waited a vblank
after the atomic update. We've already forced a vblank wait by doing
wait_for_flip_done before intel_post_plane_update(), so we don't need
to wait a second time before enabling.

Removing the worker simplifies the code and eliminates possible race
conditions, such as the one reported in bug 103167.

Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103167
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180625163758.10871-2-maarten.lankhorst@linux.intel.com
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
parent c9855a56
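
For context, the ordering the commit message relies on looks roughly like the sketch below. This is an illustration only, not driver code: the stub functions are placeholders standing in for the atomic commit helpers, and argument lists are omitted; in the real driver the corresponding steps are the plane update, the wait_for_flip_done wait, and intel_post_plane_update() -> __intel_fbc_post_update() -> intel_fbc_hw_activate().

/*
 * Illustrative sketch of the commit-tail ordering (hypothetical names,
 * only the sequence of steps matters).
 */
static void update_planes_and_issue_flip(void) { /* flip is latched */ }
static void wait_for_flip_done(void) { /* returns after a vblank has passed */ }
static void post_plane_update_activate_fbc(void) { /* FBC registers may be written now */ }

static void commit_tail_sketch(void)
{
	update_planes_and_issue_flip();
	wait_for_flip_done();
	/*
	 * WaFbcWaitForVBlankBeforeEnable is already satisfied at this point,
	 * so FBC can be activated synchronously instead of deferring to a
	 * worker that waits for yet another vblank.
	 */
	post_plane_update_activate_fbc();
}
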
@@ -1659,11 +1659,6 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
else
seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
if (fbc->work.scheduled)
seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
fbc->work.scheduled_vblank,
drm_crtc_vblank_count(&fbc->crtc->base));
if (intel_fbc_is_active(dev_priv)) {
u32 mask;
@@ -580,12 +580,6 @@ struct intel_fbc {
unsigned int gen9_wa_cfb_stride;
} params;
struct intel_fbc_work {
bool scheduled;
u64 scheduled_vblank;
struct work_struct work;
} work;
const char *no_fbc_reason;
};
@@ -399,89 +399,6 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
return dev_priv->fbc.active;
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct drm_i915_private *dev_priv =
container_of(__work, struct drm_i915_private, fbc.work.work);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_work *work = &fbc->work;
struct intel_crtc *crtc = fbc->crtc;
struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
if (drm_crtc_vblank_get(&crtc->base)) {
/* CRTC is now off, leave FBC deactivated */
mutex_lock(&fbc->lock);
work->scheduled = false;
mutex_unlock(&fbc->lock);
return;
}
retry:
/* Delay the actual enabling to let pageflipping cease and the
* display to settle before starting the compression. Note that
* this delay also serves a second purpose: it allows for a
* vblank to pass after disabling the FBC before we attempt
* to modify the control registers.
*
* WaFbcWaitForVBlankBeforeEnable:ilk,snb
*
* It is also worth mentioning that since work->scheduled_vblank can be
* updated multiple times by the other threads, hitting the timeout is
* not an error condition. We'll just end up hitting the "goto retry"
* case below.
*/
wait_event_timeout(vblank->queue,
drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
msecs_to_jiffies(50));
mutex_lock(&fbc->lock);
/* Were we cancelled? */
if (!work->scheduled)
goto out;
/* Were we delayed again while this function was sleeping? */
if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
mutex_unlock(&fbc->lock);
goto retry;
}
intel_fbc_hw_activate(dev_priv);
work->scheduled = false;
out:
mutex_unlock(&fbc->lock);
drm_crtc_vblank_put(&crtc->base);
}
static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_work *work = &fbc->work;
WARN_ON(!mutex_is_locked(&fbc->lock));
if (WARN_ON(!fbc->enabled))
return;
if (drm_crtc_vblank_get(&crtc->base)) {
DRM_ERROR("vblank not available for FBC on pipe %c\n",
pipe_name(crtc->pipe));
return;
}
/* It is useless to call intel_fbc_cancel_work() or cancel_work() in
* this function since we're not releasing fbc.lock, so it won't have an
* opportunity to grab it to discover that it was cancelled. So we just
* update the expected jiffy count. */
work->scheduled = true;
work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
drm_crtc_vblank_put(&crtc->base);
schedule_work(&work->work);
}
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
const char *reason)
{
@@ -489,11 +406,6 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
WARN_ON(!mutex_is_locked(&fbc->lock));
/* Calling cancel_work() here won't help due to the fact that the work
* function grabs fbc->lock. Just set scheduled to false so the work
* function can know it was cancelled. */
fbc->work.scheduled = false;
if (fbc->active)
intel_fbc_hw_deactivate(dev_priv);
@@ -1005,7 +917,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
if (!fbc->busy_bits) {
intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
intel_fbc_schedule_activation(crtc);
intel_fbc_hw_activate(dev_priv);
} else
intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
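
Net effect of the hunk above, reconstructed here only to make the before/after explicit: with the vblank wait already guaranteed by the commit path, __intel_fbc_post_update() now activates the hardware synchronously under fbc->lock instead of scheduling the worker.

	if (!fbc->busy_bits) {
		intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
		intel_fbc_hw_activate(dev_priv);
	} else
		intel_fbc_deactivate(dev_priv, "frontbuffer write");
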
@@ -1212,8 +1124,6 @@ void intel_fbc_disable(struct intel_crtc *crtc)
if (fbc->crtc == crtc)
__intel_fbc_disable(dev_priv);
mutex_unlock(&fbc->lock);
cancel_work_sync(&fbc->work.work);
}
/**
@@ -1235,8 +1145,6 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
__intel_fbc_disable(dev_priv);
}
mutex_unlock(&fbc->lock);
cancel_work_sync(&fbc->work.work);
}
static void intel_fbc_underrun_work_fn(struct work_struct *work)
@@ -1387,12 +1295,10 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
fbc->enabled = false;
fbc->active = false;
fbc->work.scheduled = false;
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->has_fbc = false;