Commit a46bfdc8 authored by Chris Wilson, committed by Joonas Lahtinen

drm/i915/gt: Wait for new requests in intel_gt_retire_requests()

Our callers fall into two categories: those passing timeout=0, who just
want to flush request retirements, and those passing a timeout, who need
to wait for submission completion (e.g. intel_gt_wait_for_idle()).
Currently, we only wait on the snapshot of timelines taken at the start
of the wait (with the expectation that new requests would cause new
timelines to appear by the end). However, callers such as
intel_gt_wait_for_idle() before suspend also require us to wait for the
power management requests emitted by retirement itself. If we don't,
it takes an extra second or two for the background worker to flush
the queue and mark the GT as idle.

Fixes: 7e805762 ("drm/i915: Drop struct_mutex from around i915_retire_requests()")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191114225736.616885-1-chris@chris-wilson.co.uk
(cherry picked from commit 7936a22d)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 2a39b072
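
For context on how the return value is consumed: a non-zero return means the
GT may still be busy and the remaining timeout is handed back, so a waiter
simply calls again until it sees 0. Below is a minimal sketch of such a wait
loop, modelled on (but not copied from) intel_gt_wait_for_idle(); the function
name example_gt_wait_for_idle() is hypothetical, while cond_resched(),
signal_pending() and intel_gt_retire_requests_timeout() are the real kernel
helpers referenced by this patch.

/*
 * Sketch of the caller-side contract this patch fixes up:
 * intel_gt_retire_requests_timeout() returns the remaining timeout
 * while work may still be outstanding, and 0 once the active_list
 * has drained. Illustrative only, not the kernel's exact code.
 */
static int example_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	/* Keep retiring until the GT reports idle or time runs out. */
	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
		cond_resched();			/* yield between passes */
		if (signal_pending(current))
			return -EINTR;		/* allow interruption */
	}

	return timeout; /* 0 on idle, negative error code otherwise */
}
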
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -33,7 +33,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 	struct intel_timeline *tl, *tn;
-	unsigned long active_count = 0;
 	unsigned long flags;
 	bool interruptible;
 	LIST_HEAD(free);
@@ -46,10 +45,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 	spin_lock_irqsave(&timelines->lock, flags);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
-		if (!mutex_trylock(&tl->mutex)) {
-			active_count++; /* report busy to caller, try again? */
+		if (!mutex_trylock(&tl->mutex))
 			continue;
-		}
 
 		intel_timeline_get(tl);
 		GEM_BUG_ON(!tl->active_count);
@@ -74,9 +71,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 		/* Resume iteration after dropping lock */
 		list_safe_reset_next(tl, tn, link);
 
-		if (--tl->active_count)
-			active_count += !!rcu_access_pointer(tl->last_request.fence);
-		else
+		if (!--tl->active_count)
 			list_del(&tl->link);
 
 		mutex_unlock(&tl->mutex);
@@ -92,7 +87,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
 
-	return active_count ? timeout : 0;
+	return list_empty(&timelines->active_list) ? 0 : timeout;
 }
 
 int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
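
The key semantic change is in the return statement. Previously the function
reported busy from active_count, a snapshot accumulated while walking the
list, so timelines that gained requests during the walk (for example, the
power management requests emitted by retirement) were invisible to the
caller. Re-inspecting the live list at the end observes those late arrivals.
The two helpers below are a hypothetical side-by-side contrast of the old
and new logic, not literal kernel code:

/* Before: decided from a snapshot taken during the list walk, so a
 * timeline that became active after we passed it did not count.
 */
static long retire_report_busy_old(unsigned long active_count, long timeout)
{
	return active_count ? timeout : 0;
}

/* After: re-check the live active_list, so requests queued while we
 * were retiring still report busy and the caller's wait loop goes
 * around again instead of declaring the GT idle prematurely.
 */
static long retire_report_busy_new(struct intel_gt_timelines *timelines,
				   long timeout)
{
	return list_empty(&timelines->active_list) ? 0 : timeout;
}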