Commit 4856254d authored by Chris Wilson

drm/i915/gt: Repeat wait_for_idle for retirement workers

Since we may retire timelines from secondary workers,
intel_gt_retire_requests() is not always a reliable indicator that all
pending retirements are complete. If we do detect secondary workers are
in progress, recommend intel_gt_wait_for_idle() to repeat the retirement
check.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191221180204.1201217-1-chris@chris-wilson.co.uk
parent e6ba7648
...@@ -282,7 +282,7 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine, ...@@ -282,7 +282,7 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool intel_engines_are_idle(struct intel_gt *gt); bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine); bool intel_engine_is_idle(struct intel_engine_cs *engine);
void intel_engine_flush_submission(struct intel_engine_cs *engine); bool intel_engine_flush_submission(struct intel_engine_cs *engine);
void intel_engines_reset_default_submission(struct intel_gt *gt); void intel_engines_reset_default_submission(struct intel_gt *gt);
......
...@@ -1079,9 +1079,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine) ...@@ -1079,9 +1079,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return idle; return idle;
} }
void intel_engine_flush_submission(struct intel_engine_cs *engine) bool intel_engine_flush_submission(struct intel_engine_cs *engine)
{ {
struct tasklet_struct *t = &engine->execlists.tasklet; struct tasklet_struct *t = &engine->execlists.tasklet;
bool active = tasklet_is_locked(t);
if (__tasklet_is_scheduled(t)) { if (__tasklet_is_scheduled(t)) {
local_bh_disable(); local_bh_disable();
...@@ -1092,10 +1093,13 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine) ...@@ -1092,10 +1093,13 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
tasklet_unlock(t); tasklet_unlock(t);
} }
local_bh_enable(); local_bh_enable();
active = true;
} }
/* Otherwise flush the tasklet if it was running on another cpu */ /* Otherwise flush the tasklet if it was running on another cpu */
tasklet_unlock_wait(t); tasklet_unlock_wait(t);
return active;
} }
/** /**
......
...@@ -23,15 +23,18 @@ static void retire_requests(struct intel_timeline *tl) ...@@ -23,15 +23,18 @@ static void retire_requests(struct intel_timeline *tl)
break; break;
} }
static void flush_submission(struct intel_gt *gt) static bool flush_submission(struct intel_gt *gt)
{ {
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
bool active = false;
for_each_engine(engine, gt, id) { for_each_engine(engine, gt, id) {
intel_engine_flush_submission(engine); active |= intel_engine_flush_submission(engine);
flush_work(&engine->retire_work); active |= flush_work(&engine->retire_work);
} }
return active;
} }
static void engine_retire(struct work_struct *work) static void engine_retire(struct work_struct *work)
...@@ -120,9 +123,10 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) ...@@ -120,9 +123,10 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
spin_lock(&timelines->lock); spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
if (!mutex_trylock(&tl->mutex)) {
active_count++; /* report busy to caller, try again? */ active_count++; /* report busy to caller, try again? */
if (!mutex_trylock(&tl->mutex))
continue; continue;
}
intel_timeline_get(tl); intel_timeline_get(tl);
GEM_BUG_ON(!atomic_read(&tl->active_count)); GEM_BUG_ON(!atomic_read(&tl->active_count));
...@@ -147,10 +151,10 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) ...@@ -147,10 +151,10 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
/* Resume iteration after dropping lock */ /* Resume iteration after dropping lock */
list_safe_reset_next(tl, tn, link); list_safe_reset_next(tl, tn, link);
if (atomic_dec_and_test(&tl->active_count)) { if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link); list_del(&tl->link);
active_count--; else
} active_count += i915_active_fence_isset(&tl->last_request);
mutex_unlock(&tl->mutex); mutex_unlock(&tl->mutex);
...@@ -165,7 +169,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) ...@@ -165,7 +169,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
list_for_each_entry_safe(tl, tn, &free, link) list_for_each_entry_safe(tl, tn, &free, link)
__intel_timeline_free(&tl->kref); __intel_timeline_free(&tl->kref);
flush_submission(gt); if (flush_submission(gt))
active_count++;
return active_count ? timeout : 0; return active_count ? timeout : 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment