Commit 338aade9 authored by Chris Wilson's avatar Chris Wilson

drm/i915/gt: Convert timeline tracking to spinlock

Convert the active_list manipulation of timelines to use spinlocks so
that we can perform the updates from underneath a quick interrupt
callback, if need be.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190815205709.24285-2-chris@chris-wilson.co.uk
parent 531958f6
...@@ -40,7 +40,7 @@ struct intel_gt { ...@@ -40,7 +40,7 @@ struct intel_gt {
struct intel_uc uc; struct intel_uc uc;
struct intel_gt_timelines { struct intel_gt_timelines {
struct mutex mutex; /* protects list */ spinlock_t lock; /* protects active_list */
struct list_head active_list; struct list_head active_list;
/* Pack multiple timelines' seqnos into the same page */ /* Pack multiple timelines' seqnos into the same page */
......
...@@ -811,7 +811,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) ...@@ -811,7 +811,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
* *
* No more can be submitted until we reset the wedged bit. * No more can be submitted until we reset the wedged bit.
*/ */
mutex_lock(&timelines->mutex); spin_lock(&timelines->lock);
list_for_each_entry(tl, &timelines->active_list, link) { list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq; struct i915_request *rq;
...@@ -819,6 +819,8 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) ...@@ -819,6 +819,8 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
if (!rq) if (!rq)
continue; continue;
spin_unlock(&timelines->lock);
/* /*
* All internal dependencies (i915_requests) will have * All internal dependencies (i915_requests) will have
* been flushed by the set-wedge, but we may be stuck waiting * been flushed by the set-wedge, but we may be stuck waiting
...@@ -828,8 +830,12 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) ...@@ -828,8 +830,12 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
*/ */
dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT); dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
i915_request_put(rq); i915_request_put(rq);
/* Restart iteration after dropping lock */
spin_lock(&timelines->lock);
tl = list_entry(&timelines->active_list, typeof(*tl), link);
} }
mutex_unlock(&timelines->mutex); spin_unlock(&timelines->lock);
intel_gt_sanitize(gt, false); intel_gt_sanitize(gt, false);
......
...@@ -266,7 +266,7 @@ static void timelines_init(struct intel_gt *gt) ...@@ -266,7 +266,7 @@ static void timelines_init(struct intel_gt *gt)
{ {
struct intel_gt_timelines *timelines = &gt->timelines; struct intel_gt_timelines *timelines = &gt->timelines;
mutex_init(&timelines->mutex); spin_lock_init(&timelines->lock);
INIT_LIST_HEAD(&timelines->active_list); INIT_LIST_HEAD(&timelines->active_list);
spin_lock_init(&timelines->hwsp_lock); spin_lock_init(&timelines->hwsp_lock);
...@@ -345,9 +345,9 @@ void intel_timeline_enter(struct intel_timeline *tl) ...@@ -345,9 +345,9 @@ void intel_timeline_enter(struct intel_timeline *tl)
return; return;
GEM_BUG_ON(!tl->active_count); /* overflow? */ GEM_BUG_ON(!tl->active_count); /* overflow? */
mutex_lock(&timelines->mutex); spin_lock(&timelines->lock);
list_add(&tl->link, &timelines->active_list); list_add(&tl->link, &timelines->active_list);
mutex_unlock(&timelines->mutex); spin_unlock(&timelines->lock);
} }
void intel_timeline_exit(struct intel_timeline *tl) void intel_timeline_exit(struct intel_timeline *tl)
...@@ -358,9 +358,9 @@ void intel_timeline_exit(struct intel_timeline *tl) ...@@ -358,9 +358,9 @@ void intel_timeline_exit(struct intel_timeline *tl)
if (--tl->active_count) if (--tl->active_count)
return; return;
mutex_lock(&timelines->mutex); spin_lock(&timelines->lock);
list_del(&tl->link); list_del(&tl->link);
mutex_unlock(&timelines->mutex); spin_unlock(&timelines->lock);
/* /*
* Since this timeline is idle, all barriers upon which we were waiting * Since this timeline is idle, all barriers upon which we were waiting
...@@ -548,8 +548,6 @@ static void timelines_fini(struct intel_gt *gt) ...@@ -548,8 +548,6 @@ static void timelines_fini(struct intel_gt *gt)
GEM_BUG_ON(!list_empty(&timelines->active_list)); GEM_BUG_ON(!list_empty(&timelines->active_list));
GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list)); GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
mutex_destroy(&timelines->mutex);
} }
void intel_timelines_fini(struct drm_i915_private *i915) void intel_timelines_fini(struct drm_i915_private *i915)
......
...@@ -897,18 +897,18 @@ static long ...@@ -897,18 +897,18 @@ static long
wait_for_timelines(struct drm_i915_private *i915, wait_for_timelines(struct drm_i915_private *i915,
unsigned int flags, long timeout) unsigned int flags, long timeout)
{ {
struct intel_gt_timelines *gt = &i915->gt.timelines; struct intel_gt_timelines *timelines = &i915->gt.timelines;
struct intel_timeline *tl; struct intel_timeline *tl;
mutex_lock(&gt->mutex); spin_lock(&timelines->lock);
list_for_each_entry(tl, &gt->active_list, link) { list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq; struct i915_request *rq;
rq = i915_active_request_get_unlocked(&tl->last_request); rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq) if (!rq)
continue; continue;
mutex_unlock(&gt->mutex); spin_unlock(&timelines->lock);
/* /*
* "Race-to-idle". * "Race-to-idle".
...@@ -928,10 +928,10 @@ wait_for_timelines(struct drm_i915_private *i915, ...@@ -928,10 +928,10 @@ wait_for_timelines(struct drm_i915_private *i915,
return timeout; return timeout;
/* restart after reacquiring the lock */ /* restart after reacquiring the lock */
mutex_lock(&gt->mutex); spin_lock(&timelines->lock);
tl = list_entry(&gt->active_list, typeof(*tl), link); tl = list_entry(&timelines->active_list, typeof(*tl), link);
} }
mutex_unlock(&gt->mutex); spin_unlock(&timelines->lock);
return timeout; return timeout;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment