Commit 88cec497 authored by Chris Wilson

drm/i915/gt: Declare timeline.lock to be irq-free

Now that we never allow the intel_wakeref callbacks to be invoked from
interrupt context, we do not need the irqsafe spinlock for the timeline.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191120170858.3965380-1-chris@chris-wilson.co.uk
parent 5cba2884
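
For context, the two locking forms differ only in whether local interrupts are disabled while the lock is held: spin_lock_irqsave() saves and disables them so the lock may also be taken from hard-irq context, while spin_lock() leaves them enabled and is sufficient once every caller runs in process context. A minimal, hypothetical sketch of the conversion below (the helper names are invented for illustration, not taken from the driver):

/* Hypothetical before/after sketch; only the locking form differs. */
static void active_list_touch_irqsafe(struct intel_gt_timelines *timelines)
{
	unsigned long flags;

	spin_lock_irqsave(&timelines->lock, flags);	/* disables local irqs */
	/* ... walk or edit timelines->active_list ... */
	spin_unlock_irqrestore(&timelines->lock, flags);
}

static void active_list_touch(struct intel_gt_timelines *timelines)
{
	spin_lock(&timelines->lock);			/* irqs stay enabled */
	/* ... walk or edit timelines->active_list ... */
	spin_unlock(&timelines->lock);
}
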
@@ -33,7 +33,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 	struct intel_timeline *tl, *tn;
-	unsigned long flags;
 	bool interruptible;
 	LIST_HEAD(free);
@@ -43,7 +42,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 	flush_submission(gt); /* kick the ksoftirqd tasklets */
-	spin_lock_irqsave(&timelines->lock, flags);
+	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
 		if (!mutex_trylock(&tl->mutex))
 			continue;
@@ -51,7 +50,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 		intel_timeline_get(tl);
 		GEM_BUG_ON(!atomic_read(&tl->active_count));
 		atomic_inc(&tl->active_count); /* pin the list element */
-		spin_unlock_irqrestore(&timelines->lock, flags);
+		spin_unlock(&timelines->lock);
 		if (timeout > 0) {
 			struct dma_fence *fence;
@@ -67,7 +66,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 		retire_requests(tl);
-		spin_lock_irqsave(&timelines->lock, flags);
+		spin_lock(&timelines->lock);
 		/* Resume iteration after dropping lock */
 		list_safe_reset_next(tl, tn, link);
@@ -82,7 +81,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 			list_add(&tl->link, &free);
 		}
 	}
-	spin_unlock_irqrestore(&timelines->lock, flags);
+	spin_unlock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
...
@@ -831,7 +831,6 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 	struct intel_timeline *tl;
-	unsigned long flags;
 	bool ok;
 	if (!test_bit(I915_WEDGED, &gt->reset.flags))
@@ -853,7 +852,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
-	spin_lock_irqsave(&timelines->lock, flags);
+	spin_lock(&timelines->lock);
 	list_for_each_entry(tl, &timelines->active_list, link) {
 		struct dma_fence *fence;
@@ -861,7 +860,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 		if (!fence)
 			continue;
-		spin_unlock_irqrestore(&timelines->lock, flags);
+		spin_unlock(&timelines->lock);
 		/*
 		 * All internal dependencies (i915_requests) will have
@@ -874,10 +873,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 		dma_fence_put(fence);
 		/* Restart iteration after droping lock */
-		spin_lock_irqsave(&timelines->lock, flags);
+		spin_lock(&timelines->lock);
 		tl = list_entry(&timelines->active_list, typeof(*tl), link);
 	}
-	spin_unlock_irqrestore(&timelines->lock, flags);
+	spin_unlock(&timelines->lock);
 	/* We must reset pending GPU events before restoring our submission */
 	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
...
@@ -332,7 +332,6 @@ int intel_timeline_pin(struct intel_timeline *tl)
 void intel_timeline_enter(struct intel_timeline *tl)
 {
 	struct intel_gt_timelines *timelines = &tl->gt->timelines;
-	unsigned long flags;
 	/*
 	 * Pretend we are serialised by the timeline->mutex.
@@ -358,16 +357,15 @@ void intel_timeline_enter(struct intel_timeline *tl)
 	if (atomic_add_unless(&tl->active_count, 1, 0))
 		return;
-	spin_lock_irqsave(&timelines->lock, flags);
+	spin_lock(&timelines->lock);
 	if (!atomic_fetch_inc(&tl->active_count))
 		list_add_tail(&tl->link, &timelines->active_list);
-	spin_unlock_irqrestore(&timelines->lock, flags);
+	spin_unlock(&timelines->lock);
 }
 void intel_timeline_exit(struct intel_timeline *tl)
 {
 	struct intel_gt_timelines *timelines = &tl->gt->timelines;
-	unsigned long flags;
 	/* See intel_timeline_enter() */
 	lockdep_assert_held(&tl->mutex);
@@ -376,10 +374,10 @@ void intel_timeline_exit(struct intel_timeline *tl)
 	if (atomic_add_unless(&tl->active_count, -1, 1))
 		return;
-	spin_lock_irqsave(&timelines->lock, flags);
+	spin_lock(&timelines->lock);
 	if (atomic_dec_and_test(&tl->active_count))
 		list_del(&tl->link);
-	spin_unlock_irqrestore(&timelines->lock, flags);
+	spin_unlock(&timelines->lock);
 	/*
 	 * Since this timeline is idle, all bariers upon which we were waiting
...
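
Why dropping the irqsave variant is safe: once no intel_wakeref callback, and hence no user of timelines->lock, can run in interrupt context, an interrupt can never try to acquire the lock on a CPU that already holds it, which is precisely the deadlock the irq-saving form exists to prevent. A hypothetical sketch of that hazard (illustrative only, not real driver code):

/*
 * Hypothetical illustration: if an interrupt handler could still take
 * timelines->lock, it might fire on a CPU that already holds the lock via
 * plain spin_lock() and spin forever waiting for it. Ruling out irq-context
 * callers is what makes the plain lock form sufficient here.
 */
static void holder(struct intel_gt_timelines *timelines)
{
	spin_lock(&timelines->lock);	/* local interrupts stay enabled */
	/* an irq arriving here that also took timelines->lock would deadlock */
	spin_unlock(&timelines->lock);
}
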