Commit ccb23d2d authored by Chris Wilson

drm/i915/gt: Guard timeline pinning without relying on struct_mutex

In preparation for removing struct_mutex from around context retirement,
we need to make timeline pinning and unpinning safe. Since multiple
engines/contexts can share a single timeline, we cannot rely on
borrowing the context mutex (otherwise we could state that the timeline
is only pinned/unpinned inside the context pin/unpin and so guarded by
it). However, we only perform a sequence of atomic operations inside the
timeline pin/unpin and the sequence of those operations is safe for a
concurrent unpin / pin, so we can relax the struct_mutex requirement.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190815205709.24285-3-chris@chris-wilson.co.uk
parent 338aade9
...@@ -211,9 +211,9 @@ int intel_timeline_init(struct intel_timeline *timeline, ...@@ -211,9 +211,9 @@ int intel_timeline_init(struct intel_timeline *timeline,
void *vaddr; void *vaddr;
kref_init(&timeline->kref); kref_init(&timeline->kref);
atomic_set(&timeline->pin_count, 0);
timeline->gt = gt; timeline->gt = gt;
timeline->pin_count = 0;
timeline->has_initial_breadcrumb = !hwsp; timeline->has_initial_breadcrumb = !hwsp;
timeline->hwsp_cacheline = NULL; timeline->hwsp_cacheline = NULL;
...@@ -280,7 +280,7 @@ void intel_timelines_init(struct drm_i915_private *i915) ...@@ -280,7 +280,7 @@ void intel_timelines_init(struct drm_i915_private *i915)
void intel_timeline_fini(struct intel_timeline *timeline) void intel_timeline_fini(struct intel_timeline *timeline)
{ {
GEM_BUG_ON(timeline->pin_count); GEM_BUG_ON(atomic_read(&timeline->pin_count));
GEM_BUG_ON(!list_empty(&timeline->requests)); GEM_BUG_ON(!list_empty(&timeline->requests));
if (timeline->hwsp_cacheline) if (timeline->hwsp_cacheline)
...@@ -314,33 +314,31 @@ int intel_timeline_pin(struct intel_timeline *tl) ...@@ -314,33 +314,31 @@ int intel_timeline_pin(struct intel_timeline *tl)
{ {
int err; int err;
if (tl->pin_count++) if (atomic_add_unless(&tl->pin_count, 1, 0))
return 0; return 0;
GEM_BUG_ON(!tl->pin_count);
GEM_BUG_ON(tl->active_count);
err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH); err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err) if (err)
goto unpin; return err;
tl->hwsp_offset = tl->hwsp_offset =
i915_ggtt_offset(tl->hwsp_ggtt) + i915_ggtt_offset(tl->hwsp_ggtt) +
offset_in_page(tl->hwsp_offset); offset_in_page(tl->hwsp_offset);
cacheline_acquire(tl->hwsp_cacheline); cacheline_acquire(tl->hwsp_cacheline);
if (atomic_fetch_inc(&tl->pin_count)) {
cacheline_release(tl->hwsp_cacheline);
__i915_vma_unpin(tl->hwsp_ggtt);
}
return 0; return 0;
unpin:
tl->pin_count = 0;
return err;
} }
void intel_timeline_enter(struct intel_timeline *tl) void intel_timeline_enter(struct intel_timeline *tl)
{ {
struct intel_gt_timelines *timelines = &tl->gt->timelines; struct intel_gt_timelines *timelines = &tl->gt->timelines;
GEM_BUG_ON(!tl->pin_count); GEM_BUG_ON(!atomic_read(&tl->pin_count));
if (tl->active_count++) if (tl->active_count++)
return; return;
GEM_BUG_ON(!tl->active_count); /* overflow? */ GEM_BUG_ON(!tl->active_count); /* overflow? */
...@@ -372,7 +370,7 @@ void intel_timeline_exit(struct intel_timeline *tl) ...@@ -372,7 +370,7 @@ void intel_timeline_exit(struct intel_timeline *tl)
static u32 timeline_advance(struct intel_timeline *tl) static u32 timeline_advance(struct intel_timeline *tl)
{ {
GEM_BUG_ON(!tl->pin_count); GEM_BUG_ON(!atomic_read(&tl->pin_count));
GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb); GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
return tl->seqno += 1 + tl->has_initial_breadcrumb; return tl->seqno += 1 + tl->has_initial_breadcrumb;
...@@ -523,11 +521,10 @@ int intel_timeline_read_hwsp(struct i915_request *from, ...@@ -523,11 +521,10 @@ int intel_timeline_read_hwsp(struct i915_request *from,
void intel_timeline_unpin(struct intel_timeline *tl) void intel_timeline_unpin(struct intel_timeline *tl)
{ {
GEM_BUG_ON(!tl->pin_count); GEM_BUG_ON(!atomic_read(&tl->pin_count));
if (--tl->pin_count) if (!atomic_dec_and_test(&tl->pin_count))
return; return;
GEM_BUG_ON(tl->active_count);
cacheline_release(tl->hwsp_cacheline); cacheline_release(tl->hwsp_cacheline);
__i915_vma_unpin(tl->hwsp_ggtt); __i915_vma_unpin(tl->hwsp_ggtt);
......
...@@ -41,7 +41,7 @@ struct intel_timeline { ...@@ -41,7 +41,7 @@ struct intel_timeline {
* but the pin_count is protected by a combination of serialisation * but the pin_count is protected by a combination of serialisation
* from the intel_context caller plus internal atomicity. * from the intel_context caller plus internal atomicity.
*/ */
unsigned int pin_count; atomic_t pin_count;
unsigned int active_count; unsigned int active_count;
const u32 *hwsp_seqno; const u32 *hwsp_seqno;
......
...@@ -34,13 +34,13 @@ ...@@ -34,13 +34,13 @@
static void mock_timeline_pin(struct intel_timeline *tl) static void mock_timeline_pin(struct intel_timeline *tl)
{ {
tl->pin_count++; atomic_inc(&tl->pin_count);
} }
static void mock_timeline_unpin(struct intel_timeline *tl) static void mock_timeline_unpin(struct intel_timeline *tl)
{ {
GEM_BUG_ON(!tl->pin_count); GEM_BUG_ON(!atomic_read(&tl->pin_count));
tl->pin_count--; atomic_dec(&tl->pin_count);
} }
static struct intel_ring *mock_ring(struct intel_engine_cs *engine) static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment