Commit 12ca695d authored by Maarten Lankhorst, committed by Daniel Vetter

drm/i915: Do not share hwsp across contexts any more, v8.

Instead of sharing pages with breadcrumbs, give each timeline a
single page. This allows unrelated timelines not to share locks
any more during command submission.

As an additional benefit, seqno wraparound no longer requires
i915_vma_pin, which means we no longer need to worry about a
potential -EDEADLK at a point where we are ready to submit.

Changes since v1:
- Fix erroneous i915_vma_acquire that should be a i915_vma_release (ickle).
- Extra check for completion in intel_read_hwsp().
Changes since v2:
- Fix inconsistent indent in hwsp_alloc() (kbuild)
- memset entire cacheline to 0.
Changes since v3:
- Do same in intel_timeline_reset_seqno(), and clflush for good measure.
Changes since v4:
- Use refcounting on timeline, instead of relying on i915_active.
- Fix waiting on kernel requests.
Changes since v5:
- Bump amount of slots to maximum (256), for best wraparounds.
- Add hwsp_offset to i915_request to fix potential wraparound hang.
- Ensure timeline wrap test works with the changes.
- Assign hwsp in intel_timeline_read_hwsp() within the rcu lock to
  fix a hang.
Changes since v6:
- Rename i915_request_active_offset() to i915_request_active_seqno(),
  and elaborate the function. (tvrtko)
Changes since v7:
- Move hunk to where it belongs. (jekstrand)
- Replace CACHELINE_BYTES with TIMELINE_SEQNO_BYTES. (jekstrand)
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@intel.com> #v1
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-2-maarten.lankhorst@linux.intel.com
parent 547be6a4
...@@ -143,7 +143,7 @@ static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs, ...@@ -143,7 +143,7 @@ static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs,
int flush, int post) int flush, int post)
{ {
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH; *cs++ = MI_FLUSH;
......
...@@ -161,7 +161,7 @@ u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs) ...@@ -161,7 +161,7 @@ u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_DC_FLUSH_ENABLE | PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_CS_STALL); PIPE_CONTROL_CS_STALL);
*cs++ = i915_request_active_timeline(rq)->hwsp_offset | *cs++ = i915_request_active_seqno(rq) |
PIPE_CONTROL_GLOBAL_GTT; PIPE_CONTROL_GLOBAL_GTT;
*cs++ = rq->fence.seqno; *cs++ = rq->fence.seqno;
...@@ -359,7 +359,7 @@ u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs) ...@@ -359,7 +359,7 @@ u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL); PIPE_CONTROL_CS_STALL);
*cs++ = i915_request_active_timeline(rq)->hwsp_offset; *cs++ = i915_request_active_seqno(rq);
*cs++ = rq->fence.seqno; *cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT; *cs++ = MI_USER_INTERRUPT;
...@@ -374,7 +374,7 @@ u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs) ...@@ -374,7 +374,7 @@ u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs) u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{ {
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
...@@ -394,7 +394,7 @@ u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs) ...@@ -394,7 +394,7 @@ u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
int i; int i;
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB | *cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
......
...@@ -338,15 +338,14 @@ static u32 preempt_address(struct intel_engine_cs *engine) ...@@ -338,15 +338,14 @@ static u32 preempt_address(struct intel_engine_cs *engine)
static u32 hwsp_offset(const struct i915_request *rq) static u32 hwsp_offset(const struct i915_request *rq)
{ {
const struct intel_timeline_cacheline *cl; const struct intel_timeline *tl;
/* Before the request is executed, the timeline/cachline is fixed */ /* Before the request is executed, the timeline is fixed */
tl = rcu_dereference_protected(rq->timeline,
!i915_request_signaled(rq));
cl = rcu_dereference_protected(rq->hwsp_cacheline, 1); /* See the comment in i915_request_active_seqno(). */
if (cl) return page_mask_bits(tl->hwsp_offset) + offset_in_page(rq->hwsp_seqno);
return cl->ggtt_offset;
return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
} }
int gen8_emit_init_breadcrumb(struct i915_request *rq) int gen8_emit_init_breadcrumb(struct i915_request *rq)
......
...@@ -763,6 +763,7 @@ static int measure_breadcrumb_dw(struct intel_context *ce) ...@@ -763,6 +763,7 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
frame->rq.engine = engine; frame->rq.engine = engine;
frame->rq.context = ce; frame->rq.context = ce;
rcu_assign_pointer(frame->rq.timeline, ce->timeline); rcu_assign_pointer(frame->rq.timeline, ce->timeline);
frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;
frame->ring.vaddr = frame->cs; frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs); frame->ring.size = sizeof(frame->cs);
......
...@@ -39,10 +39,6 @@ struct intel_gt { ...@@ -39,10 +39,6 @@ struct intel_gt {
struct intel_gt_timelines { struct intel_gt_timelines {
spinlock_t lock; /* protects active_list */ spinlock_t lock; /* protects active_list */
struct list_head active_list; struct list_head active_list;
/* Pack multiple timelines' seqnos into the same page */
spinlock_t hwsp_lock;
struct list_head hwsp_free_list;
} timelines; } timelines;
struct intel_gt_requests { struct intel_gt_requests {
......
This diff is collapsed.
...@@ -18,7 +18,6 @@ ...@@ -18,7 +18,6 @@
struct i915_vma; struct i915_vma;
struct i915_syncmap; struct i915_syncmap;
struct intel_gt; struct intel_gt;
struct intel_timeline_hwsp;
struct intel_timeline { struct intel_timeline {
u64 fence_context; u64 fence_context;
...@@ -45,12 +44,11 @@ struct intel_timeline { ...@@ -45,12 +44,11 @@ struct intel_timeline {
atomic_t pin_count; atomic_t pin_count;
atomic_t active_count; atomic_t active_count;
void *hwsp_map;
const u32 *hwsp_seqno; const u32 *hwsp_seqno;
struct i915_vma *hwsp_ggtt; struct i915_vma *hwsp_ggtt;
u32 hwsp_offset; u32 hwsp_offset;
struct intel_timeline_cacheline *hwsp_cacheline;
bool has_initial_breadcrumb; bool has_initial_breadcrumb;
/** /**
...@@ -67,6 +65,8 @@ struct intel_timeline { ...@@ -67,6 +65,8 @@ struct intel_timeline {
*/ */
struct i915_active_fence last_request; struct i915_active_fence last_request;
struct i915_active active;
/** A chain of completed timelines ready for early retirement. */ /** A chain of completed timelines ready for early retirement. */
struct intel_timeline *retire; struct intel_timeline *retire;
...@@ -90,15 +90,4 @@ struct intel_timeline { ...@@ -90,15 +90,4 @@ struct intel_timeline {
struct rcu_head rcu; struct rcu_head rcu;
}; };
struct intel_timeline_cacheline {
struct i915_active active;
struct intel_timeline_hwsp *hwsp;
void *vaddr;
u32 ggtt_offset;
struct rcu_head rcu;
};
#endif /* __I915_TIMELINE_TYPES_H__ */ #endif /* __I915_TIMELINE_TYPES_H__ */
...@@ -42,6 +42,9 @@ static int perf_end(struct intel_gt *gt) ...@@ -42,6 +42,9 @@ static int perf_end(struct intel_gt *gt)
static int write_timestamp(struct i915_request *rq, int slot) static int write_timestamp(struct i915_request *rq, int slot)
{ {
struct intel_timeline *tl =
rcu_dereference_protected(rq->timeline,
!i915_request_signaled(rq));
u32 cmd; u32 cmd;
u32 *cs; u32 *cs;
...@@ -54,7 +57,7 @@ static int write_timestamp(struct i915_request *rq, int slot) ...@@ -54,7 +57,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
cmd++; cmd++;
*cs++ = cmd; *cs++ = cmd;
*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base)); *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
*cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32); *cs++ = tl->hwsp_offset + slot * sizeof(u32);
*cs++ = 0; *cs++ = 0;
intel_ring_advance(rq, cs); intel_ring_advance(rq, cs);
......
...@@ -35,10 +35,11 @@ static unsigned long hwsp_cacheline(struct intel_timeline *tl) ...@@ -35,10 +35,11 @@ static unsigned long hwsp_cacheline(struct intel_timeline *tl)
{ {
unsigned long address = (unsigned long)page_address(hwsp_page(tl)); unsigned long address = (unsigned long)page_address(hwsp_page(tl));
return (address + tl->hwsp_offset) / CACHELINE_BYTES; return (address + offset_in_page(tl->hwsp_offset)) / TIMELINE_SEQNO_BYTES;
} }
#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES) /* Only half of seqno's are usable, see __intel_timeline_get_seqno() */
#define CACHELINES_PER_PAGE (PAGE_SIZE / TIMELINE_SEQNO_BYTES / 2)
struct mock_hwsp_freelist { struct mock_hwsp_freelist {
struct intel_gt *gt; struct intel_gt *gt;
...@@ -666,7 +667,7 @@ static int live_hwsp_wrap(void *arg) ...@@ -666,7 +667,7 @@ static int live_hwsp_wrap(void *arg)
if (IS_ERR(tl)) if (IS_ERR(tl))
return PTR_ERR(tl); return PTR_ERR(tl);
if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) if (!tl->has_initial_breadcrumb)
goto out_free; goto out_free;
err = intel_timeline_pin(tl, NULL); err = intel_timeline_pin(tl, NULL);
...@@ -833,12 +834,26 @@ static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt) ...@@ -833,12 +834,26 @@ static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt)
return 0; return 0;
} }
static void switch_tl_lock(struct i915_request *from, struct i915_request *to)
{
/* some light mutex juggling required; think co-routines */
if (from) {
lockdep_unpin_lock(&from->context->timeline->mutex, from->cookie);
mutex_unlock(&from->context->timeline->mutex);
}
if (to) {
mutex_lock(&to->context->timeline->mutex);
to->cookie = lockdep_pin_lock(&to->context->timeline->mutex);
}
}
static int create_watcher(struct hwsp_watcher *w, static int create_watcher(struct hwsp_watcher *w,
struct intel_engine_cs *engine, struct intel_engine_cs *engine,
int ringsz) int ringsz)
{ {
struct intel_context *ce; struct intel_context *ce;
struct intel_timeline *tl;
ce = intel_context_create(engine); ce = intel_context_create(engine);
if (IS_ERR(ce)) if (IS_ERR(ce))
...@@ -851,11 +866,8 @@ static int create_watcher(struct hwsp_watcher *w, ...@@ -851,11 +866,8 @@ static int create_watcher(struct hwsp_watcher *w,
return PTR_ERR(w->rq); return PTR_ERR(w->rq);
w->addr = i915_ggtt_offset(w->vma); w->addr = i915_ggtt_offset(w->vma);
tl = w->rq->context->timeline;
/* some light mutex juggling required; think co-routines */ switch_tl_lock(w->rq, NULL);
lockdep_unpin_lock(&tl->mutex, w->rq->cookie);
mutex_unlock(&tl->mutex);
return 0; return 0;
} }
...@@ -864,15 +876,13 @@ static int check_watcher(struct hwsp_watcher *w, const char *name, ...@@ -864,15 +876,13 @@ static int check_watcher(struct hwsp_watcher *w, const char *name,
bool (*op)(u32 hwsp, u32 seqno)) bool (*op)(u32 hwsp, u32 seqno))
{ {
struct i915_request *rq = fetch_and_zero(&w->rq); struct i915_request *rq = fetch_and_zero(&w->rq);
struct intel_timeline *tl = rq->context->timeline;
u32 offset, end; u32 offset, end;
int err; int err;
GEM_BUG_ON(w->addr - i915_ggtt_offset(w->vma) > w->vma->size); GEM_BUG_ON(w->addr - i915_ggtt_offset(w->vma) > w->vma->size);
i915_request_get(rq); i915_request_get(rq);
mutex_lock(&tl->mutex); switch_tl_lock(NULL, rq);
rq->cookie = lockdep_pin_lock(&tl->mutex);
i915_request_add(rq); i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ) < 0) { if (i915_request_wait(rq, 0, HZ) < 0) {
...@@ -901,10 +911,7 @@ static int check_watcher(struct hwsp_watcher *w, const char *name, ...@@ -901,10 +911,7 @@ static int check_watcher(struct hwsp_watcher *w, const char *name,
static void cleanup_watcher(struct hwsp_watcher *w) static void cleanup_watcher(struct hwsp_watcher *w)
{ {
if (w->rq) { if (w->rq) {
struct intel_timeline *tl = w->rq->context->timeline; switch_tl_lock(NULL, w->rq);
mutex_lock(&tl->mutex);
w->rq->cookie = lockdep_pin_lock(&tl->mutex);
i915_request_add(w->rq); i915_request_add(w->rq);
} }
...@@ -942,7 +949,7 @@ static struct i915_request *wrap_timeline(struct i915_request *rq) ...@@ -942,7 +949,7 @@ static struct i915_request *wrap_timeline(struct i915_request *rq)
} }
i915_request_put(rq); i915_request_put(rq);
rq = intel_context_create_request(ce); rq = i915_request_create(ce);
if (IS_ERR(rq)) if (IS_ERR(rq))
return rq; return rq;
...@@ -977,7 +984,7 @@ static int live_hwsp_read(void *arg) ...@@ -977,7 +984,7 @@ static int live_hwsp_read(void *arg)
if (IS_ERR(tl)) if (IS_ERR(tl))
return PTR_ERR(tl); return PTR_ERR(tl);
if (!tl->hwsp_cacheline) if (!tl->has_initial_breadcrumb)
goto out_free; goto out_free;
for (i = 0; i < ARRAY_SIZE(watcher); i++) { for (i = 0; i < ARRAY_SIZE(watcher); i++) {
...@@ -999,7 +1006,7 @@ static int live_hwsp_read(void *arg) ...@@ -999,7 +1006,7 @@ static int live_hwsp_read(void *arg)
do { do {
struct i915_sw_fence *submit; struct i915_sw_fence *submit;
struct i915_request *rq; struct i915_request *rq;
u32 hwsp; u32 hwsp, dummy;
submit = heap_fence_create(GFP_KERNEL); submit = heap_fence_create(GFP_KERNEL);
if (!submit) { if (!submit) {
...@@ -1017,14 +1024,26 @@ static int live_hwsp_read(void *arg) ...@@ -1017,14 +1024,26 @@ static int live_hwsp_read(void *arg)
goto out; goto out;
} }
/* Skip to the end, saving 30 minutes of nops */
tl->seqno = -10u + 2 * (count & 3);
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
ce->timeline = intel_timeline_get(tl); ce->timeline = intel_timeline_get(tl);
rq = intel_context_create_request(ce); /* Ensure timeline is mapped, done during first pin */
err = intel_context_pin(ce);
if (err) {
intel_context_put(ce);
goto out;
}
/*
* Start at a new wrap, and set seqno right before another wrap,
* saving 30 minutes of nops
*/
tl->seqno = -12u + 2 * (count & 3);
__intel_timeline_get_seqno(tl, &dummy);
rq = i915_request_create(ce);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
err = PTR_ERR(rq); err = PTR_ERR(rq);
intel_context_unpin(ce);
intel_context_put(ce); intel_context_put(ce);
goto out; goto out;
} }
...@@ -1034,32 +1053,35 @@ static int live_hwsp_read(void *arg) ...@@ -1034,32 +1053,35 @@ static int live_hwsp_read(void *arg)
GFP_KERNEL); GFP_KERNEL);
if (err < 0) { if (err < 0) {
i915_request_add(rq); i915_request_add(rq);
intel_context_unpin(ce);
intel_context_put(ce); intel_context_put(ce);
goto out; goto out;
} }
mutex_lock(&watcher[0].rq->context->timeline->mutex); switch_tl_lock(rq, watcher[0].rq);
err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp); err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp);
if (err == 0) if (err == 0)
err = emit_read_hwsp(watcher[0].rq, /* before */ err = emit_read_hwsp(watcher[0].rq, /* before */
rq->fence.seqno, hwsp, rq->fence.seqno, hwsp,
&watcher[0].addr); &watcher[0].addr);
mutex_unlock(&watcher[0].rq->context->timeline->mutex); switch_tl_lock(watcher[0].rq, rq);
if (err) { if (err) {
i915_request_add(rq); i915_request_add(rq);
intel_context_unpin(ce);
intel_context_put(ce); intel_context_put(ce);
goto out; goto out;
} }
mutex_lock(&watcher[1].rq->context->timeline->mutex); switch_tl_lock(rq, watcher[1].rq);
err = intel_timeline_read_hwsp(rq, watcher[1].rq, &hwsp); err = intel_timeline_read_hwsp(rq, watcher[1].rq, &hwsp);
if (err == 0) if (err == 0)
err = emit_read_hwsp(watcher[1].rq, /* after */ err = emit_read_hwsp(watcher[1].rq, /* after */
rq->fence.seqno, hwsp, rq->fence.seqno, hwsp,
&watcher[1].addr); &watcher[1].addr);
mutex_unlock(&watcher[1].rq->context->timeline->mutex); switch_tl_lock(watcher[1].rq, rq);
if (err) { if (err) {
i915_request_add(rq); i915_request_add(rq);
intel_context_unpin(ce);
intel_context_put(ce); intel_context_put(ce);
goto out; goto out;
} }
...@@ -1068,6 +1090,7 @@ static int live_hwsp_read(void *arg) ...@@ -1068,6 +1090,7 @@ static int live_hwsp_read(void *arg)
i915_request_add(rq); i915_request_add(rq);
rq = wrap_timeline(rq); rq = wrap_timeline(rq);
intel_context_unpin(ce);
intel_context_put(ce); intel_context_put(ce);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
err = PTR_ERR(rq); err = PTR_ERR(rq);
...@@ -1107,8 +1130,8 @@ static int live_hwsp_read(void *arg) ...@@ -1107,8 +1130,8 @@ static int live_hwsp_read(void *arg)
3 * watcher[1].rq->ring->size) 3 * watcher[1].rq->ring->size)
break; break;
} while (!__igt_timeout(end_time, NULL)); } while (!__igt_timeout(end_time, NULL) &&
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, 0xdeadbeef); count < (PAGE_SIZE / TIMELINE_SEQNO_BYTES - 1) / 2);
pr_info("%s: simulated %lu wraps\n", engine->name, count); pr_info("%s: simulated %lu wraps\n", engine->name, count);
err = check_watcher(&watcher[1], "after", cmp_gte); err = check_watcher(&watcher[1], "after", cmp_gte);
...@@ -1153,9 +1176,7 @@ static int live_hwsp_rollover_kernel(void *arg) ...@@ -1153,9 +1176,7 @@ static int live_hwsp_rollover_kernel(void *arg)
} }
GEM_BUG_ON(i915_active_fence_isset(&tl->last_request)); GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
tl->seqno = 0; tl->seqno = -2u;
timeline_rollback(tl);
timeline_rollback(tl);
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno); WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
for (i = 0; i < ARRAY_SIZE(rq); i++) { for (i = 0; i < ARRAY_SIZE(rq); i++) {
...@@ -1235,11 +1256,10 @@ static int live_hwsp_rollover_user(void *arg) ...@@ -1235,11 +1256,10 @@ static int live_hwsp_rollover_user(void *arg)
goto out; goto out;
tl = ce->timeline; tl = ce->timeline;
if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) if (!tl->has_initial_breadcrumb)
goto out; goto out;
timeline_rollback(tl); tl->seqno = -4u;
timeline_rollback(tl);
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno); WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
for (i = 0; i < ARRAY_SIZE(rq); i++) { for (i = 0; i < ARRAY_SIZE(rq); i++) {
......
...@@ -863,7 +863,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) ...@@ -863,7 +863,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->fence.seqno = seqno; rq->fence.seqno = seqno;
RCU_INIT_POINTER(rq->timeline, tl); RCU_INIT_POINTER(rq->timeline, tl);
RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno; rq->hwsp_seqno = tl->hwsp_seqno;
GEM_BUG_ON(__i915_request_is_complete(rq)); GEM_BUG_ON(__i915_request_is_complete(rq));
...@@ -1108,9 +1107,6 @@ emit_semaphore_wait(struct i915_request *to, ...@@ -1108,9 +1107,6 @@ emit_semaphore_wait(struct i915_request *to,
if (i915_request_has_initial_breadcrumb(to)) if (i915_request_has_initial_breadcrumb(to))
goto await_fence; goto await_fence;
if (!rcu_access_pointer(from->hwsp_cacheline))
goto await_fence;
/* /*
* If this or its dependents are waiting on an external fence * If this or its dependents are waiting on an external fence
* that may fail catastrophically, then we want to avoid using * that may fail catastrophically, then we want to avoid using
......
...@@ -237,16 +237,6 @@ struct i915_request { ...@@ -237,16 +237,6 @@ struct i915_request {
*/ */
const u32 *hwsp_seqno; const u32 *hwsp_seqno;
/*
* If we need to access the timeline's seqno for this request in
* another request, we need to keep a read reference to this associated
* cacheline, so that we do not free and recycle it before the foreign
* observers have completed. Hence, we keep a pointer to the cacheline
* inside the timeline's HWSP vma, but it is only valid while this
* request has not completed and guarded by the timeline mutex.
*/
struct intel_timeline_cacheline __rcu *hwsp_cacheline;
/** Position in the ring of the start of the request */ /** Position in the ring of the start of the request */
u32 head; u32 head;
...@@ -616,4 +606,25 @@ i915_request_active_timeline(const struct i915_request *rq) ...@@ -616,4 +606,25 @@ i915_request_active_timeline(const struct i915_request *rq)
lockdep_is_held(&rq->engine->active.lock)); lockdep_is_held(&rq->engine->active.lock));
} }
static inline u32
i915_request_active_seqno(const struct i915_request *rq)
{
u32 hwsp_phys_base =
page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);
/*
* Because of wraparound, we cannot simply take tl->hwsp_offset,
* but instead use the fact that the relative for vaddr is the
* offset as for hwsp_offset. Take the top bits from tl->hwsp_offset
* and combine them with the relative offset in rq->hwsp_seqno.
*
 * As rq->hwsp_seqno is rewritten when signaled, this only works
* when the request isn't signaled yet, but at that point you
* no longer need the offset.
*/
return hwsp_phys_base + hwsp_relative_offset;
}
#endif /* I915_REQUEST_H */ #endif /* I915_REQUEST_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment