Commit 8ab3a381 authored by Chris Wilson, committed by Joonas Lahtinen

drm/i915/gt: Incrementally check for rewinding

In commit 5ba32c7b ("drm/i915/execlists: Always force a context
reload when rewinding RING_TAIL"), we placed the check for rewinding a
context on actually submitting the next request in that context. This
was so that we only had to check once, and could do so with precision,
avoiding as many forced restores as possible. For example, to ensure
that we can resubmit the same request a couple of times, we include a
small wa_tail such that on the next submission of the same request the
ring->tail appears to move forwards. This is very common, as it happens
for every lite-restore to fill the second port after a context switch.

However, intel_ring_direction() is limited in precision to movements of
up to half the ring size. The consequence is that if we try to unwind
many requests, we can exceed half the ring and flip the sense of the
direction, thus missing a forced restore. As no request can be greater
than half the ring (i.e. 2048 bytes in the smallest case), we can check
for rollback incrementally. As we check against the tail that would be
submitted, we do not lose any sensitivity and still allow lite-restores
for the simple case. We still need to double check upon submitting the
context, to allow for multiple preemptions and resubmissions.
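
To make the precision limit concrete, here is a minimal userspace
sketch modelled on how intel_ring_direction() and the ring->wrap field
work at this point in the series; the standalone struct, ring_direction()
helper and main() scaffolding are illustrative only:

	#include <stdio.h>

	/* Stand-in for the fields of struct intel_ring used by the check. */
	struct ring {
		unsigned int size;	/* power of two, e.g. 4096 */
		unsigned int wrap;	/* BITS_PER_TYPE(u32) - ilog2(size) */
	};

	/*
	 * >0: next is ahead of prev; <0: behind; 0: equal. The left shift
	 * pushes the ring-relative offset into the sign bit, so the verdict
	 * is only reliable for movements of less than size/2.
	 */
	static int ring_direction(const struct ring *r,
				  unsigned int next, unsigned int prev)
	{
		return (int)((next - prev) << r->wrap);
	}

	int main(void)
	{
		struct ring r = { .size = 4096, .wrap = 32 - 12 };

		/* The 8-byte wa_tail step on resubmission reads as forwards. */
		printf("%d\n", ring_direction(&r, 0x108, 0x100) > 0);	/* 1 */

		/* A small rewind reads as backwards, forcing a restore. */
		printf("%d\n", ring_direction(&r, 0x100, 0x108) < 0);	/* 1 */

		/*
		 * Rewind by more than size/2 and the sign flips: the move
		 * appears to be forwards and the forced restore is missed.
		 */
		printf("%d\n", ring_direction(&r, 0, 2056) > 0);	/* 1 (!) */
		return 0;
	}

The incremental check added to __unwind_incomplete_requests() below
sidesteps the flip by comparing one request at a time, each of which is
guaranteed to be smaller than half the ring.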

Fixes: 5ba32c7b ("drm/i915/execlists: Always force a context reload when rewinding RING_TAIL")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: <stable@vger.kernel.org> # v5.4+
Reviewed-by: Bruce Chang <yu.bruce.chang@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200609151723.12971-1-chris@chris-wilson.co.uk
(cherry picked from commit e36ba817)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent a43555ac
@@ -646,7 +646,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 struct measure_breadcrumb {
 	struct i915_request rq;
 	struct intel_ring ring;
-	u32 cs[1024];
+	u32 cs[2048];
 };

 static int measure_breadcrumb_dw(struct intel_context *ce)
@@ -668,6 +668,8 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
 	frame->ring.vaddr = frame->cs;
 	frame->ring.size = sizeof(frame->cs);
+	frame->ring.wrap =
+		BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
 	frame->ring.effective_size = frame->ring.size;
 	intel_ring_update_space(&frame->ring);

 	frame->rq.ring = &frame->ring;

@@ -1134,6 +1134,13 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 			list_move(&rq->sched.link, pl);
 			set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);

+			/* Check in case we rollback so far we wrap [size/2] */
+			if (intel_ring_direction(rq->ring,
+						 intel_ring_wrap(rq->ring,
+								 rq->tail),
+						 rq->ring->tail) > 0)
+				rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+
 			active = rq;
 		} else {
 			struct intel_engine_cs *owner = rq->context->engine;

@@ -1498,8 +1505,9 @@ static u64 execlists_update_context(struct i915_request *rq)
 	 * HW has a tendency to ignore us rewinding the TAIL to the end of
 	 * an earlier request.
 	 */
+	GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
+	prev = rq->ring->tail;
 	tail = intel_ring_set_tail(rq->ring, rq->tail);
-	prev = ce->lrc_reg_state[CTX_RING_TAIL];
 	if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
 		desc |= CTX_DESC_FORCE_RESTORE;
 	ce->lrc_reg_state[CTX_RING_TAIL] = tail;

@@ -4758,6 +4766,14 @@ static int gen12_emit_flush(struct i915_request *request, u32 mode)
 	return 0;
 }

+static void assert_request_valid(struct i915_request *rq)
+{
+	struct intel_ring *ring __maybe_unused = rq->ring;
+
+	/* Can we unwind this request without appearing to go forwards? */
+	GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
+}
+
 /*
  * Reserve space for 2 NOOPs at the end of each request to be
  * used as a workaround for not being allowed to do lite
@@ -4770,6 +4786,9 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
 	*cs++ = MI_NOOP;
 	request->wa_tail = intel_ring_offset(request, cs);

+	/* Check that entire request is less than half the ring */
+	assert_request_valid(request);
+
 	return cs;
 }

@@ -315,3 +315,7 @@ int intel_ring_cacheline_align(struct i915_request *rq)
 	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
 	return 0;
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_ring.c"
+#endif

@@ -18,6 +18,20 @@ struct live_mocs {
 	void *vaddr;
 };

+static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return ce;
+
+	/* We build large requests to read the registers from the ring */
+	ce->ring = __intel_context_ring_size(SZ_16K);
+
+	return ce;
+}
+
 static int request_add_sync(struct i915_request *rq, int err)
 {
 	i915_request_get(rq);
@@ -301,7 +315,7 @@ static int live_mocs_clean(void *arg)
 	for_each_engine(engine, gt, id) {
 		struct intel_context *ce;

-		ce = intel_context_create(engine);
+		ce = mocs_context_create(engine);
 		if (IS_ERR(ce)) {
 			err = PTR_ERR(ce);
 			break;
@@ -395,7 +409,7 @@ static int live_mocs_reset(void *arg)
 	for_each_engine(engine, gt, id) {
 		struct intel_context *ce;

-		ce = intel_context_create(engine);
+		ce = mocs_context_create(engine);
 		if (IS_ERR(ce)) {
 			err = PTR_ERR(ce);
 			break;

New file: selftest_ring.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2020 Intel Corporation
 */

static struct intel_ring *mock_ring(unsigned long sz)
{
	struct intel_ring *ring;

	ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
	if (!ring)
		return NULL;

	kref_init(&ring->ref);
	ring->size = sz;
	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(sz);
	ring->effective_size = sz;
	ring->vaddr = (void *)(ring + 1);
	atomic_set(&ring->pin_count, 1);

	intel_ring_update_space(ring);

	return ring;
}

static void mock_ring_free(struct intel_ring *ring)
{
	kfree(ring);
}

static int check_ring_direction(struct intel_ring *ring,
				u32 next, u32 prev,
				int expected)
{
	int result;

	result = intel_ring_direction(ring, next, prev);
	if (result < 0)
		result = -1;
	else if (result > 0)
		result = 1;

	if (result != expected) {
		pr_err("intel_ring_direction(%u, %u):%d != %d\n",
		       next, prev, result, expected);
		return -EINVAL;
	}

	return 0;
}

static int check_ring_step(struct intel_ring *ring, u32 x, u32 step)
{
	u32 prev = x, next = intel_ring_wrap(ring, x + step);
	int err = 0;

	err |= check_ring_direction(ring, next, next,  0);
	err |= check_ring_direction(ring, prev, prev,  0);
	err |= check_ring_direction(ring, next, prev,  1);
	err |= check_ring_direction(ring, prev, next, -1);

	return err;
}

static int check_ring_offset(struct intel_ring *ring, u32 x, u32 step)
{
	int err = 0;

	err |= check_ring_step(ring, x, step);
	err |= check_ring_step(ring, intel_ring_wrap(ring, x + 1), step);
	err |= check_ring_step(ring, intel_ring_wrap(ring, x - 1), step);

	return err;
}

static int igt_ring_direction(void *dummy)
{
	struct intel_ring *ring;
	unsigned int half = 2048;
	int step, err = 0;

	ring = mock_ring(2 * half);
	if (!ring)
		return -ENOMEM;
	GEM_BUG_ON(ring->size != 2 * half);

	/* Precision of wrap detection is limited to ring->size / 2 */
	for (step = 1; step < half; step <<= 1) {
		err |= check_ring_offset(ring, 0, step);
		err |= check_ring_offset(ring, half, step);
	}
	err |= check_ring_step(ring, 0, half - 64);

	/* And check unwrapped handling for good measure */
	err |= check_ring_offset(ring, 0, 2 * half + 64);
	err |= check_ring_offset(ring, 3 * half, 1);

	mock_ring_free(ring);

	return err;
}

int intel_ring_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ring_direction),
	};

	return i915_subtests(tests, NULL);
}

@@ -21,6 +21,7 @@ selftest(fence, i915_sw_fence_mock_selftests)
 selftest(scatterlist, scatterlist_mock_selftests)
 selftest(syncmap, i915_syncmap_mock_selftests)
 selftest(uncore, intel_uncore_mock_selftests)
+selftest(ring, intel_ring_mock_selftests)
 selftest(engine, intel_engine_cs_mock_selftests)
 selftest(timelines, intel_timeline_mock_selftests)
 selftest(requests, i915_request_mock_selftests)
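
A note on running the new subtest: on a kernel built with
CONFIG_DRM_I915_SELFTEST=y, it executes together with the rest of the
mock selftests, for example by loading the driver with
i915.mock_selftests=-1.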