Commit 94ed4753 authored by Chris Wilson

drm/i915/selftests: Make the hanging request non-preemptible

In some of our hangtests, we try to reset an active engine while it is
spinning inside the recursive spinner. However, we also try to flood the
engine with requests that would preempt the hang, and so we should
disable preemption to be sure that we reset the right request.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200607222108.14401-2-chris@chris-wilson.co.uk
parent 8733a063
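
For readers unfamiliar with the command-streamer opcodes involved: MI_ARB_CHECK is an arbitration point at which the engine may switch to a pending preempting context, whereas MI_NOOP does nothing, so replacing the former with the latter leaves the spinner's infinite loop with no point at which it can be preempted. Below is a condensed sketch of the Gen8 spinner after this change; emit_nonpreemptible_spin() is a hypothetical helper invented for illustration (the real code is emitted inline in hang_create_request(), as the diff shows), while the MI_* opcodes and lower/upper_32_bits() helpers are the real ones from intel_gpu_commands.h and the kernel.

/*
 * Store the seqno to the hws breadcrumb so the test can see that the
 * spinner has started, then jump back to the start of the batch
 * forever. With MI_NOOP where MI_ARB_CHECK used to be, there is no
 * arbitration point inside the loop, so only a reset can stop it.
 */
static u32 *emit_nonpreemptible_spin(u32 *batch, u64 hws, u32 seqno, u64 vma)
{
	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws);
	*batch++ = upper_32_bits(hws);
	*batch++ = seqno;
	*batch++ = MI_NOOP;	/* was MI_ARB_CHECK */

	*batch++ = MI_NOOP;	/* was MI_ARB_CHECK */
	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; /* Gen8+, 48b address */
	*batch++ = lower_32_bits(vma);
	*batch++ = upper_32_bits(vma);

	return batch;
}
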
@@ -203,12 +203,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 		*batch++ = lower_32_bits(hws_address(hws, rq));
 		*batch++ = upper_32_bits(hws_address(hws, rq));
 		*batch++ = rq->fence.seqno;
-		*batch++ = MI_ARB_CHECK;
+		*batch++ = MI_NOOP;

 		memset(batch, 0, 1024);
 		batch += 1024 / sizeof(*batch);

-		*batch++ = MI_ARB_CHECK;
+		*batch++ = MI_NOOP;
 		*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
 		*batch++ = lower_32_bits(vma->node.start);
 		*batch++ = upper_32_bits(vma->node.start);
@@ -217,12 +217,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 		*batch++ = 0;
 		*batch++ = lower_32_bits(hws_address(hws, rq));
 		*batch++ = rq->fence.seqno;
-		*batch++ = MI_ARB_CHECK;
+		*batch++ = MI_NOOP;

 		memset(batch, 0, 1024);
 		batch += 1024 / sizeof(*batch);

-		*batch++ = MI_ARB_CHECK;
+		*batch++ = MI_NOOP;
 		*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
 		*batch++ = lower_32_bits(vma->node.start);
 	} else if (INTEL_GEN(gt->i915) >= 4) {
@@ -230,24 +230,24 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 		*batch++ = 0;
 		*batch++ = lower_32_bits(hws_address(hws, rq));
 		*batch++ = rq->fence.seqno;
-		*batch++ = MI_ARB_CHECK;
+		*batch++ = MI_NOOP;

 		memset(batch, 0, 1024);
 		batch += 1024 / sizeof(*batch);

-		*batch++ = MI_ARB_CHECK;
+		*batch++ = MI_NOOP;
 		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
 		*batch++ = lower_32_bits(vma->node.start);
 	} else {
 		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
 		*batch++ = lower_32_bits(hws_address(hws, rq));
 		*batch++ = rq->fence.seqno;
-		*batch++ = MI_ARB_CHECK;
+		*batch++ = MI_NOOP;

 		memset(batch, 0, 1024);
 		batch += 1024 / sizeof(*batch);

-		*batch++ = MI_ARB_CHECK;
+		*batch++ = MI_NOOP;
 		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
 		*batch++ = lower_32_bits(vma->node.start);
 	}
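
Since the loop now contains no arbitration point, the only ways out of it are an engine reset or the CPU rewriting the batch. The latter is how the selftests normally tear a spinner down; a minimal sketch of that pattern follows, assuming a coherent (or subsequently flushed) CPU mapping of the batch. The helper name is illustrative; similar logic lives in the selftest's hang_fini(), though the exact form may differ.

/*
 * Terminate a self-referencing spinner without a reset by turning
 * the head of the loop into a batch-buffer-end from the CPU.
 */
static void stop_spinner(u32 *batch)
{
	WRITE_ONCE(*batch, MI_BATCH_BUFFER_END);
	wmb(); /* push the write out before the GPU re-reads the batch */
}

The second half of the patch, below, makes __igt_reset_engines() verify that the reset actually victimised this spinner rather than one of the flooding requests:
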
@@ -866,13 +866,29 @@ static int __igt_reset_engines(struct intel_gt *gt,
 			count++;

 			if (rq) {
+				if (rq->fence.error != -EIO) {
+					pr_err("i915_reset_engine(%s:%s):"
+					       " failed to reset request %llx:%lld\n",
+					       engine->name, test_name,
+					       rq->fence.context,
+					       rq->fence.seqno);
+					i915_request_put(rq);
+
+					GEM_TRACE_DUMP();
+					intel_gt_set_wedged(gt);
+					err = -EIO;
+					break;
+				}
+
 				if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 					struct drm_printer p =
 						drm_info_printer(gt->i915->drm.dev);

 					pr_err("i915_reset_engine(%s:%s):"
-					       " failed to complete request after reset\n",
-					       engine->name, test_name);
+					       " failed to complete request %llx:%lld after reset\n",
+					       engine->name, test_name,
+					       rq->fence.context,
+					       rq->fence.seqno);
 					intel_engine_dump(engine, &p,
 							  "%s\n", engine->name);
 					i915_request_put(rq);
...
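
A note on the new check: a request that is cancelled by an engine reset has its fence signalled with the error -EIO, while a request that completed normally (for example, one of the preempting flood) carries no error. The hunk above reads rq->fence.error directly after the reset has completed; a loose, illustrative sketch of the same distinction from a waiter's side, using the generic dma_fence_get_status() (negative means error, 1 means signalled without error, 0 means still pending) rather than the selftest's code:

	int status;

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		return -ETIME; /* never completed, even after the reset */

	status = dma_fence_get_status(&rq->fence);
	if (status != -EIO)
		pr_err("reset landed on the wrong request (status %d)\n",
		       status);
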