Commit d4b02a4c authored by Chris Wilson

drm/i915/selftests: Trim execlists runtime

Reduce the smoke depth by trimming the number of contexts, repetitions
and wait times. This is in preparation for a less greedy scheduler that
tries to be fair across contexts, resulting in a great many more context
switches. A thousand context switches may be 50-100ms, causing us to
timeout as the HW is not fast enough to complete the deep smoketests.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200607222108.14401-5-chris@chris-wilson.co.uk
parent 3d09677a
......@@ -845,10 +845,11 @@ static int live_timeslice_preempt(void *arg)
{
struct intel_gt *gt = arg;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct i915_vma *vma;
void *vaddr;
int err = 0;
int count;
/*
* If a request takes too long, we would like to give other users
......@@ -885,26 +886,21 @@ static int live_timeslice_preempt(void *arg)
if (err)
goto err_pin;
for_each_prime_number_from(count, 1, 16) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, gt, id) {
if (!intel_engine_has_preemption(engine))
continue;
for_each_engine(engine, gt, id) {
if (!intel_engine_has_preemption(engine))
continue;
memset(vaddr, 0, PAGE_SIZE);
memset(vaddr, 0, PAGE_SIZE);
engine_heartbeat_disable(engine);
err = slice_semaphore_queue(engine, vma, count);
engine_heartbeat_enable(engine);
if (err)
goto err_pin;
engine_heartbeat_disable(engine);
err = slice_semaphore_queue(engine, vma, 5);
engine_heartbeat_enable(engine);
if (err)
goto err_pin;
if (igt_flush_test(gt->i915)) {
err = -EIO;
goto err_pin;
}
if (igt_flush_test(gt->i915)) {
err = -EIO;
goto err_pin;
}
}
......@@ -1251,22 +1247,6 @@ static int live_timeslice_queue(void *arg)
intel_engine_flush_submission(engine);
} while (READ_ONCE(engine->execlists.pending[0]));
if (!READ_ONCE(engine->execlists.timer.expires) &&
execlists_active(&engine->execlists) == rq &&
!i915_request_completed(rq)) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
GEM_TRACE_ERR("%s: Failed to enable timeslicing!\n",
engine->name);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
GEM_TRACE_DUMP();
memset(vaddr, 0xff, PAGE_SIZE);
err = -EINVAL;
}
/* Timeslice every jiffy, so within 2 we should signal */
if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
struct drm_printer p =
......@@ -2671,16 +2651,8 @@ static int live_preempt_gang(void *arg)
/* Submit each spinner at increasing priority */
engine->schedule(rq, &attr);
if (prio < attr.priority)
break;
if (prio <= I915_PRIORITY_MAX)
continue;
if (__igt_timeout(end_time, NULL))
break;
} while (1);
} while (prio <= I915_PRIORITY_MAX &&
!__igt_timeout(end_time, NULL));
pr_debug("%s: Preempt chain of %d requests\n",
engine->name, prio);
......@@ -3248,7 +3220,7 @@ static int smoke_crescendo_thread(void *arg)
return err;
count++;
} while (!__igt_timeout(end_time, NULL));
} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
smoke->count = count;
return 0;
......@@ -3324,7 +3296,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
count++;
}
} while (!__igt_timeout(end_time, NULL));
} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags,
......@@ -3337,7 +3309,7 @@ static int live_preempt_smoke(void *arg)
struct preempt_smoke smoke = {
.gt = arg,
.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
.ncontext = 1024,
.ncontext = 256,
};
const unsigned int phase[] = { 0, BATCH };
struct igt_live_test t;
......
......@@ -221,8 +221,8 @@ bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
10) &&
100) &&
wait_for(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
1000));
50));
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment