Commit 4fe13f28 authored by Chris Wilson

drm/i915/selftests: Add tests for timeslicing virtual engines

Make sure that we can execute a virtual request on an already busy
engine, and conversely that we can execute a normal request if the
engines are already fully occupied by virtual requests.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200519132046.22443-1-chris@chris-wilson.co.uk
parent f73fbb5a
......@@ -3591,9 +3591,11 @@ static int nop_virtual_engine(struct intel_gt *gt,
return err;
}
static unsigned int select_siblings(struct intel_gt *gt,
unsigned int class,
struct intel_engine_cs **siblings)
static unsigned int
__select_siblings(struct intel_gt *gt,
unsigned int class,
struct intel_engine_cs **siblings,
bool (*filter)(const struct intel_engine_cs *))
{
unsigned int n = 0;
unsigned int inst;
......@@ -3602,12 +3604,23 @@ static unsigned int select_siblings(struct intel_gt *gt,
if (!gt->engine_class[class][inst])
continue;
if (filter && !filter(gt->engine_class[class][inst]))
continue;
siblings[n++] = gt->engine_class[class][inst];
}
return n;
}
/*
 * select_siblings() - gather every engine of @class on @gt into @siblings.
 *
 * Thin convenience wrapper around __select_siblings() that applies no
 * filter, so all present engines of the class are returned.
 *
 * Returns the number of engines written into @siblings.
 */
static unsigned int select_siblings(struct intel_gt *gt,
				    unsigned int class,
				    struct intel_engine_cs **siblings)
{
	return __select_siblings(gt, class, siblings, NULL);
}
static int live_virtual_engine(void *arg)
{
struct intel_gt *gt = arg;
......@@ -3762,6 +3775,186 @@ static int live_virtual_mask(void *arg)
return 0;
}
/*
 * slice_timeout() - upper bound (in jiffies) for a timeslice handover.
 *
 * Budget two full timeslice intervals of @engine (one for the waiter to
 * be slotted in, one for it to be switched back out again), plus a HZ/5
 * allowance for the nop request itself to complete.
 */
static long slice_timeout(struct intel_engine_cs *engine)
{
	return 2 * msecs_to_jiffies_timeout(timeslice(engine)) + HZ / 5;
}
/*
 * slicein_virtual_engine() - check a virtual request can timeslice IN.
 *
 * Fill every sibling engine with a preemptible spinner, then submit a
 * single nop request on a virtual engine spanning those siblings. If
 * timeslicing works, the virtual request must get a slice on one of the
 * busy engines and complete within slice_timeout(); otherwise the GT is
 * wedged and -EIO is returned.
 */
static int slicein_virtual_engine(struct intel_gt *gt,
				  struct intel_engine_cs **siblings,
				  unsigned int nsibling)
{
	const long timeout = slice_timeout(siblings[0]);
	struct intel_context *ce;
	struct i915_request *rq;
	struct igt_spinner spin;
	unsigned int n;
	int err = 0;

	/*
	 * Virtual requests must take part in timeslicing on the target engines.
	 */

	if (igt_spinner_init(&spin, gt))
		return -ENOMEM;

	/*
	 * Occupy each sibling with a spinning request. MI_ARB_CHECK keeps
	 * the spinner preemptible, so a timeslice can evict it.
	 */
	for (n = 0; n < nsibling; n++) {
		ce = intel_context_create(siblings[n]);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
		intel_context_put(ce); /* request holds its own reference */
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out;
		}

		i915_request_add(rq);
	}

	/* Now submit one nop via a virtual engine over the busy siblings. */
	ce = intel_execlists_create_virtual(siblings, nsibling);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out;
	}

	rq = intel_context_create_request(ce);
	intel_context_put(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out;
	}

	/* Keep a reference so we can wait on rq after submission. */
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, timeout) < 0) {
		/* The virtual request never got a slice: declare failure. */
		GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
			      __func__, rq->engine->name);
		GEM_TRACE_DUMP();
		intel_gt_set_wedged(gt);
		err = -EIO;
	}
	i915_request_put(rq);

out:
	/* Stop the spinners so the flush below can drain all requests. */
	igt_spinner_end(&spin);
	if (igt_flush_test(gt->i915))
		err = -EIO;
	igt_spinner_fini(&spin);
	return err;
}
/*
 * sliceout_virtual_engine() - check virtual requests can be timesliced OUT.
 *
 * Saturate the siblings with spinning virtual requests, then submit one
 * normal (non-virtual) nop request per sibling. Each nop must preempt a
 * spinning virtual request and complete within slice_timeout(); if one
 * does not, the GT is wedged and -EIO is returned.
 */
static int sliceout_virtual_engine(struct intel_gt *gt,
				   struct intel_engine_cs **siblings,
				   unsigned int nsibling)
{
	const long timeout = slice_timeout(siblings[0]);
	struct intel_context *ce;
	struct i915_request *rq;
	struct igt_spinner spin;
	unsigned int n;
	int err = 0;

	/*
	 * Virtual requests must allow others a fair timeslice.
	 */

	if (igt_spinner_init(&spin, gt))
		return -ENOMEM;

	/* XXX We do not handle oversubscription and fairness with normal rq */
	/* One spinning virtual request per sibling keeps every engine busy. */
	for (n = 0; n < nsibling; n++) {
		ce = intel_execlists_create_virtual(siblings, nsibling);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		/* MI_ARB_CHECK leaves the spinner preemptible. */
		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
		intel_context_put(ce); /* request holds its own reference */
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out;
		}

		i915_request_add(rq);
	}

	/* Submit a plain nop on each sibling; each must get a slice. */
	for (n = 0; !err && n < nsibling; n++) {
		ce = intel_context_create(siblings[n]);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		rq = intel_context_create_request(ce);
		intel_context_put(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out;
		}

		/* Keep a reference so we can wait on rq after submission. */
		i915_request_get(rq);
		i915_request_add(rq);
		if (i915_request_wait(rq, 0, timeout) < 0) {
			/* Virtual spinner hogged the engine: test failure. */
			GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
				      __func__, siblings[n]->name);
			GEM_TRACE_DUMP();
			intel_gt_set_wedged(gt);
			err = -EIO;
		}
		i915_request_put(rq);
	}

out:
	/* Stop the spinners so the flush below can drain all requests. */
	igt_spinner_end(&spin);
	if (igt_flush_test(gt->i915))
		err = -EIO;
	igt_spinner_fini(&spin);
	return err;
}
/*
 * live_virtual_slice() - exercise timeslicing in and out of virtual engines.
 *
 * For every engine class with at least two timeslice-capable siblings,
 * verify that a virtual request can both claim a slice on busy engines
 * (slicein) and yield a slice to normal requests (sliceout).
 */
static int live_virtual_slice(void *arg)
{
	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
	struct intel_gt *gt = arg;
	unsigned int class;

	/* With GuC submission the scheduling below is out of our hands. */
	if (intel_uc_uses_guc_submission(&gt->uc))
		return 0;

	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
		unsigned int count;
		int ret;

		/* A virtual engine needs at least two timeslicing siblings. */
		count = __select_siblings(gt, class, siblings,
					  intel_engine_has_timeslices);
		if (count < 2)
			continue;

		ret = slicein_virtual_engine(gt, siblings, count);
		if (!ret)
			ret = sliceout_virtual_engine(gt, siblings, count);
		if (ret)
			return ret;
	}

	return 0;
}
static int preserved_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling)
......@@ -4297,6 +4490,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_virtual_engine),
SUBTEST(live_virtual_mask),
SUBTEST(live_virtual_preserved),
SUBTEST(live_virtual_slice),
SUBTEST(live_virtual_bond),
SUBTEST(live_virtual_reset),
};
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment