Commit 4ce0c8e7 authored by Tvrtko Ursulin, committed by Andi Shyti

drm/i915/selftests: Fix live_requests for all engines

After the abandonment of i915->kernel_context, and since we have started
creating a per-GT engine->kernel_context, these tests need to be updated
to instantiate the batch buffer VMA in the correct PPGTT for the context
used to execute each spinner.

v2(Tejas):
  - Clean commit message - Matt
  - Add BUG_ON to match vm
v3(Tejas):
  - Fix dim checkpatch warnings
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230228044307.191639-1-tejas.upadhyay@intel.com
parent abd74d26
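
The change is mechanical, but the rule behind it is worth spelling out: a batch buffer VMA must be bound in the same address space (vm) as the context that executes it, and with per-GT kernel contexts that is the GT's vm rather than the global GGTT. Below is a minimal sketch of that pattern, assuming only the i915 internals already visible in the diff (i915_gem_object_create_internal(), i915_vma_instance(), gt->vm); the helper name batch_for_gt() is hypothetical and the sketch is an illustration, not part of the patch:

	/*
	 * Sketch only: bind a batch object in the address space of the GT
	 * whose kernel context will execute it. Binding in gt->vm (instead
	 * of the global GGTT) keeps request->context->vm == batch->vm,
	 * which the patch asserts with GEM_BUG_ON().
	 */
	static struct i915_vma *batch_for_gt(struct intel_gt *gt)
	{
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
		if (IS_ERR(obj))
			return ERR_CAST(obj);

		vma = i915_vma_instance(obj, gt->vm, NULL);
		if (IS_ERR(vma))
			i915_gem_object_put(obj); /* drop obj on failure */

		return vma;
	}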
drivers/gpu/drm/i915/selftests/i915_request.c

@@ -957,18 +957,18 @@ static int live_cancel_request(void *arg)
 	return 0;
 }
 
-static struct i915_vma *empty_batch(struct drm_i915_private *i915)
+static struct i915_vma *empty_batch(struct intel_gt *gt)
 {
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 	u32 *cmd;
 	int err;
 
-	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+	cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
 		goto err;
@@ -979,15 +979,15 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
 	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
-	intel_gt_chipset_flush(to_gt(i915));
+	intel_gt_chipset_flush(gt);
 
-	vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
+	vma = i915_vma_instance(obj, gt->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err;
 	}
 
-	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 	if (err)
 		goto err;
@@ -1005,6 +1005,14 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
 	return ERR_PTR(err);
 }
 
+static int emit_bb_start(struct i915_request *rq, struct i915_vma *batch)
+{
+	return rq->engine->emit_bb_start(rq,
+					 i915_vma_offset(batch),
+					 i915_vma_size(batch),
+					 0);
+}
+
 static struct i915_request *
 empty_request(struct intel_engine_cs *engine,
 	      struct i915_vma *batch)
@@ -1016,10 +1024,7 @@ empty_request(struct intel_engine_cs *engine,
 	if (IS_ERR(request))
 		return request;
 
-	err = engine->emit_bb_start(request,
-				    i915_vma_offset(batch),
-				    i915_vma_size(batch),
-				    I915_DISPATCH_SECURE);
+	err = emit_bb_start(request, batch);
 	if (err)
 		goto out_request;
@@ -1034,8 +1039,7 @@ static int live_empty_request(void *arg)
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine;
 	struct igt_live_test t;
-	struct i915_vma *batch;
-	int err = 0;
+	int err;
 
 	/*
 	 * Submit various sized batches of empty requests, to each engine
@@ -1043,16 +1047,17 @@ static int live_empty_request(void *arg)
 	 * the overhead of submitting requests to the hardware.
 	 */
 
-	batch = empty_batch(i915);
-	if (IS_ERR(batch))
-		return PTR_ERR(batch);
-
 	for_each_uabi_engine(engine, i915) {
 		IGT_TIMEOUT(end_time);
 		struct i915_request *request;
+		struct i915_vma *batch;
 		unsigned long n, prime;
 		ktime_t times[2] = {};
 
+		batch = empty_batch(engine->gt);
+		if (IS_ERR(batch))
+			return PTR_ERR(batch);
+
 		err = igt_live_test_begin(&t, i915, __func__, engine->name);
 		if (err)
 			goto out_batch;
@@ -1100,27 +1105,30 @@ static int live_empty_request(void *arg)
 			engine->name,
 			ktime_to_ns(times[0]),
 			prime, div64_u64(ktime_to_ns(times[1]), prime));
+out_batch:
+		i915_vma_unpin(batch);
+		i915_vma_put(batch);
+		if (err)
+			break;
 	}
 
-out_batch:
-	i915_vma_unpin(batch);
-	i915_vma_put(batch);
 	return err;
 }
 
-static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
+static struct i915_vma *recursive_batch(struct intel_gt *gt)
 {
+	struct drm_i915_private *i915 = gt->i915;
 	struct drm_i915_gem_object *obj;
 	const int ver = GRAPHICS_VER(i915);
 	struct i915_vma *vma;
 	u32 *cmd;
 	int err;
 
-	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	vma = i915_vma_instance(obj, to_gt(i915)->vm, NULL);
+	vma = i915_vma_instance(obj, gt->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err;
@@ -1152,7 +1160,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
-	intel_gt_chipset_flush(to_gt(i915));
+	intel_gt_chipset_flush(gt);
 
 	return vma;
@@ -1186,7 +1194,6 @@ static int live_all_engines(void *arg)
 	struct intel_engine_cs *engine;
 	struct i915_request **request;
 	struct igt_live_test t;
-	struct i915_vma *batch;
 	unsigned int idx;
 	int err;
@@ -1204,42 +1211,44 @@ static int live_all_engines(void *arg)
 	if (err)
 		goto out_free;
 
-	batch = recursive_batch(i915);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
-		goto out_free;
-	}
-
-	i915_vma_lock(batch);
 	idx = 0;
 	for_each_uabi_engine(engine, i915) {
+		struct i915_vma *batch;
+
+		batch = recursive_batch(engine->gt);
+		if (IS_ERR(batch)) {
+			err = PTR_ERR(batch);
+			pr_err("%s: Unable to create batch, err=%d\n",
+			       __func__, err);
+			goto out_free;
+		}
+
+		i915_vma_lock(batch);
 		request[idx] = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(request[idx])) {
 			err = PTR_ERR(request[idx]);
 			pr_err("%s: Request allocation failed with err=%d\n",
 			       __func__, err);
-			goto out_request;
+			goto out_unlock;
 		}
+		GEM_BUG_ON(request[idx]->context->vm != batch->vm);
 
 		err = i915_vma_move_to_active(batch, request[idx], 0);
 		GEM_BUG_ON(err);
 
-		err = engine->emit_bb_start(request[idx],
-					    i915_vma_offset(batch),
-					    i915_vma_size(batch),
-					    0);
+		err = emit_bb_start(request[idx], batch);
 		GEM_BUG_ON(err);
 		request[idx]->batch = batch;
 
 		i915_request_get(request[idx]);
 		i915_request_add(request[idx]);
 		idx++;
+out_unlock:
+		i915_vma_unlock(batch);
+		if (err)
+			goto out_request;
 	}
-	i915_vma_unlock(batch);
 
 	idx = 0;
 	for_each_uabi_engine(engine, i915) {
 		if (i915_request_completed(request[idx])) {
@@ -1251,17 +1260,23 @@ static int live_all_engines(void *arg)
 		idx++;
 	}
 
-	err = recursive_batch_resolve(batch);
-	if (err) {
-		pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
-		goto out_request;
+	idx = 0;
+	for_each_uabi_engine(engine, i915) {
+		err = recursive_batch_resolve(request[idx]->batch);
+		if (err) {
+			pr_err("%s: failed to resolve batch, err=%d\n",
+			       __func__, err);
+			goto out_request;
+		}
+		idx++;
 	}
 
 	idx = 0;
 	for_each_uabi_engine(engine, i915) {
+		struct i915_request *rq = request[idx];
 		long timeout;
 
-		timeout = i915_request_wait(request[idx], 0,
+		timeout = i915_request_wait(rq, 0,
 					    MAX_SCHEDULE_TIMEOUT);
 		if (timeout < 0) {
 			err = timeout;
@@ -1270,8 +1285,10 @@ static int live_all_engines(void *arg)
 			goto out_request;
 		}
 
-		GEM_BUG_ON(!i915_request_completed(request[idx]));
-		i915_request_put(request[idx]);
+		GEM_BUG_ON(!i915_request_completed(rq));
+		i915_vma_unpin(rq->batch);
+		i915_vma_put(rq->batch);
+		i915_request_put(rq);
 		request[idx] = NULL;
 		idx++;
 	}
@@ -1281,12 +1298,18 @@ static int live_all_engines(void *arg)
 out_request:
 	idx = 0;
 	for_each_uabi_engine(engine, i915) {
-		if (request[idx])
-			i915_request_put(request[idx]);
+		struct i915_request *rq = request[idx];
+
+		if (!rq)
+			continue;
+
+		if (rq->batch) {
+			i915_vma_unpin(rq->batch);
+			i915_vma_put(rq->batch);
+		}
+		i915_request_put(rq);
 		idx++;
 	}
-	i915_vma_unpin(batch);
-	i915_vma_put(batch);
 out_free:
 	kfree(request);
 	return err;
@@ -1322,7 +1345,7 @@ static int live_sequential_engines(void *arg)
 	for_each_uabi_engine(engine, i915) {
 		struct i915_vma *batch;
 
-		batch = recursive_batch(i915);
+		batch = recursive_batch(engine->gt);
 		if (IS_ERR(batch)) {
 			err = PTR_ERR(batch);
 			pr_err("%s: Unable to create batch for %s, err=%d\n",
@@ -1338,6 +1361,7 @@ static int live_sequential_engines(void *arg)
 			       __func__, engine->name, err);
 			goto out_unlock;
 		}
+		GEM_BUG_ON(request[idx]->context->vm != batch->vm);
 
 		if (prev) {
 			err = i915_request_await_dma_fence(request[idx],
@@ -1353,10 +1377,7 @@ static int live_sequential_engines(void *arg)
 		err = i915_vma_move_to_active(batch, request[idx], 0);
 		GEM_BUG_ON(err);
 
-		err = engine->emit_bb_start(request[idx],
-					    i915_vma_offset(batch),
-					    i915_vma_size(batch),
-					    0);
+		err = emit_bb_start(request[idx], batch);
 		GEM_BUG_ON(err);
 
 		request[idx]->batch = batch;
...
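
One consequence of the move to per-engine batches is ownership: each request now keeps its batch alive via rq->batch, so teardown happens per request instead of one global unpin/put. A hedged sketch of that cleanup step (the helper name put_request_and_batch() is hypothetical; the calls mirror the reworked out_request path above):

	/*
	 * Sketch only: drop the batch a request owns through rq->batch,
	 * then the request itself, as the reworked error path does.
	 */
	static void put_request_and_batch(struct i915_request *rq)
	{
		if (rq->batch) {
			i915_vma_unpin(rq->batch);
			i915_vma_put(rq->batch);
		}
		i915_request_put(rq);
	}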