Commit f2085c8e authored by Chris Wilson

drm/i915/selftests: Remove accidental serialization between gpu_fill

Upon object creation for live_gem_contexts, we fill the object with
known scratch and flush it out of the CPU cache. Before performing the
GPU fill, we don't need to flush it again and so avoid serialising with
previous fills.

However, we do need some throttling on the internal interfaces if we do
not want to run out of memory!
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190827161726.3640-1-chris@chris-wilson.co.uk
parent 8a9a9827
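
Before the diff, a minimal userspace sketch (not part of the commit) of the throttling pattern the patch adds: a small FIFO of in-flight requests in which the oldest entry is waited on and released before a new one is queued, which is what bounds memory use. The struct fake_request type and the fake_request_* helpers below are illustrative stand-ins, not the i915 request API the real throttle() uses (i915_request_wait, i915_request_put, intel_context_create_request); only the pattern and the queue depth of 5 mirror the code added in the diff.

/*
 * Userspace sketch of a FIFO throttle: at most "count" requests are in
 * flight; before a new one is queued, the oldest slot is waited on and
 * released.  struct fake_request and the fake_request_* helpers are
 * stand-ins, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_request {
        int seqno;              /* stands in for a struct i915_request */
};

/* Pretend to wait for the request to retire; always succeeds here. */
static int fake_request_wait(struct fake_request *rq)
{
        printf("waiting on request %d\n", rq->seqno);
        return 0;
}

static void fake_request_put(struct fake_request *rq)
{
        free(rq);
}

static struct fake_request *fake_request_create(int seqno)
{
        struct fake_request *rq = calloc(1, sizeof(*rq));

        if (rq)
                rq->seqno = seqno;
        return rq;
}

/*
 * Same shape as the patch's throttle(): block on the oldest slot, drop
 * it, shift the queue down and put a freshly created request in the
 * last slot.  While the queue is still filling up, q[0] is NULL and no
 * wait happens.
 */
static int throttle(struct fake_request **q, int count, int seqno)
{
        int i;

        if (q[0]) {
                if (fake_request_wait(q[0]) < 0)
                        return -1;

                fake_request_put(q[0]);
        }

        for (i = 0; i < count - 1; i++)
                q[i] = q[i + 1];

        q[i] = fake_request_create(seqno);
        return q[i] ? 0 : -1;
}

/* Mirror of the patch's throttle_release(): drop whatever is queued. */
static void throttle_release(struct fake_request **q, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (!q[i])
                        continue;

                fake_request_put(q[i]);
                q[i] = NULL;
        }
}

int main(void)
{
        struct fake_request *tq[5] = { NULL }; /* same depth as tq[5] below */
        int i;

        /* Submit 20 "fills": never more than 5 of them are outstanding. */
        for (i = 0; i < 20; i++) {
                if (throttle(tq, 5, i))
                        break;
        }

        throttle_release(tq, 5);
        return 0;
}

In the patch itself, each gpu_fill() is followed by throttle(ce, tq, ARRAY_SIZE(tq)); once the five-entry queue is full, the loop waits for the oldest request before submitting more, and the unwind paths call throttle_release(tq, ARRAY_SIZE(tq)) to drop whatever is still queued.
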
@@ -180,12 +180,6 @@ static int gpu_fill(struct intel_context *ce,
         if (IS_ERR(vma))
                 return PTR_ERR(vma);
 
-        i915_gem_object_lock(obj);
-        err = i915_gem_object_set_to_gtt_domain(obj, true);
-        i915_gem_object_unlock(obj);
-        if (err)
-                return err;
-
         err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
         if (err)
                 return err;
@@ -343,6 +337,45 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
         return npages / DW_PER_PAGE;
 }
 
+static void throttle_release(struct i915_request **q, int count)
+{
+        int i;
+
+        for (i = 0; i < count; i++) {
+                if (IS_ERR_OR_NULL(q[i]))
+                        continue;
+
+                i915_request_put(fetch_and_zero(&q[i]));
+        }
+}
+
+static int throttle(struct intel_context *ce,
+                    struct i915_request **q, int count)
+{
+        int i;
+
+        if (!IS_ERR_OR_NULL(q[0])) {
+                if (i915_request_wait(q[0],
+                                      I915_WAIT_INTERRUPTIBLE,
+                                      MAX_SCHEDULE_TIMEOUT) < 0)
+                        return -EINTR;
+
+                i915_request_put(q[0]);
+        }
+
+        for (i = 0; i < count - 1; i++)
+                q[i] = q[i + 1];
+
+        q[i] = intel_context_create_request(ce);
+        if (IS_ERR(q[i]))
+                return PTR_ERR(q[i]);
+
+        i915_request_get(q[i]);
+        i915_request_add(q[i]);
+
+        return 0;
+}
+
 static int igt_ctx_exec(void *arg)
 {
         struct drm_i915_private *i915 = arg;
@@ -362,6 +395,7 @@ static int igt_ctx_exec(void *arg)
         for_each_engine(engine, i915, id) {
                 struct drm_i915_gem_object *obj = NULL;
                 unsigned long ncontexts, ndwords, dw;
+                struct i915_request *tq[5] = {};
                 struct igt_live_test t;
                 struct drm_file *file;
                 IGT_TIMEOUT(end_time);
@@ -409,13 +443,18 @@
                         }
 
                         err = gpu_fill(ce, obj, dw);
-                        intel_context_put(ce);
-
                         if (err) {
                                 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
                                        ndwords, dw, max_dwords(obj),
                                        engine->name, ctx->hw_id,
                                        yesno(!!ctx->vm), err);
+                                intel_context_put(ce);
+                                goto out_unlock;
+                        }
+
+                        err = throttle(ce, tq, ARRAY_SIZE(tq));
+                        if (err) {
+                                intel_context_put(ce);
                                 goto out_unlock;
                         }
 
@@ -426,6 +465,8 @@
                         }
                         ndwords++;
                         ncontexts++;
+
+                        intel_context_put(ce);
                 }
 
                 pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
@@ -444,6 +485,7 @@
                 }
 
 out_unlock:
+                throttle_release(tq, ARRAY_SIZE(tq));
                 if (igt_live_test_end(&t))
                         err = -EIO;
                 mutex_unlock(&i915->drm.struct_mutex);
@@ -461,6 +503,7 @@ static int igt_ctx_exec(void *arg)
 static int igt_shared_ctx_exec(void *arg)
 {
         struct drm_i915_private *i915 = arg;
+        struct i915_request *tq[5] = {};
         struct i915_gem_context *parent;
         struct intel_engine_cs *engine;
         enum intel_engine_id id;
@@ -535,14 +578,20 @@
                         }
 
                         err = gpu_fill(ce, obj, dw);
-                        intel_context_put(ce);
-                        kernel_context_close(ctx);
-
                         if (err) {
                                 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
                                        ndwords, dw, max_dwords(obj),
                                        engine->name, ctx->hw_id,
                                        yesno(!!ctx->vm), err);
+                                intel_context_put(ce);
+                                kernel_context_close(ctx);
+                                goto out_test;
+                        }
+
+                        err = throttle(ce, tq, ARRAY_SIZE(tq));
+                        if (err) {
+                                intel_context_put(ce);
+                                kernel_context_close(ctx);
                                 goto out_test;
                         }
 
@@ -553,6 +602,9 @@
                         }
                         ndwords++;
                         ncontexts++;
+
+                        intel_context_put(ce);
+                        kernel_context_close(ctx);
                 }
 
                 pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
                         ncontexts, engine->name, ndwords);
@@ -574,6 +626,7 @@
                 mutex_lock(&i915->drm.struct_mutex);
         }
 out_test:
+        throttle_release(tq, ARRAY_SIZE(tq));
         if (igt_live_test_end(&t))
                 err = -EIO;
 out_unlock:
@@ -1050,6 +1103,7 @@ static int igt_ctx_readonly(void *arg)
 {
         struct drm_i915_private *i915 = arg;
         struct drm_i915_gem_object *obj = NULL;
+        struct i915_request *tq[5] = {};
         struct i915_address_space *vm;
         struct i915_gem_context *ctx;
         unsigned long idx, ndwords, dw;
@@ -1121,6 +1175,12 @@
                                 goto out_unlock;
                         }
 
+                        err = throttle(ce, tq, ARRAY_SIZE(tq));
+                        if (err) {
+                                i915_gem_context_unlock_engines(ctx);
+                                goto out_unlock;
+                        }
+
                         if (++dw == max_dwords(obj)) {
                                 obj = NULL;
                                 dw = 0;
@@ -1151,6 +1211,7 @@
         }
 
 out_unlock:
+        throttle_release(tq, ARRAY_SIZE(tq));
         if (igt_live_test_end(&t))
                 err = -EIO;
         mutex_unlock(&i915->drm.struct_mutex);
...